Make linux_ptrace_attach_fail_reason return an std::string
gdb/gdbserver/linux-low.c
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2018 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"
#include "signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#include "common-inferior.h"
#include "nat/fork-inferior.h"
#include "environ.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

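/* Forward declarations of functions defined further below.  */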
static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
                                    siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static void proceed_one_lwp (thread_info *thread, lwp_info *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

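/* A signal, together with its siginfo, queued for later delivery to
   an LWP; kept on a per-LWP linked list via the PREV field.  */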
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if HEADER is not an ELF header at all.  On
   success, store the header's e_machine field in *MACHINE; otherwise
   store EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

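/* Delete LWP: remove its thread from the thread list, give the low
   target a chance to release any architecture-specific per-thread
   data, and free the lwp_info itself.  */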
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_build (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_get_lwp (ptid_of (event_thr)),
                            ptid_get_pid (ptid));
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already.  Leave the
             child suspended too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave single-step breakpoints there, child will
                 hit it, so uninsert single-step breakpoints from parent
                 (and child).  Once vfork child is done, reinsert
                 them back to parent.  */
              uninsert_single_step_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          tdesc = allocate_target_description ();
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent event is passed on to
             higher layers.  */
          event_lwp->fork_relative = child_lwp;
          child_lwp->fork_relative = event_lwp;

          /* If the parent thread is doing step-over with single-step
             breakpoints, the list of single-step breakpoints is cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once vforked
             child is done.  */
          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
              delete_single_step_breakpoints (child_thr);

              gdb_assert (has_single_step_breakpoints (event_thr));
              gdb_assert (!has_single_step_breakpoints (child_thr));
            }

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (report_thread_events)
        {
          new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

      thread_db_notice_clone (event_thr, ptid);

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
        = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
         system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by trace\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

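/* Allocate and initialize a new lwp_info for PTID, let the low target
   attach its per-thread data, and add the corresponding thread to the
   thread list.  */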
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
        trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
        trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
        trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
                 sizeof ("stdin/stdout redirected\n") - 1) < 0)
        {
          /* Errors ignored.  */;
        }
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

static int
linux_create_inferior (const char *program,
                       const std::vector<char *> &program_args)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
                         str_program_args.c_str (),
                         get_environ ()->envp (), linux_ptrace_fun,
                         NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          std::string reason
            = linux_ptrace_attach_fail_reason_string (ptid, err);

          warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);

      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
                                             &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

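/* Return true if fewer than two threads of process PID remain on the
   thread list, i.e., any given thread of that process would be its
   last.  */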
static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thread)
    {
      if (!seen_one)
        {
          /* This is the first thread of this process we see.  */
          seen_one = true;
          return false;
        }
      else
        {
          /* This is the second thread of this process we see.  */
          return true;
        }
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL only, so
     we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

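/* Kill process PID and all of its LWPs, then mourn it.  Returns 0 on
   success, -1 if PID is not a known process.  */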
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  TRY
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
        the_low_target.prepare_to_resume (lwp);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw_exception (ex);
    }
  END_CATCH

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)),
                 strerror (save_errno));
        }
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
                    target_pid_to_str (ptid_of (thread)),
                    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for for_each_thread.  Detaches from non-leader threads of a
   given process.  */

static void
linux_detach_lwp_callback (thread_info *thread)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == thread->id.lwp ())
    return;

  lwp_info *lwp = get_thread_lwp (thread);
  linux_detach_one_lwp (lwp);
}

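/* Detach from process PID and all of its LWPs, then mourn it.
   Returns 0 on success, -1 if PID is not a known process.  */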
static int
linux_detach (int pid)
{
  struct process_info *process;
  struct lwp_info *main_lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If a step over is already in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets
     really messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Free all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

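/* Wait until process PID has exited or been killed, reaping its exit
   status so no zombie is left behind.  */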
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return true if this lwp has an interesting status pending.  */
static bool
status_pending_p_callback (thread_info *thread, ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

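/* Return the lwp_info whose lwpid matches PTID's lwp field, or, if
   PTID carries no lwp, its pid field.  Returns NULL if no such LWP
   is known.  */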
1815 struct lwp_info *
1816 find_lwp_pid (ptid_t ptid)
1817 {
1818 thread_info *thread = find_thread ([&] (thread_info *thread)
1819 {
1820 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1821 return thread->id.lwp () == lwp;
1822 });
1823
1824 if (thread == NULL)
1825 return NULL;
1826
1827 return get_thread_lwp (thread);
1828 }
1829
1830 /* Return the number of known LWPs in the tgid given by PID. */
1831
1832 static int
1833 num_lwps (int pid)
1834 {
1835 int count = 0;
1836
1837 for_each_thread (pid, [&] (thread_info *thread)
1838 {
1839 count++;
1840 });
1841
1842 return count;
1843 }
1844
1845 /* See nat/linux-nat.h. */
1846
1847 struct lwp_info *
1848 iterate_over_lwps (ptid_t filter,
1849 iterate_over_lwps_ftype callback,
1850 void *data)
1851 {
1852 thread_info *thread = find_thread (filter, [&] (thread_info *thread)
1853 {
1854 lwp_info *lwp = get_thread_lwp (thread);
1855
1856 return callback (lwp, data);
1857 });
1858
1859 if (thread == NULL)
1860 return NULL;
1861
1862 return get_thread_lwp (thread);
1863 }
1864
1865 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1866 their exits until all other threads in the group have exited. */
1867
1868 static void
1869 check_zombie_leaders (void)
1870 {
1871 for_each_process ([] (process_info *proc) {
1872 pid_t leader_pid = pid_of (proc);
1873 struct lwp_info *leader_lp;
1874
1875 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1876
1877 if (debug_threads)
1878 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1879 "num_lwps=%d, zombie=%d\n",
1880 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1881 linux_proc_pid_is_zombie (leader_pid));
1882
1883 if (leader_lp != NULL && !leader_lp->stopped
1884 /* Check if there are other threads in the group, as we may
1885 have raced with the inferior simply exiting. */
1886 && !last_thread_of_process_p (leader_pid)
1887 && linux_proc_pid_is_zombie (leader_pid))
1888 {
1889 /* A leader zombie can mean one of two things:
1890
1891 - It exited, and there's an exit status pending
1892 available, or only the leader exited (not the whole
1893 program). In the latter case, we can't waitpid the
1894 leader's exit status until all other threads are gone.
1895
1896 - There are 3 or more threads in the group, and a thread
1897 other than the leader exec'd. On an exec, the Linux
1898 kernel destroys all other threads (except the execing
1899 one) in the thread group, and resets the execing thread's
1900 tid to the tgid. No exit notification is sent for the
1901 execing thread -- from the ptracer's perspective, it
1902 appears as though the execing thread just vanishes.
1903 Until we reap all other threads except the leader and the
1904 execing thread, the leader will be zombie, and the
1905 execing thread will be in `D (disc sleep)'. As soon as
1906 all other threads are reaped, the execing thread changes
1907 its tid to the tgid, and the previous (zombie) leader
1908 vanishes, giving place to the "new" leader. We could try
1909 distinguishing the exit and exec cases, by waiting once
1910 more, and seeing if something comes out, but it doesn't
1911 sound useful. The previous leader _does_ go away, and
1912 we'll re-add the new one once we see the exec event
1913 (which is just the same as what would happen if the
1914 previous leader did exit voluntarily before some other
1915 thread execs). */
1916
1917 if (debug_threads)
1918 debug_printf ("CZL: Thread group leader %d zombie "
1919 "(it exited, or another thread execd).\n",
1920 leader_pid);
1921
1922 delete_lwp (leader_lp);
1923 }
1924 });
1925 }
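
/* Illustrative sketch of what the zombie test above boils down to:
   linux_proc_pid_is_zombie (in nat/linux-procfs.c) effectively looks
   for a "State: Z" line in /proc/PID/status.  A minimal stand-alone
   version (error handling elided, helper name made up) could look
   like this.  */

#if 0 /* Example only, not built.  */
static int
example_pid_is_zombie (pid_t pid)
{
  char path[64], line[128];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* The state letter follows the tag; 'Z' means zombie.  */
	const char *p = line + 6;

	while (*p == ' ' || *p == '\t')
	  p++;
	zombie = (*p == 'Z');
	break;
      }

  fclose (f);
  return zombie;
}
#endif
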
1926
1927 /* Callback for `find_thread'. Returns the first LWP that is not
1928 stopped. */
1929
1930 static bool
1931 not_stopped_callback (thread_info *thread, ptid_t filter)
1932 {
1933 if (!thread->id.matches (filter))
1934 return false;
1935
1936 lwp_info *lwp = get_thread_lwp (thread);
1937
1938 return !lwp->stopped;
1939 }
1940
1941 /* Increment LWP's suspend count. */
1942
1943 static void
1944 lwp_suspended_inc (struct lwp_info *lwp)
1945 {
1946 lwp->suspended++;
1947
1948 if (debug_threads && lwp->suspended > 4)
1949 {
1950 struct thread_info *thread = get_lwp_thread (lwp);
1951
1952 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1953 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1954 }
1955 }
1956
1957 /* Decrement LWP's suspend count. */
1958
1959 static void
1960 lwp_suspended_decr (struct lwp_info *lwp)
1961 {
1962 lwp->suspended--;
1963
1964 if (lwp->suspended < 0)
1965 {
1966 struct thread_info *thread = get_lwp_thread (lwp);
1967
1968 internal_error (__FILE__, __LINE__,
1969 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1970 lwp->suspended);
1971 }
1972 }
1973
1974 /* This function should only be called if the LWP got a SIGTRAP.
1975
1976 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1977 event was handled, 0 otherwise. */
1978
1979 static int
1980 handle_tracepoints (struct lwp_info *lwp)
1981 {
1982 struct thread_info *tinfo = get_lwp_thread (lwp);
1983 int tpoint_related_event = 0;
1984
1985 gdb_assert (lwp->suspended == 0);
1986
1987 /* If this tracepoint hit causes a tracing stop, we'll immediately
1988 uninsert tracepoints. To do this, we temporarily pause all
1989 threads, unpatch away, and then unpause threads. We need to make
1990 sure the unpausing doesn't resume LWP too. */
1991 lwp_suspended_inc (lwp);
1992
1993 /* And we need to be sure that any all-threads-stopping doesn't try
1994 to move threads out of the jump pads, as it could deadlock the
1995 inferior (LWP could be in the jump pad, maybe even holding the
1996 lock). */
1997
1998 /* Do any necessary step collect actions. */
1999 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2000
2001 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2002
2003 /* See if we just hit a tracepoint and do its main collect
2004 actions. */
2005 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2006
2007 lwp_suspended_decr (lwp);
2008
2009 gdb_assert (lwp->suspended == 0);
2010 gdb_assert (!stabilizing_threads
2011 || (lwp->collecting_fast_tracepoint
2012 != fast_tpoint_collect_result::not_collecting));
2013
2014 if (tpoint_related_event)
2015 {
2016 if (debug_threads)
2017 debug_printf ("got a tracepoint event\n");
2018 return 1;
2019 }
2020
2021 return 0;
2022 }
2023
2024 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2025 collection status. */
2026
2027 static fast_tpoint_collect_result
2028 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2029 struct fast_tpoint_collect_status *status)
2030 {
2031 CORE_ADDR thread_area;
2032 struct thread_info *thread = get_lwp_thread (lwp);
2033
2034 if (the_low_target.get_thread_area == NULL)
2035 return fast_tpoint_collect_result::not_collecting;
2036
2037 /* Get the thread area address. This is used to recognize which
2038 thread is which when tracing with the in-process agent library.
2039 We don't read anything from the address, and treat it as opaque;
2040 it's the address itself that we assume is unique per-thread. */
2041 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2042 return fast_tpoint_collect_result::not_collecting;
2043
2044 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2045 }
2046
2047 /* The reason we resume in the caller is that we want to be able
2048 to pass lwp->status_pending as WSTAT, and we need to clear
2049 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2050 refuses to resume. */
2051
2052 static int
2053 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2054 {
2055 struct thread_info *saved_thread;
2056
2057 saved_thread = current_thread;
2058 current_thread = get_lwp_thread (lwp);
2059
2060 if ((wstat == NULL
2061 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2062 && supports_fast_tracepoints ()
2063 && agent_loaded_p ())
2064 {
2065 struct fast_tpoint_collect_status status;
2066
2067 if (debug_threads)
2068 debug_printf ("Checking whether LWP %ld needs to move out of the "
2069 "jump pad.\n",
2070 lwpid_of (current_thread));
2071
2072 fast_tpoint_collect_result r
2073 = linux_fast_tracepoint_collecting (lwp, &status);
2074
2075 if (wstat == NULL
2076 || (WSTOPSIG (*wstat) != SIGILL
2077 && WSTOPSIG (*wstat) != SIGFPE
2078 && WSTOPSIG (*wstat) != SIGSEGV
2079 && WSTOPSIG (*wstat) != SIGBUS))
2080 {
2081 lwp->collecting_fast_tracepoint = r;
2082
2083 if (r != fast_tpoint_collect_result::not_collecting)
2084 {
2085 if (r == fast_tpoint_collect_result::before_insn
2086 && lwp->exit_jump_pad_bkpt == NULL)
2087 {
2088 /* Haven't executed the original instruction yet.
2089 Set breakpoint there, and wait till it's hit,
2090 then single-step until exiting the jump pad. */
2091 lwp->exit_jump_pad_bkpt
2092 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2093 }
2094
2095 if (debug_threads)
2096 debug_printf ("Checking whether LWP %ld needs to move out of "
2097 "the jump pad...it does\n",
2098 lwpid_of (current_thread));
2099 current_thread = saved_thread;
2100
2101 return 1;
2102 }
2103 }
2104 else
2105 {
2106 /* If we get a synchronous signal while collecting, *and*
2107 while executing the (relocated) original instruction,
2108 reset the PC to point at the tpoint address, before
2109 reporting to GDB. Otherwise, it's an IPA lib bug: just
2110 report the signal to GDB, and pray for the best. */
2111
2112 lwp->collecting_fast_tracepoint
2113 = fast_tpoint_collect_result::not_collecting;
2114
2115 if (r != fast_tpoint_collect_result::not_collecting
2116 && (status.adjusted_insn_addr <= lwp->stop_pc
2117 && lwp->stop_pc < status.adjusted_insn_addr_end))
2118 {
2119 siginfo_t info;
2120 struct regcache *regcache;
2121
2122 /* The si_addr on a few signals references the address
2123 of the faulting instruction. Adjust that as
2124 well. */
2125 if ((WSTOPSIG (*wstat) == SIGILL
2126 || WSTOPSIG (*wstat) == SIGFPE
2127 || WSTOPSIG (*wstat) == SIGBUS
2128 || WSTOPSIG (*wstat) == SIGSEGV)
2129 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2130 (PTRACE_TYPE_ARG3) 0, &info) == 0
2131 /* Final check just to make sure we don't clobber
2132 the siginfo of non-kernel-sent signals. */
2133 && (uintptr_t) info.si_addr == lwp->stop_pc)
2134 {
2135 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2136 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2137 (PTRACE_TYPE_ARG3) 0, &info);
2138 }
2139
2140 regcache = get_thread_regcache (current_thread, 1);
2141 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2142 lwp->stop_pc = status.tpoint_addr;
2143
2144 /* Cancel any fast tracepoint lock this thread was
2145 holding. */
2146 force_unlock_trace_buffer ();
2147 }
2148
2149 if (lwp->exit_jump_pad_bkpt != NULL)
2150 {
2151 if (debug_threads)
2152 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2153 "stopping all threads momentarily.\n");
2154
2155 stop_all_lwps (1, lwp);
2156
2157 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2158 lwp->exit_jump_pad_bkpt = NULL;
2159
2160 unstop_all_lwps (1, lwp);
2161
2162 gdb_assert (lwp->suspended >= 0);
2163 }
2164 }
2165 }
2166
2167 if (debug_threads)
2168 debug_printf ("Checking whether LWP %ld needs to move out of the "
2169 "jump pad...no\n",
2170 lwpid_of (current_thread));
2171
2172 current_thread = saved_thread;
2173 return 0;
2174 }
2175
2176 /* Enqueue one signal in the "signals to report later when out of the
2177 jump pad" list. */
2178
2179 static void
2180 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2181 {
2182 struct pending_signals *p_sig;
2183 struct thread_info *thread = get_lwp_thread (lwp);
2184
2185 if (debug_threads)
2186 debug_printf ("Deferring signal %d for LWP %ld.\n",
2187 WSTOPSIG (*wstat), lwpid_of (thread));
2188
2189 if (debug_threads)
2190 {
2191 struct pending_signals *sig;
2192
2193 for (sig = lwp->pending_signals_to_report;
2194 sig != NULL;
2195 sig = sig->prev)
2196 debug_printf (" Already queued %d\n",
2197 sig->signal);
2198
2199 debug_printf (" (no more currently queued signals)\n");
2200 }
2201
2202 /* Don't enqueue non-RT signals if they are already in the deferred
2203 queue. (SIGSTOP being the easiest signal to see ending up here
2204 twice.) */
2205 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2206 {
2207 struct pending_signals *sig;
2208
2209 for (sig = lwp->pending_signals_to_report;
2210 sig != NULL;
2211 sig = sig->prev)
2212 {
2213 if (sig->signal == WSTOPSIG (*wstat))
2214 {
2215 if (debug_threads)
2216 debug_printf ("Not requeuing already queued non-RT signal %d"
2217 " for LWP %ld\n",
2218 sig->signal,
2219 lwpid_of (thread));
2220 return;
2221 }
2222 }
2223 }
2224
2225 p_sig = XCNEW (struct pending_signals);
2226 p_sig->prev = lwp->pending_signals_to_report;
2227 p_sig->signal = WSTOPSIG (*wstat);
2228
2229 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2230 &p_sig->info);
2231
2232 lwp->pending_signals_to_report = p_sig;
2233 }
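
/* A note on the queue discipline above: new entries are pushed at the
   head (lwp->pending_signals_to_report), linked through PREV, and
   dequeue_one_deferred_signal below pops from the far end of the PREV
   chain, so deferred signals are reported in FIFO order.  Minimal
   sketch with a stand-in type (illustrative only):  */

#if 0 /* Example only, not built.  */
struct example_sig { int signal; struct example_sig *prev; };

static void
example_push (struct example_sig **head, int signal)
{
  struct example_sig *p = XCNEW (struct example_sig);

  p->signal = signal;
  p->prev = *head;
  *head = p;
}

static int
example_pop_oldest (struct example_sig **head)
{
  struct example_sig **p = head;
  int signal;

  if (*p == NULL)
    return 0;

  /* Walk to the oldest entry, at the end of the PREV chain.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  signal = (*p)->signal;
  free (*p);
  *p = NULL;
  return signal;
}
#endif
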
2234
2235 /* Dequeue one signal from the "signals to report later when out of
2236 the jump pad" list. */
2237
2238 static int
2239 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2240 {
2241 struct thread_info *thread = get_lwp_thread (lwp);
2242
2243 if (lwp->pending_signals_to_report != NULL)
2244 {
2245 struct pending_signals **p_sig;
2246
2247 p_sig = &lwp->pending_signals_to_report;
2248 while ((*p_sig)->prev != NULL)
2249 p_sig = &(*p_sig)->prev;
2250
2251 *wstat = W_STOPCODE ((*p_sig)->signal);
2252 if ((*p_sig)->info.si_signo != 0)
2253 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2254 &(*p_sig)->info);
2255 free (*p_sig);
2256 *p_sig = NULL;
2257
2258 if (debug_threads)
2259 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2260 WSTOPSIG (*wstat), lwpid_of (thread));
2261
2262 if (debug_threads)
2263 {
2264 struct pending_signals *sig;
2265
2266 for (sig = lwp->pending_signals_to_report;
2267 sig != NULL;
2268 sig = sig->prev)
2269 debug_printf (" Still queued %d\n",
2270 sig->signal);
2271
2272 debug_printf (" (no more queued signals)\n");
2273 }
2274
2275 return 1;
2276 }
2277
2278 return 0;
2279 }
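
/* The status synthesized above is handed back through WSTAT and can
   be treated exactly like a fresh waitpid result, because of this
   identity from the W_STOPCODE definition in gdb_wait.h (illustrative
   check, valid for ordinary signal numbers):  */

#if 0 /* Example only, not built.  */
static void
example_w_stopcode_roundtrip (int sig)
{
  int wstat = W_STOPCODE (sig);

  /* A status built with W_STOPCODE reads back as a stop by SIG.  */
  gdb_assert (WIFSTOPPED (wstat));
  gdb_assert (WSTOPSIG (wstat) == sig);
}
#endif
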
2280
2281 /* Fetch the possibly triggered data watchpoint info and store it in
2282 CHILD.
2283
2284 On some archs, like x86, that use debug registers to set
2285 watchpoints, it's possible that the way to know which watched
2286 address trapped is to check the register that is used to select
2287 which address to watch. Problem is, between setting the watchpoint
2288 and reading back which data address trapped, the user may change
2289 the set of watchpoints, and, as a consequence, GDB changes the
2290 debug registers in the inferior. To avoid reading back a stale
2291 stopped-data-address when that happens, we cache in CHILD the fact
2292 that a watchpoint trapped, and the corresponding data address, as
2293 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2294 registers meanwhile, we have the cached data we can rely on. */
2295
2296 static int
2297 check_stopped_by_watchpoint (struct lwp_info *child)
2298 {
2299 if (the_low_target.stopped_by_watchpoint != NULL)
2300 {
2301 struct thread_info *saved_thread;
2302
2303 saved_thread = current_thread;
2304 current_thread = get_lwp_thread (child);
2305
2306 if (the_low_target.stopped_by_watchpoint ())
2307 {
2308 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2309
2310 if (the_low_target.stopped_data_address != NULL)
2311 child->stopped_data_address
2312 = the_low_target.stopped_data_address ();
2313 else
2314 child->stopped_data_address = 0;
2315 }
2316
2317 current_thread = saved_thread;
2318 }
2319
2320 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2321 }
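
/* Illustrative x86 instance of the race described above (the DR6/DRn
   layout is real; the accessor is made up): DR6's low four bits say
   which of DR0..DR3 fired, and the watched address sits in that DRn.
   Both must be read back at SIGTRAP time, before GDB rewrites the
   debug registers, which is exactly why the result is cached in the
   lwp.  */

#if 0 /* Example only, not built.  */
extern unsigned long example_read_debug_reg (int regno); /* made up */

static CORE_ADDR
example_x86_stopped_data_address (void)
{
  unsigned long dr6 = example_read_debug_reg (6);
  int i;

  for (i = 0; i < 4; i++)
    if (dr6 & (1UL << i))
      return (CORE_ADDR) example_read_debug_reg (i);

  return 0;
}
#endif
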
2322
2323 /* Return the ptrace options that we want to try to enable. */
2324
2325 static int
2326 linux_low_ptrace_options (int attached)
2327 {
2328 int options = 0;
2329
2330 if (!attached)
2331 options |= PTRACE_O_EXITKILL;
2332
2333 if (report_fork_events)
2334 options |= PTRACE_O_TRACEFORK;
2335
2336 if (report_vfork_events)
2337 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2338
2339 if (report_exec_events)
2340 options |= PTRACE_O_TRACEEXEC;
2341
2342 options |= PTRACE_O_TRACESYSGOOD;
2343
2344 return options;
2345 }
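
/* The options computed above take effect via PTRACE_SETOPTIONS,
   which is what linux_enable_event_reporting (nat/linux-ptrace.c)
   ultimately issues.  A minimal stand-alone sketch (illustrative;
   the LWP must be ptrace-stopped for the call to succeed):  */

#if 0 /* Example only, not built.  */
static void
example_enable_event_reporting (pid_t pid, int attached)
{
  int options = linux_low_ptrace_options (attached);

  if (ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (uintptr_t) options) != 0)
    perror ("PTRACE_SETOPTIONS");
}
#endif
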
2346
2347 /* Do low-level handling of the event, and check if we should go on
2348 and pass it to caller code. Return the affected lwp if we should,
2349 or NULL otherwise. */
2350
2351 static struct lwp_info *
2352 linux_low_filter_event (int lwpid, int wstat)
2353 {
2354 struct lwp_info *child;
2355 struct thread_info *thread;
2356 int have_stop_pc = 0;
2357
2358 child = find_lwp_pid (pid_to_ptid (lwpid));
2359
2360 /* Check for stop events reported by a process we didn't already
2361 know about - anything not already in our LWP list.
2362
2363 If we're expecting to receive stopped processes after
2364 fork, vfork, and clone events, then we'll just add the
2365 new one to our list and go back to waiting for the event
2366 to be reported - the stopped process might be returned
2367 from waitpid before or after the event is.
2368
2369 But note the case of a non-leader thread exec'ing after the
2370 leader has exited, and gone from our lists (because
2371 check_zombie_leaders deleted it). The non-leader thread
2372 changes its tid to the tgid. */
2373
2374 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2375 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2376 {
2377 ptid_t child_ptid;
2378
2379 /* A multi-thread exec after we had seen the leader exiting. */
2380 if (debug_threads)
2381 {
2382 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2383 "after exec.\n", lwpid);
2384 }
2385
2386 child_ptid = ptid_build (lwpid, lwpid, 0);
2387 child = add_lwp (child_ptid);
2388 child->stopped = 1;
2389 current_thread = child->thread;
2390 }
2391
2392 /* If we didn't find a process, one of two things presumably happened:
2393 - A process we started and then detached from has exited. Ignore it.
2394 - A process we are controlling has forked and the new child's stop
2395 was reported to us by the kernel. Save its PID. */
2396 if (child == NULL && WIFSTOPPED (wstat))
2397 {
2398 add_to_pid_list (&stopped_pids, lwpid, wstat);
2399 return NULL;
2400 }
2401 else if (child == NULL)
2402 return NULL;
2403
2404 thread = get_lwp_thread (child);
2405
2406 child->stopped = 1;
2407
2408 child->last_status = wstat;
2409
2410 /* Check if the thread has exited. */
2411 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2412 {
2413 if (debug_threads)
2414 debug_printf ("LLFE: %d exited.\n", lwpid);
2415
2416 if (finish_step_over (child))
2417 {
2418 /* Unsuspend all other LWPs, and set them back running again. */
2419 unsuspend_all_lwps (child);
2420 }
2421
2422 /* If there is at least one more LWP, then the exit signal was
2423 not the end of the debugged application and should be
2424 ignored, unless GDB wants to hear about thread exits. */
2425 if (report_thread_events
2426 || last_thread_of_process_p (pid_of (thread)))
2427 {
2428 /* Since events are serialized to GDB core, we can't report
2429 this one right now. Leave the status pending for
2430 the next time we're able to report it. */
2431 mark_lwp_dead (child, wstat);
2432 return child;
2433 }
2434 else
2435 {
2436 delete_lwp (child);
2437 return NULL;
2438 }
2439 }
2440
2441 gdb_assert (WIFSTOPPED (wstat));
2442
2443 if (WIFSTOPPED (wstat))
2444 {
2445 struct process_info *proc;
2446
2447 /* Architecture-specific setup after inferior is running. */
2448 proc = find_process_pid (pid_of (thread));
2449 if (proc->tdesc == NULL)
2450 {
2451 if (proc->attached)
2452 {
2453 /* This needs to happen after we have attached to the
2454 inferior and it is stopped for the first time, but
2455 before we access any inferior registers. */
2456 linux_arch_setup_thread (thread);
2457 }
2458 else
2459 {
2460 /* The process is started, but GDBserver will do
2461 architecture-specific setup after the program stops at
2462 the first instruction. */
2463 child->status_pending_p = 1;
2464 child->status_pending = wstat;
2465 return child;
2466 }
2467 }
2468 }
2469
2470 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2471 {
2472 struct process_info *proc = find_process_pid (pid_of (thread));
2473 int options = linux_low_ptrace_options (proc->attached);
2474
2475 linux_enable_event_reporting (lwpid, options);
2476 child->must_set_ptrace_flags = 0;
2477 }
2478
2479 /* Always update syscall_state, even if it will be filtered later. */
2480 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2481 {
2482 child->syscall_state
2483 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2484 ? TARGET_WAITKIND_SYSCALL_RETURN
2485 : TARGET_WAITKIND_SYSCALL_ENTRY);
2486 }
2487 else
2488 {
2489 /* Almost all other ptrace-stops are known to be outside of system
2490 calls, with further exceptions in handle_extended_wait. */
2491 child->syscall_state = TARGET_WAITKIND_IGNORE;
2492 }
2493
2494 /* Be careful to not overwrite stop_pc until save_stop_reason is
2495 called. */
2496 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2497 && linux_is_extended_waitstatus (wstat))
2498 {
2499 child->stop_pc = get_pc (child);
2500 if (handle_extended_wait (&child, wstat))
2501 {
2502 /* The event has been handled, so just return without
2503 reporting it. */
2504 return NULL;
2505 }
2506 }
2507
2508 if (linux_wstatus_maybe_breakpoint (wstat))
2509 {
2510 if (save_stop_reason (child))
2511 have_stop_pc = 1;
2512 }
2513
2514 if (!have_stop_pc)
2515 child->stop_pc = get_pc (child);
2516
2517 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2518 && child->stop_expected)
2519 {
2520 if (debug_threads)
2521 debug_printf ("Expected stop.\n");
2522 child->stop_expected = 0;
2523
2524 if (thread->last_resume_kind == resume_stop)
2525 {
2526 /* We want to report the stop to the core. Treat the
2527 SIGSTOP as a normal event. */
2528 if (debug_threads)
2529 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2530 target_pid_to_str (ptid_of (thread)));
2531 }
2532 else if (stopping_threads != NOT_STOPPING_THREADS)
2533 {
2534 /* Stopping threads. We don't want this SIGSTOP to end up
2535 pending. */
2536 if (debug_threads)
2537 debug_printf ("LLW: SIGSTOP caught for %s "
2538 "while stopping threads.\n",
2539 target_pid_to_str (ptid_of (thread)));
2540 return NULL;
2541 }
2542 else
2543 {
2544 /* This is a delayed SIGSTOP. Filter out the event. */
2545 if (debug_threads)
2546 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2547 child->stepping ? "step" : "continue",
2548 target_pid_to_str (ptid_of (thread)));
2549
2550 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2551 return NULL;
2552 }
2553 }
2554
2555 child->status_pending_p = 1;
2556 child->status_pending = wstat;
2557 return child;
2558 }
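
/* Illustrative note on the extended-event test used above: the
   kernel reports PTRACE_EVENT_* stops as a SIGTRAP with the event
   code in the high bits of the wait status.  A stand-alone decode,
   in the spirit of linux_ptrace_get_extended_event, would be:  */

#if 0 /* Example only, not built.  */
static int
example_extended_event (int wstat)
{
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
    return wstat >> 16;	/* PTRACE_EVENT_FORK, _EXEC, etc., or 0.  */
  return 0;
}
#endif
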
2559
2560 /* Return true if THREAD is doing hardware single step. */
2561
2562 static int
2563 maybe_hw_step (struct thread_info *thread)
2564 {
2565 if (can_hardware_single_step ())
2566 return 1;
2567 else
2568 {
2569 /* GDBserver must insert a single-step breakpoint for software
2570 single step. */
2571 gdb_assert (has_single_step_breakpoints (thread));
2572 return 0;
2573 }
2574 }
2575
2576 /* Resume LWPs that are currently stopped without any pending status
2577 to report, but are resumed from the core's perspective. */
2578
2579 static void
2580 resume_stopped_resumed_lwps (thread_info *thread)
2581 {
2582 struct lwp_info *lp = get_thread_lwp (thread);
2583
2584 if (lp->stopped
2585 && !lp->suspended
2586 && !lp->status_pending_p
2587 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2588 {
2589 int step = 0;
2590
2591 if (thread->last_resume_kind == resume_step)
2592 step = maybe_hw_step (thread);
2593
2594 if (debug_threads)
2595 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2596 target_pid_to_str (ptid_of (thread)),
2597 paddress (lp->stop_pc),
2598 step);
2599
2600 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2601 }
2602 }
2603
2604 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2605 match FILTER_PTID (leaving others pending). The PTIDs can be:
2606 minus_one_ptid, to specify any child; a pid PTID, specifying all
2607 lwps of a thread group; or a PTID representing a single lwp. Store
2608 the stop status through the status pointer WSTAT. OPTIONS is
2609 passed to the waitpid call. Return 0 if no event was found and
2610 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2611 were found. Return the PID of the stopped child otherwise. */
2612
2613 static int
2614 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2615 int *wstatp, int options)
2616 {
2617 struct thread_info *event_thread;
2618 struct lwp_info *event_child, *requested_child;
2619 sigset_t block_mask, prev_mask;
2620
2621 retry:
2622 /* N.B. event_thread points to the thread_info struct that contains
2623 event_child. Keep them in sync. */
2624 event_thread = NULL;
2625 event_child = NULL;
2626 requested_child = NULL;
2627
2628 /* Check for a lwp with a pending status. */
2629
2630 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2631 {
2632 event_thread = find_thread_in_random ([&] (thread_info *thread)
2633 {
2634 return status_pending_p_callback (thread, filter_ptid);
2635 });
2636
2637 if (event_thread != NULL)
2638 event_child = get_thread_lwp (event_thread);
2639 if (debug_threads && event_thread)
2640 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2641 }
2642 else if (!ptid_equal (filter_ptid, null_ptid))
2643 {
2644 requested_child = find_lwp_pid (filter_ptid);
2645
2646 if (stopping_threads == NOT_STOPPING_THREADS
2647 && requested_child->status_pending_p
2648 && (requested_child->collecting_fast_tracepoint
2649 != fast_tpoint_collect_result::not_collecting))
2650 {
2651 enqueue_one_deferred_signal (requested_child,
2652 &requested_child->status_pending);
2653 requested_child->status_pending_p = 0;
2654 requested_child->status_pending = 0;
2655 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2656 }
2657
2658 if (requested_child->suspended
2659 && requested_child->status_pending_p)
2660 {
2661 internal_error (__FILE__, __LINE__,
2662 "requesting an event out of a"
2663 " suspended child?");
2664 }
2665
2666 if (requested_child->status_pending_p)
2667 {
2668 event_child = requested_child;
2669 event_thread = get_lwp_thread (event_child);
2670 }
2671 }
2672
2673 if (event_child != NULL)
2674 {
2675 if (debug_threads)
2676 debug_printf ("Got an event from pending child %ld (%04x)\n",
2677 lwpid_of (event_thread), event_child->status_pending);
2678 *wstatp = event_child->status_pending;
2679 event_child->status_pending_p = 0;
2680 event_child->status_pending = 0;
2681 current_thread = event_thread;
2682 return lwpid_of (event_thread);
2683 }
2684
2685 /* But if we don't find a pending event, we'll have to wait.
2686
2687 We only enter this loop if no process has a pending wait status.
2688 Thus any action taken in response to a wait status inside this
2689 loop is responding as soon as we detect the status, not after any
2690 pending events. */
2691
2692 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2693 all signals while here. */
2694 sigfillset (&block_mask);
2695 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2696
2697 /* Always pull all events out of the kernel. We'll randomly select
2698 an event LWP out of all that have events, to prevent
2699 starvation. */
2700 while (event_child == NULL)
2701 {
2702 pid_t ret = 0;
2703
2704 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2705 quirks:
2706
2707 - If the thread group leader exits while other threads in the
2708 thread group still exist, waitpid(TGID, ...) hangs. That
2709 waitpid won't return an exit status until the other threads
2710 in the group are reaped.
2711
2712 - When a non-leader thread execs, that thread just vanishes
2713 without reporting an exit (so we'd hang if we waited for it
2714 explicitly in that case). The exec event is reported to
2715 the TGID pid. */
2716 errno = 0;
2717 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2718
2719 if (debug_threads)
2720 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2721 ret, errno ? strerror (errno) : "ERRNO-OK");
2722
2723 if (ret > 0)
2724 {
2725 if (debug_threads)
2726 {
2727 debug_printf ("LLW: waitpid %ld received %s\n",
2728 (long) ret, status_to_str (*wstatp));
2729 }
2730
2731 /* Filter all events. IOW, leave all events pending. We'll
2732 randomly select an event LWP out of all that have events
2733 below. */
2734 linux_low_filter_event (ret, *wstatp);
2735 /* Retry until nothing comes out of waitpid. A single
2736 SIGCHLD can indicate more than one child stopped. */
2737 continue;
2738 }
2739
2740 /* Now that we've pulled all events out of the kernel, resume
2741 LWPs that don't have an interesting event to report. */
2742 if (stopping_threads == NOT_STOPPING_THREADS)
2743 for_each_thread (resume_stopped_resumed_lwps);
2744
2745 /* ... and find an LWP with a status to report to the core, if
2746 any. */
2747 event_thread = find_thread_in_random ([&] (thread_info *thread)
2748 {
2749 return status_pending_p_callback (thread, filter_ptid);
2750 });
2751
2752 if (event_thread != NULL)
2753 {
2754 event_child = get_thread_lwp (event_thread);
2755 *wstatp = event_child->status_pending;
2756 event_child->status_pending_p = 0;
2757 event_child->status_pending = 0;
2758 break;
2759 }
2760
2761 /* Check for zombie thread group leaders. Those can't be reaped
2762 until all other threads in the thread group are. */
2763 check_zombie_leaders ();
2764
2765 auto not_stopped = [&] (thread_info *thread)
2766 {
2767 return not_stopped_callback (thread, wait_ptid);
2768 };
2769
2770 /* If there are no resumed children left in the set of LWPs we
2771 want to wait for, bail. We can't just block in
2772 waitpid/sigsuspend, because lwps might have been left stopped
2773 in trace-stop state, and we'd be stuck forever waiting for
2774 their status to change (which would only happen if we resumed
2775 them). Even if WNOHANG is set, this return code is preferred
2776 over 0 (below), as it is more detailed. */
2777 if (find_thread (not_stopped) == NULL)
2778 {
2779 if (debug_threads)
2780 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2781 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2782 return -1;
2783 }
2784
2785 /* No interesting event to report to the caller. */
2786 if ((options & WNOHANG))
2787 {
2788 if (debug_threads)
2789 debug_printf ("WNOHANG set, no event found\n");
2790
2791 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2792 return 0;
2793 }
2794
2795 /* Block until we get an event reported with SIGCHLD. */
2796 if (debug_threads)
2797 debug_printf ("sigsuspend'ing\n");
2798
2799 sigsuspend (&prev_mask);
2800 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2801 goto retry;
2802 }
2803
2804 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2805
2806 current_thread = event_thread;
2807
2808 return lwpid_of (event_thread);
2809 }
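
/* The core waitpid idiom above, stripped of the LWP bookkeeping: a
   single SIGCHLD may represent several stopped children, so drain
   with WNOHANG until nothing is left, and only then block.
   Illustrative sketch:  */

#if 0 /* Example only, not built.  */
static void
example_drain_wait_events (void)
{
  int wstat;
  pid_t pid;

  while ((pid = waitpid (-1, &wstat, __WALL | WNOHANG)) > 0)
    {
      /* Record PID's status here; report nothing yet.  */
    }

  /* pid == 0: children exist, but none has changed state.
     pid == -1 && errno == ECHILD: no children at all.  */
}
#endif
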
2810
2811 /* Wait for an event from child(ren) PTID. PTIDs can be:
2812 minus_one_ptid, to specify any child; a pid PTID, specifying all
2813 lwps of a thread group; or a PTID representing a single lwp. Store
2814 the stop status through the status pointer WSTAT. OPTIONS is
2815 passed to the waitpid call. Return 0 if no event was found and
2816 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2817 were found. Return the PID of the stopped child otherwise. */
2818
2819 static int
2820 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2821 {
2822 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2823 }
2824
2825 /* Select one LWP out of those that have events pending. */
2826
2827 static void
2828 select_event_lwp (struct lwp_info **orig_lp)
2829 {
2830 int random_selector;
2831 struct thread_info *event_thread = NULL;
2832
2833 /* In all-stop, give preference to the LWP that is being
2834 single-stepped. There will be at most one, and it's the LWP that
2835 the core is most interested in. If we didn't do this, then we'd
2836 have to handle pending step SIGTRAPs somehow in case the core
2837 later continues the previously-stepped thread, otherwise we'd
2838 report the pending SIGTRAP, and the core, not having stepped the
2839 thread, wouldn't understand what the trap was for, and therefore
2840 would report it to the user as a random signal. */
2841 if (!non_stop)
2842 {
2843 event_thread = find_thread ([] (thread_info *thread)
2844 {
2845 lwp_info *lp = get_thread_lwp (thread);
2846
2847 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2848 && thread->last_resume_kind == resume_step
2849 && lp->status_pending_p);
2850 });
2851
2852 if (event_thread != NULL)
2853 {
2854 if (debug_threads)
2855 debug_printf ("SEL: Select single-step %s\n",
2856 target_pid_to_str (ptid_of (event_thread)));
2857 }
2858 }
2859 if (event_thread == NULL)
2860 {
2861 /* No single-stepping LWP. Select one at random, out of those
2862 which have had events. */
2863
2864 /* First see how many events we have. */
2865 int num_events = 0;
2866 for_each_thread ([&] (thread_info *thread)
2867 {
2868 lwp_info *lp = get_thread_lwp (thread);
2869
2870 /* Count only resumed LWPs that have an event pending. */
2871 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2872 && lp->status_pending_p)
2873 num_events++;
2874 });
2875 gdb_assert (num_events > 0);
2876
2877 /* Now randomly pick a LWP out of those that have had
2878 events. */
2879 random_selector = (int)
2880 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2881
2882 if (debug_threads && num_events > 1)
2883 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2884 num_events, random_selector);
2885
2886 event_thread = find_thread ([&] (thread_info *thread)
2887 {
2888 lwp_info *lp = get_thread_lwp (thread);
2889
2890 /* Select only resumed LWPs that have an event pending. */
2891 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2892 && lp->status_pending_p)
2893 if (random_selector-- == 0)
2894 return true;
2895
2896 return false;
2897 });
2898 }
2899
2900 if (event_thread != NULL)
2901 {
2902 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2903
2904 /* Switch the event LWP. */
2905 *orig_lp = event_lp;
2906 }
2907 }
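
/* The selection scheme above, shown stand-alone: count the
   candidates, draw one uniform index, then walk the candidates again
   until the index reaches zero.  Illustrative sketch over a plain
   array:  */

#if 0 /* Example only, not built.  */
static int
example_pick_random_index (const int *has_event, int n)
{
  int num_events = 0;
  int i;

  for (i = 0; i < n; i++)
    if (has_event[i])
      num_events++;

  if (num_events == 0)
    return -1;

  /* Uniform in [0, num_events).  */
  int selector = (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

  for (i = 0; i < n; i++)
    if (has_event[i] && selector-- == 0)
      return i;

  return -1;
}
#endif
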
2908
2909 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2910 non-NULL. */
2911
2912 static void
2913 unsuspend_all_lwps (struct lwp_info *except)
2914 {
2915 for_each_thread ([&] (thread_info *thread)
2916 {
2917 lwp_info *lwp = get_thread_lwp (thread);
2918
2919 if (lwp != except)
2920 lwp_suspended_decr (lwp);
2921 });
2922 }
2923
2924 static void move_out_of_jump_pad_callback (thread_info *thread);
2925 static bool stuck_in_jump_pad_callback (thread_info *thread);
2926 static bool lwp_running (thread_info *thread);
2927 static ptid_t linux_wait_1 (ptid_t ptid,
2928 struct target_waitstatus *ourstatus,
2929 int target_options);
2930
2931 /* Stabilize threads (move out of jump pads).
2932
2933 If a thread is midway collecting a fast tracepoint, we need to
2934 finish the collection and move it out of the jump pad before
2935 reporting the signal.
2936
2937 This avoids recursion while collecting (when a signal arrives
2938 midway, and the signal handler itself collects), which would trash
2939 the trace buffer. In case the user set a breakpoint in a signal
2940 handler, this avoids the backtrace showing the jump pad, etc.
2941 Most importantly, there are certain things we can't do safely if
2942 threads are stopped in a jump pad (or in its callees). For
2943 example:
2944
2945 - starting a new trace run. A thread still collecting the
2946 previous run could trash the trace buffer when resumed. The trace
2947 buffer control structures would have been reset but the thread had
2948 no way to tell. The thread could even be midway through memcpy'ing
2949 to the buffer, which would mean that, when resumed, it would clobber the
2950 trace buffer that had been set for a new run.
2951
2952 - we can't rewrite/reuse the jump pads for new tracepoints
2953 safely. Say you do tstart while a thread is stopped midway
2954 through a collection. When the thread is later resumed, it finishes the
2955 collection, and returns to the jump pad, to execute the original
2956 instruction that was under the tracepoint jump at the time the
2957 older run had been started. If the jump pad had been rewritten
2958 since for something else in the new run, the thread would now
2959 execute the wrong / random instructions. */
2960
2961 static void
2962 linux_stabilize_threads (void)
2963 {
2964 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2965
2966 if (thread_stuck != NULL)
2967 {
2968 if (debug_threads)
2969 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2970 lwpid_of (thread_stuck));
2971 return;
2972 }
2973
2974 thread_info *saved_thread = current_thread;
2975
2976 stabilizing_threads = 1;
2977
2978 /* Kick 'em all. */
2979 for_each_thread (move_out_of_jump_pad_callback);
2980
2981 /* Loop until all are stopped out of the jump pads. */
2982 while (find_thread (lwp_running) != NULL)
2983 {
2984 struct target_waitstatus ourstatus;
2985 struct lwp_info *lwp;
2986 int wstat;
2987
2988 /* Note that we go through the full wait event loop. While
2989 moving threads out of jump pad, we need to be able to step
2990 over internal breakpoints and such. */
2991 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2992
2993 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2994 {
2995 lwp = get_thread_lwp (current_thread);
2996
2997 /* Lock it. */
2998 lwp_suspended_inc (lwp);
2999
3000 if (ourstatus.value.sig != GDB_SIGNAL_0
3001 || current_thread->last_resume_kind == resume_stop)
3002 {
3003 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3004 enqueue_one_deferred_signal (lwp, &wstat);
3005 }
3006 }
3007 }
3008
3009 unsuspend_all_lwps (NULL);
3010
3011 stabilizing_threads = 0;
3012
3013 current_thread = saved_thread;
3014
3015 if (debug_threads)
3016 {
3017 thread_stuck = find_thread (stuck_in_jump_pad_callback);
3018
3019 if (thread_stuck != NULL)
3020 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3021 lwpid_of (thread_stuck));
3022 }
3023 }
3024
3025 /* Convenience function that is called when the kernel reports an
3026 event that is not passed out to GDB. */
3027
3028 static ptid_t
3029 ignore_event (struct target_waitstatus *ourstatus)
3030 {
3031 /* If we got an event, there may still be others, as a single
3032 SIGCHLD can indicate more than one child stopped. This forces
3033 another target_wait call. */
3034 async_file_mark ();
3035
3036 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3037 return null_ptid;
3038 }
3039
3040 /* Convenience function that is called when the kernel reports an exit
3041 event. This decides whether to report the event to GDB as a
3042 process exit event, a thread exit event, or to suppress the
3043 event. */
3044
3045 static ptid_t
3046 filter_exit_event (struct lwp_info *event_child,
3047 struct target_waitstatus *ourstatus)
3048 {
3049 struct thread_info *thread = get_lwp_thread (event_child);
3050 ptid_t ptid = ptid_of (thread);
3051
3052 if (!last_thread_of_process_p (pid_of (thread)))
3053 {
3054 if (report_thread_events)
3055 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3056 else
3057 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3058
3059 delete_lwp (event_child);
3060 }
3061 return ptid;
3062 }
3063
3064 /* Returns 1 if GDB is interested in any of event_child's syscalls. */
3065
3066 static int
3067 gdb_catching_syscalls_p (struct lwp_info *event_child)
3068 {
3069 struct thread_info *thread = get_lwp_thread (event_child);
3070 struct process_info *proc = get_thread_process (thread);
3071
3072 return !proc->syscalls_to_catch.empty ();
3073 }
3074
3075 /* Returns 1 if GDB is interested in the event_child syscall.
3076 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3077
3078 static int
3079 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3080 {
3081 int sysno;
3082 struct thread_info *thread = get_lwp_thread (event_child);
3083 struct process_info *proc = get_thread_process (thread);
3084
3085 if (proc->syscalls_to_catch.empty ())
3086 return 0;
3087
3088 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3089 return 1;
3090
3091 get_syscall_trapinfo (event_child, &sysno);
3092
3093 for (int iter : proc->syscalls_to_catch)
3094 if (iter == sysno)
3095 return 1;
3096
3097 return 0;
3098 }
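
/* Stand-alone version of the check above, over a plain array instead
   of proc->syscalls_to_catch (illustrative only; ANY_SYSCALL is the
   wildcard gdbserver stores for "catch syscall" with no argument):  */

#if 0 /* Example only, not built.  */
static int
example_catch_this_syscall_p (const int *to_catch, int n, int sysno)
{
  int i;

  if (n == 0)
    return 0;	/* Not catching any syscalls.  */

  if (to_catch[0] == ANY_SYSCALL)
    return 1;	/* Catching every syscall.  */

  for (i = 0; i < n; i++)
    if (to_catch[i] == sysno)
      return 1;

  return 0;
}
#endif
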
3099
3100 /* Wait for an event from the inferior process, and return its status. */
3101
3102 static ptid_t
3103 linux_wait_1 (ptid_t ptid,
3104 struct target_waitstatus *ourstatus, int target_options)
3105 {
3106 int w;
3107 struct lwp_info *event_child;
3108 int options;
3109 int pid;
3110 int step_over_finished;
3111 int bp_explains_trap;
3112 int maybe_internal_trap;
3113 int report_to_gdb;
3114 int trace_event;
3115 int in_step_range;
3116 int any_resumed;
3117
3118 if (debug_threads)
3119 {
3120 debug_enter ();
3121 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3122 }
3123
3124 /* Translate generic target options into linux options. */
3125 options = __WALL;
3126 if (target_options & TARGET_WNOHANG)
3127 options |= WNOHANG;
3128
3129 bp_explains_trap = 0;
3130 trace_event = 0;
3131 in_step_range = 0;
3132 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3133
3134 auto status_pending_p_any = [&] (thread_info *thread)
3135 {
3136 return status_pending_p_callback (thread, minus_one_ptid);
3137 };
3138
3139 auto not_stopped = [&] (thread_info *thread)
3140 {
3141 return not_stopped_callback (thread, minus_one_ptid);
3142 };
3143
3144 /* Find a resumed LWP, if any. */
3145 if (find_thread (status_pending_p_any) != NULL)
3146 any_resumed = 1;
3147 else if (find_thread (not_stopped) != NULL)
3148 any_resumed = 1;
3149 else
3150 any_resumed = 0;
3151
3152 if (ptid_equal (step_over_bkpt, null_ptid))
3153 pid = linux_wait_for_event (ptid, &w, options);
3154 else
3155 {
3156 if (debug_threads)
3157 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3158 target_pid_to_str (step_over_bkpt));
3159 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3160 }
3161
3162 if (pid == 0 || (pid == -1 && !any_resumed))
3163 {
3164 gdb_assert (target_options & TARGET_WNOHANG);
3165
3166 if (debug_threads)
3167 {
3168 debug_printf ("linux_wait_1 ret = null_ptid, "
3169 "TARGET_WAITKIND_IGNORE\n");
3170 debug_exit ();
3171 }
3172
3173 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3174 return null_ptid;
3175 }
3176 else if (pid == -1)
3177 {
3178 if (debug_threads)
3179 {
3180 debug_printf ("linux_wait_1 ret = null_ptid, "
3181 "TARGET_WAITKIND_NO_RESUMED\n");
3182 debug_exit ();
3183 }
3184
3185 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3186 return null_ptid;
3187 }
3188
3189 event_child = get_thread_lwp (current_thread);
3190
3191 /* linux_wait_for_event only returns an exit status for the last
3192 child of a process. Report it. */
3193 if (WIFEXITED (w) || WIFSIGNALED (w))
3194 {
3195 if (WIFEXITED (w))
3196 {
3197 ourstatus->kind = TARGET_WAITKIND_EXITED;
3198 ourstatus->value.integer = WEXITSTATUS (w);
3199
3200 if (debug_threads)
3201 {
3202 debug_printf ("linux_wait_1 ret = %s, exited with "
3203 "retcode %d\n",
3204 target_pid_to_str (ptid_of (current_thread)),
3205 WEXITSTATUS (w));
3206 debug_exit ();
3207 }
3208 }
3209 else
3210 {
3211 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3212 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3213
3214 if (debug_threads)
3215 {
3216 debug_printf ("linux_wait_1 ret = %s, terminated with "
3217 "signal %d\n",
3218 target_pid_to_str (ptid_of (current_thread)),
3219 WTERMSIG (w));
3220 debug_exit ();
3221 }
3222 }
3223
3224 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3225 return filter_exit_event (event_child, ourstatus);
3226
3227 return ptid_of (current_thread);
3228 }
3229
3230 /* If a step-over executed a breakpoint instruction then, in the case
3231 of a hardware single step, it means a gdb/gdbserver breakpoint had
3232 been planted on top of a permanent breakpoint; in the case of a
3233 software single step, it may just mean that gdbserver hit the
3234 reinsert breakpoint. The PC has been adjusted by save_stop_reason
3235 to point at the breakpoint address.
3236 So, in the case of hardware single step, advance the PC manually
3237 past the breakpoint, and in the case of software single step, advance
3238 only if it's not the single_step_breakpoint we are hitting.
3239 This keeps a program from trapping a permanent breakpoint
3240 forever. */
3241 if (!ptid_equal (step_over_bkpt, null_ptid)
3242 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3243 && (event_child->stepping
3244 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3245 {
3246 int increment_pc = 0;
3247 int breakpoint_kind = 0;
3248 CORE_ADDR stop_pc = event_child->stop_pc;
3249
3250 breakpoint_kind =
3251 the_target->breakpoint_kind_from_current_state (&stop_pc);
3252 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3253
3254 if (debug_threads)
3255 {
3256 debug_printf ("step-over for %s executed software breakpoint\n",
3257 target_pid_to_str (ptid_of (current_thread)));
3258 }
3259
3260 if (increment_pc != 0)
3261 {
3262 struct regcache *regcache
3263 = get_thread_regcache (current_thread, 1);
3264
3265 event_child->stop_pc += increment_pc;
3266 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3267
3268 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3269 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3270 }
3271 }
3272
3273 /* If this event was not handled before, and is not a SIGTRAP, we
3274 report it. SIGILL and SIGSEGV are also treated as traps in case
3275 a breakpoint is inserted at the current PC. If this target does
3276 not support internal breakpoints at all, we also report the
3277 SIGTRAP without further processing; it's of no concern to us. */
3278 maybe_internal_trap
3279 = (supports_breakpoints ()
3280 && (WSTOPSIG (w) == SIGTRAP
3281 || ((WSTOPSIG (w) == SIGILL
3282 || WSTOPSIG (w) == SIGSEGV)
3283 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3284
3285 if (maybe_internal_trap)
3286 {
3287 /* Handle anything that requires bookkeeping before deciding to
3288 report the event or continue waiting. */
3289
3290 /* First check if we can explain the SIGTRAP with an internal
3291 breakpoint, or if we should possibly report the event to GDB.
3292 Do this before anything that may remove or insert a
3293 breakpoint. */
3294 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3295
3296 /* We have a SIGTRAP, possibly a step-over dance has just
3297 finished. If so, tweak the state machine accordingly,
3298 reinsert breakpoints and delete any single-step
3299 breakpoints. */
3300 step_over_finished = finish_step_over (event_child);
3301
3302 /* Now invoke the callbacks of any internal breakpoints there. */
3303 check_breakpoints (event_child->stop_pc);
3304
3305 /* Handle tracepoint data collecting. This may overflow the
3306 trace buffer, and cause a tracing stop, removing
3307 breakpoints. */
3308 trace_event = handle_tracepoints (event_child);
3309
3310 if (bp_explains_trap)
3311 {
3312 if (debug_threads)
3313 debug_printf ("Hit a gdbserver breakpoint.\n");
3314 }
3315 }
3316 else
3317 {
3318 /* We have some other signal, possibly a step-over dance was in
3319 progress, and it should be cancelled too. */
3320 step_over_finished = finish_step_over (event_child);
3321 }
3322
3323 /* We have all the data we need. Either report the event to GDB, or
3324 resume threads and keep waiting for more. */
3325
3326 /* If we're collecting a fast tracepoint, finish the collection and
3327 move out of the jump pad before delivering a signal. See
3328 linux_stabilize_threads. */
3329
3330 if (WIFSTOPPED (w)
3331 && WSTOPSIG (w) != SIGTRAP
3332 && supports_fast_tracepoints ()
3333 && agent_loaded_p ())
3334 {
3335 if (debug_threads)
3336 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3337 "to defer or adjust it.\n",
3338 WSTOPSIG (w), lwpid_of (current_thread));
3339
3340 /* Allow debugging the jump pad itself. */
3341 if (current_thread->last_resume_kind != resume_step
3342 && maybe_move_out_of_jump_pad (event_child, &w))
3343 {
3344 enqueue_one_deferred_signal (event_child, &w);
3345
3346 if (debug_threads)
3347 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3348 WSTOPSIG (w), lwpid_of (current_thread));
3349
3350 linux_resume_one_lwp (event_child, 0, 0, NULL);
3351
3352 if (debug_threads)
3353 debug_exit ();
3354 return ignore_event (ourstatus);
3355 }
3356 }
3357
3358 if (event_child->collecting_fast_tracepoint
3359 != fast_tpoint_collect_result::not_collecting)
3360 {
3361 if (debug_threads)
3362 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3363 "Check if we're already there.\n",
3364 lwpid_of (current_thread),
3365 (int) event_child->collecting_fast_tracepoint);
3366
3367 trace_event = 1;
3368
3369 event_child->collecting_fast_tracepoint
3370 = linux_fast_tracepoint_collecting (event_child, NULL);
3371
3372 if (event_child->collecting_fast_tracepoint
3373 != fast_tpoint_collect_result::before_insn)
3374 {
3375 /* No longer need this breakpoint. */
3376 if (event_child->exit_jump_pad_bkpt != NULL)
3377 {
3378 if (debug_threads)
3379 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3380 "stopping all threads momentarily.\n");
3381
3382 /* Other running threads could hit this breakpoint.
3383 We don't handle moribund locations like GDB does,
3384 instead we always pause all threads when removing
3385 breakpoints, so that any step-over or
3386 decr_pc_after_break adjustment is always taken
3387 care of while the breakpoint is still
3388 inserted. */
3389 stop_all_lwps (1, event_child);
3390
3391 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3392 event_child->exit_jump_pad_bkpt = NULL;
3393
3394 unstop_all_lwps (1, event_child);
3395
3396 gdb_assert (event_child->suspended >= 0);
3397 }
3398 }
3399
3400 if (event_child->collecting_fast_tracepoint
3401 == fast_tpoint_collect_result::not_collecting)
3402 {
3403 if (debug_threads)
3404 debug_printf ("fast tracepoint finished "
3405 "collecting successfully.\n");
3406
3407 /* We may have a deferred signal to report. */
3408 if (dequeue_one_deferred_signal (event_child, &w))
3409 {
3410 if (debug_threads)
3411 debug_printf ("dequeued one signal.\n");
3412 }
3413 else
3414 {
3415 if (debug_threads)
3416 debug_printf ("no deferred signals.\n");
3417
3418 if (stabilizing_threads)
3419 {
3420 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3421 ourstatus->value.sig = GDB_SIGNAL_0;
3422
3423 if (debug_threads)
3424 {
3425 debug_printf ("linux_wait_1 ret = %s, stopped "
3426 "while stabilizing threads\n",
3427 target_pid_to_str (ptid_of (current_thread)));
3428 debug_exit ();
3429 }
3430
3431 return ptid_of (current_thread);
3432 }
3433 }
3434 }
3435 }
3436
3437 /* Check whether GDB would be interested in this event. */
3438
3439 /* Check if GDB is interested in this syscall. */
3440 if (WIFSTOPPED (w)
3441 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3442 && !gdb_catch_this_syscall_p (event_child))
3443 {
3444 if (debug_threads)
3445 {
3446 debug_printf ("Ignored syscall for LWP %ld.\n",
3447 lwpid_of (current_thread));
3448 }
3449
3450 linux_resume_one_lwp (event_child, event_child->stepping,
3451 0, NULL);
3452
3453 if (debug_threads)
3454 debug_exit ();
3455 return ignore_event (ourstatus);
3456 }
3457
3458 /* If GDB is not interested in this signal, don't stop other
3459 threads, and don't report it to GDB. Just resume the inferior
3460 right away. We do this for threading-related signals as well as
3461 any that GDB specifically requested we ignore. But never ignore
3462 SIGSTOP if we sent it ourselves, and do not ignore signals when
3463 stepping - they may require special handling to skip the signal
3464 handler. Also never ignore signals that could be caused by a
3465 breakpoint. */
3466 if (WIFSTOPPED (w)
3467 && current_thread->last_resume_kind != resume_step
3468 && (
3469 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3470 (current_process ()->priv->thread_db != NULL
3471 && (WSTOPSIG (w) == __SIGRTMIN
3472 || WSTOPSIG (w) == __SIGRTMIN + 1))
3473 ||
3474 #endif
3475 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3476 && !(WSTOPSIG (w) == SIGSTOP
3477 && current_thread->last_resume_kind == resume_stop)
3478 && !linux_wstatus_maybe_breakpoint (w))))
3479 {
3480 siginfo_t info, *info_p;
3481
3482 if (debug_threads)
3483 debug_printf ("Ignored signal %d for LWP %ld.\n",
3484 WSTOPSIG (w), lwpid_of (current_thread));
3485
3486 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3487 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3488 info_p = &info;
3489 else
3490 info_p = NULL;
3491
3492 if (step_over_finished)
3493 {
3494 /* We cancelled this thread's step-over above. We still
3495 need to unsuspend all other LWPs, and set them back
3496 running again while the signal handler runs. */
3497 unsuspend_all_lwps (event_child);
3498
3499 /* Enqueue the pending signal info so that proceed_all_lwps
3500 doesn't lose it. */
3501 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3502
3503 proceed_all_lwps ();
3504 }
3505 else
3506 {
3507 linux_resume_one_lwp (event_child, event_child->stepping,
3508 WSTOPSIG (w), info_p);
3509 }
3510
3511 if (debug_threads)
3512 debug_exit ();
3513
3514 return ignore_event (ourstatus);
3515 }
3516
3517 /* Note that all addresses are always "out of the step range" when
3518 there's no range to begin with. */
3519 in_step_range = lwp_in_step_range (event_child);
3520
3521 /* If GDB wanted this thread to single step, and the thread is out
3522 of the step range, we always want to report the SIGTRAP, and let
3523 GDB handle it. Watchpoints should always be reported. So should
3524 signals we can't explain. A SIGTRAP we can't explain could be a
3525 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3526 do, we'll be able to handle GDB breakpoints on top of internal
3527 breakpoints, by handling the internal breakpoint and still
3528 reporting the event to GDB. If we don't, we're out of luck, GDB
3529 won't see the breakpoint hit. If we see a single-step event but
3530 the thread should be continuing, don't pass the trap to gdb.
3531 That indicates that we had previously finished a single-step but
3532 left the single-step pending -- see
3533 complete_ongoing_step_over. */
3534 report_to_gdb = (!maybe_internal_trap
3535 || (current_thread->last_resume_kind == resume_step
3536 && !in_step_range)
3537 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3538 || (!in_step_range
3539 && !bp_explains_trap
3540 && !trace_event
3541 && !step_over_finished
3542 && !(current_thread->last_resume_kind == resume_continue
3543 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3544 || (gdb_breakpoint_here (event_child->stop_pc)
3545 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3546 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3547 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3548
3549 run_breakpoint_commands (event_child->stop_pc);
3550
3551 /* We found no reason GDB would want us to stop. We either hit one
3552 of our own breakpoints, or finished an internal step GDB
3553 shouldn't know about. */
3554 if (!report_to_gdb)
3555 {
3556 if (debug_threads)
3557 {
3558 if (bp_explains_trap)
3559 debug_printf ("Hit a gdbserver breakpoint.\n");
3560 if (step_over_finished)
3561 debug_printf ("Step-over finished.\n");
3562 if (trace_event)
3563 debug_printf ("Tracepoint event.\n");
3564 if (lwp_in_step_range (event_child))
3565 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3566 paddress (event_child->stop_pc),
3567 paddress (event_child->step_range_start),
3568 paddress (event_child->step_range_end));
3569 }
3570
3571 /* We're not reporting this breakpoint to GDB, so apply the
3572 decr_pc_after_break adjustment to the inferior's regcache
3573 ourselves. */
3574
3575 if (the_low_target.set_pc != NULL)
3576 {
3577 struct regcache *regcache
3578 = get_thread_regcache (current_thread, 1);
3579 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3580 }
3581
3582 if (step_over_finished)
3583 {
3584 /* If we have finished stepping over a breakpoint, we've
3585 stopped and suspended all LWPs momentarily except the
3586 stepping one. This is where we resume them all again.
3587 We're going to keep waiting, so use proceed, which
3588 handles stepping over the next breakpoint. */
3589 unsuspend_all_lwps (event_child);
3590 }
3591 else
3592 {
3593 /* Remove the single-step breakpoints if any. Note that there
3594 aren't any single-step breakpoints if we just finished stepping
3595 over. */
3596 if (can_software_single_step ()
3597 && has_single_step_breakpoints (current_thread))
3598 {
3599 stop_all_lwps (0, event_child);
3600 delete_single_step_breakpoints (current_thread);
3601 unstop_all_lwps (0, event_child);
3602 }
3603 }
3604
3605 if (debug_threads)
3606 debug_printf ("proceeding all threads.\n");
3607 proceed_all_lwps ();
3608
3609 if (debug_threads)
3610 debug_exit ();
3611
3612 return ignore_event (ourstatus);
3613 }
3614
3615 if (debug_threads)
3616 {
3617 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3618 {
3619 std::string str
3620 = target_waitstatus_to_string (&event_child->waitstatus);
3621
3622 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3623 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3624 }
3625 if (current_thread->last_resume_kind == resume_step)
3626 {
3627 if (event_child->step_range_start == event_child->step_range_end)
3628 debug_printf ("GDB wanted to single-step, reporting event.\n");
3629 else if (!lwp_in_step_range (event_child))
3630 debug_printf ("Out of step range, reporting event.\n");
3631 }
3632 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3633 debug_printf ("Stopped by watchpoint.\n");
3634 else if (gdb_breakpoint_here (event_child->stop_pc))
3635 debug_printf ("Stopped by GDB breakpoint.\n");
3636 debug_printf ("Hit a non-gdbserver "
3637 "trap event.\n");
3638 }
3639
3640 /* Alright, we're going to report a stop. */
3641
3642 /* Remove single-step breakpoints. */
3643 if (can_software_single_step ())
3644 {
3645 /* Decide whether to remove single-step breakpoints. If we do,
3646 stop all lwps, so that other threads won't hit the breakpoint
3647 in the stale memory. */
3648 int remove_single_step_breakpoints_p = 0;
3649
3650 if (non_stop)
3651 {
3652 remove_single_step_breakpoints_p
3653 = has_single_step_breakpoints (current_thread);
3654 }
3655 else
3656 {
3657 /* In all-stop, a stop reply cancels all previous resume
3658 requests. Delete all single-step breakpoints. */
3659
3660 find_thread ([&] (thread_info *thread) {
3661 if (has_single_step_breakpoints (thread))
3662 {
3663 remove_single_step_breakpoints_p = 1;
3664 return true;
3665 }
3666
3667 return false;
3668 });
3669 }
3670
3671 if (remove_single_step_breakpoints_p)
3672 {
3673 	  /* If we remove single-step breakpoints from memory, stop all lwps,
3674 	     so that other threads won't hit a breakpoint in the now-stale
3675 	     memory.  */
3676 stop_all_lwps (0, event_child);
3677
3678 if (non_stop)
3679 {
3680 gdb_assert (has_single_step_breakpoints (current_thread));
3681 delete_single_step_breakpoints (current_thread);
3682 }
3683 else
3684 {
3685 for_each_thread ([] (thread_info *thread){
3686 if (has_single_step_breakpoints (thread))
3687 delete_single_step_breakpoints (thread);
3688 });
3689 }
3690
3691 unstop_all_lwps (0, event_child);
3692 }
3693 }
3694
3695 if (!stabilizing_threads)
3696 {
3697 /* In all-stop, stop all threads. */
3698 if (!non_stop)
3699 stop_all_lwps (0, NULL);
3700
3701 if (step_over_finished)
3702 {
3703 if (!non_stop)
3704 {
3705 /* If we were doing a step-over, all other threads but
3706 the stepping one had been paused in start_step_over,
3707 with their suspend counts incremented. We don't want
3708 to do a full unstop/unpause, because we're in
3709 all-stop mode (so we want threads stopped), but we
3710 still need to unsuspend the other threads, to
3711 decrement their `suspended' count back. */
3712 unsuspend_all_lwps (event_child);
3713 }
3714 else
3715 {
3716 /* If we just finished a step-over, then all threads had
3717 been momentarily paused. In all-stop, that's fine,
3718 we want threads stopped by now anyway. In non-stop,
3719 we need to re-resume threads that GDB wanted to be
3720 running. */
3721 unstop_all_lwps (1, event_child);
3722 }
3723 }
3724
3725 /* If we're not waiting for a specific LWP, choose an event LWP
3726 from among those that have had events. Giving equal priority
3727 to all LWPs that have had events helps prevent
3728 starvation. */
3729 if (ptid_equal (ptid, minus_one_ptid))
3730 {
3731 event_child->status_pending_p = 1;
3732 event_child->status_pending = w;
3733
3734 select_event_lwp (&event_child);
3735
3736 /* current_thread and event_child must stay in sync. */
3737 current_thread = get_lwp_thread (event_child);
3738
3739 event_child->status_pending_p = 0;
3740 w = event_child->status_pending;
3741 }
3742
3743
3744 /* Stabilize threads (move out of jump pads). */
3745 if (!non_stop)
3746 stabilize_threads ();
3747 }
3748 else
3749 {
3750 /* If we just finished a step-over, then all threads had been
3751 momentarily paused. In all-stop, that's fine, we want
3752 threads stopped by now anyway. In non-stop, we need to
3753 re-resume threads that GDB wanted to be running. */
3754 if (step_over_finished)
3755 unstop_all_lwps (1, event_child);
3756 }
3757
3758 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3759 {
3760 /* If the reported event is an exit, fork, vfork or exec, let
3761 GDB know. */
3762
3763 /* Break the unreported fork relationship chain. */
3764 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3765 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3766 {
3767 event_child->fork_relative->fork_relative = NULL;
3768 event_child->fork_relative = NULL;
3769 }
3770
3771 *ourstatus = event_child->waitstatus;
3772 /* Clear the event lwp's waitstatus since we handled it already. */
3773 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3774 }
3775 else
3776 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3777
3778 /* Now that we've selected our final event LWP, un-adjust its PC if
3779 it was a software breakpoint, and the client doesn't know we can
3780 adjust the breakpoint ourselves. */
3781 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3782 && !swbreak_feature)
3783 {
3784 int decr_pc = the_low_target.decr_pc_after_break;
3785
3786 if (decr_pc != 0)
3787 {
3788 struct regcache *regcache
3789 = get_thread_regcache (current_thread, 1);
3790 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3791 }
3792 }
3793
3794 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3795 {
3796 get_syscall_trapinfo (event_child,
3797 &ourstatus->value.syscall_number);
3798 ourstatus->kind = event_child->syscall_state;
3799 }
3800 else if (current_thread->last_resume_kind == resume_stop
3801 && WSTOPSIG (w) == SIGSTOP)
3802 {
3803 	      /* A thread that has been requested to stop by GDB with vCont;t
3804 		 stopped cleanly, so report it as SIG0.  The use of SIGSTOP
3805 		 is an implementation detail.  */
3806 ourstatus->value.sig = GDB_SIGNAL_0;
3807 }
3808 else if (current_thread->last_resume_kind == resume_stop
3809 && WSTOPSIG (w) != SIGSTOP)
3810 {
3811 	      /* A thread that has been requested to stop by GDB with vCont;t,
3812 		 but it stopped for some other reason.  */
3813 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3814 }
3815 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3816 {
3817 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3818 }
3819
3820 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3821
3822 if (debug_threads)
3823 {
3824 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3825 target_pid_to_str (ptid_of (current_thread)),
3826 ourstatus->kind, ourstatus->value.sig);
3827 debug_exit ();
3828 }
3829
3830 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3831 return filter_exit_event (event_child, ourstatus);
3832
3833 return ptid_of (current_thread);
3834 }
3835
3836 /* Get rid of any pending event in the pipe. */
3837 static void
3838 async_file_flush (void)
3839 {
3840 int ret;
3841 char buf;
3842
3843 do
3844 ret = read (linux_event_pipe[0], &buf, 1);
3845 while (ret >= 0 || (ret == -1 && errno == EINTR));
3846 }
3847
3848 /* Put something in the pipe, so the event loop wakes up. */
3849 static void
3850 async_file_mark (void)
3851 {
3852 int ret;
3853
3854 async_file_flush ();
3855
3856 do
3857 ret = write (linux_event_pipe[1], "+", 1);
3858 while (ret == 0 || (ret == -1 && errno == EINTR));
3859
3860 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3861 be awakened anyway. */
3862 }
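
/* The pair of helpers above is the classic self-pipe pattern: an event
   source writes one byte to wake a select-based loop, and the reader
   drains the pipe before blocking again.  The following is an
   illustrative, standalone sketch of the same pattern (not gdbserver
   code; all names are local to the sketch).  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

int
main (void)
{
  int fds[2];
  fd_set rset;
  char buf;

  if (pipe (fds) != 0)
    return 1;
  /* Non-blocking ends: a full pipe never stalls the writer; EAGAIN
     just means the loop is already due to wake up.  */
  fcntl (fds[0], F_SETFL, O_NONBLOCK);
  fcntl (fds[1], F_SETFL, O_NONBLOCK);

  write (fds[1], "+", 1);	/* The async_file_mark equivalent.  */

  FD_ZERO (&rset);
  FD_SET (fds[0], &rset);
  if (select (fds[0] + 1, &rset, NULL, NULL, NULL) > 0)
    {
      /* Drain fully: one wakeup may stand for many marks.  */
      while (read (fds[0], &buf, 1) > 0)
	;
      puts ("event loop woke up");
    }
  return 0;
}
#endif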
3863
3864 static ptid_t
3865 linux_wait (ptid_t ptid,
3866 struct target_waitstatus *ourstatus, int target_options)
3867 {
3868 ptid_t event_ptid;
3869
3870 /* Flush the async file first. */
3871 if (target_is_async_p ())
3872 async_file_flush ();
3873
3874 do
3875 {
3876 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3877 }
3878 while ((target_options & TARGET_WNOHANG) == 0
3879 && ptid_equal (event_ptid, null_ptid)
3880 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3881
3882 /* If at least one stop was reported, there may be more. A single
3883 SIGCHLD can signal more than one child stop. */
3884 if (target_is_async_p ()
3885 && (target_options & TARGET_WNOHANG) != 0
3886 && !ptid_equal (event_ptid, null_ptid))
3887 async_file_mark ();
3888
3889 return event_ptid;
3890 }
3891
3892 /* Send a signal to an LWP. */
3893
3894 static int
3895 kill_lwp (unsigned long lwpid, int signo)
3896 {
3897 int ret;
3898
3899 errno = 0;
3900 ret = syscall (__NR_tkill, lwpid, signo);
3901 if (errno == ENOSYS)
3902 {
3903 /* If tkill fails, then we are not using nptl threads, a
3904 configuration we no longer support. */
3905 perror_with_name (("tkill"));
3906 }
3907 return ret;
3908 }
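
/* kill() addresses a whole thread group, so the kernel may deliver the
   signal to any thread in it; tkill addresses exactly one LWP.  Modern
   code prefers tgkill, which also takes the thread-group id, so that a
   recycled tid belonging to another process cannot be signalled by
   mistake.  Standalone sketch under that assumption (hypothetical
   helper, not used by gdbserver):  */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

static int
signal_one_lwp (pid_t tgid, pid_t tid, int signo)
{
  /* As with tkill above, glibc provides no wrapper; invoke the
     syscall directly.  */
  return syscall (SYS_tgkill, tgid, tid, signo);
}
#endif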
3909
3910 void
3911 linux_stop_lwp (struct lwp_info *lwp)
3912 {
3913 send_sigstop (lwp);
3914 }
3915
3916 static void
3917 send_sigstop (struct lwp_info *lwp)
3918 {
3919 int pid;
3920
3921 pid = lwpid_of (get_lwp_thread (lwp));
3922
3923 /* If we already have a pending stop signal for this process, don't
3924 send another. */
3925 if (lwp->stop_expected)
3926 {
3927 if (debug_threads)
3928 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3929
3930 return;
3931 }
3932
3933 if (debug_threads)
3934 debug_printf ("Sending sigstop to lwp %d\n", pid);
3935
3936 lwp->stop_expected = 1;
3937 kill_lwp (pid, SIGSTOP);
3938 }
3939
3940 static void
3941 send_sigstop (thread_info *thread, lwp_info *except)
3942 {
3943 struct lwp_info *lwp = get_thread_lwp (thread);
3944
3945 /* Ignore EXCEPT. */
3946 if (lwp == except)
3947 return;
3948
3949 if (lwp->stopped)
3950 return;
3951
3952 send_sigstop (lwp);
3953 }
3954
3955 /* Increment the suspend count of an LWP, and stop it, if not stopped
3956 yet. */
3957 static void
3958 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3959 {
3960 struct lwp_info *lwp = get_thread_lwp (thread);
3961
3962 /* Ignore EXCEPT. */
3963 if (lwp == except)
3964 return;
3965
3966 lwp_suspended_inc (lwp);
3967
3968 send_sigstop (thread, except);
3969 }
3970
3971 static void
3972 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3973 {
3974 /* Store the exit status for later. */
3975 lwp->status_pending_p = 1;
3976 lwp->status_pending = wstat;
3977
3978 /* Store in waitstatus as well, as there's nothing else to process
3979 for this event. */
3980 if (WIFEXITED (wstat))
3981 {
3982 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3983 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3984 }
3985 else if (WIFSIGNALED (wstat))
3986 {
3987 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3988 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3989 }
3990
3991 /* Prevent trying to stop it. */
3992 lwp->stopped = 1;
3993
3994 /* No further stops are expected from a dead lwp. */
3995 lwp->stop_expected = 0;
3996 }
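
/* For reference, a standalone sketch of the wait-status decoding that
   mark_lwp_dead relies on: a raw waitpid status is a tagged value that
   the W* macros from <sys/wait.h> classify.  */
#if 0
#include <stdio.h>
#include <sys/wait.h>

static void
describe_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited normally, code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif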
3997
3998 /* Return true if LWP has exited already, and has a pending exit event
3999 to report to GDB. */
4000
4001 static int
4002 lwp_is_marked_dead (struct lwp_info *lwp)
4003 {
4004 return (lwp->status_pending_p
4005 && (WIFEXITED (lwp->status_pending)
4006 || WIFSIGNALED (lwp->status_pending)));
4007 }
4008
4009 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4010
4011 static void
4012 wait_for_sigstop (void)
4013 {
4014 struct thread_info *saved_thread;
4015 ptid_t saved_tid;
4016 int wstat;
4017 int ret;
4018
4019 saved_thread = current_thread;
4020 if (saved_thread != NULL)
4021 saved_tid = saved_thread->id;
4022 else
4023 saved_tid = null_ptid; /* avoid bogus unused warning */
4024
4025 if (debug_threads)
4026 debug_printf ("wait_for_sigstop: pulling events\n");
4027
4028 /* Passing NULL_PTID as filter indicates we want all events to be
4029 left pending. Eventually this returns when there are no
4030 unwaited-for children left. */
4031 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4032 &wstat, __WALL);
4033 gdb_assert (ret == -1);
4034
4035 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4036 current_thread = saved_thread;
4037 else
4038 {
4039 if (debug_threads)
4040 debug_printf ("Previously current thread died.\n");
4041
4042 	      /* We can't change the current inferior behind GDB's back;
4043 		 otherwise, a subsequent command may apply to the wrong
4044 process. */
4045 current_thread = NULL;
4046 }
4047 }
4048
4049 /* Returns true if THREAD is stopped in a jump pad, and we can't
4050 move it out, because we need to report the stop event to GDB. For
4051 example, if the user puts a breakpoint in the jump pad, it's
4052 because she wants to debug it. */
4053
4054 static bool
4055 stuck_in_jump_pad_callback (thread_info *thread)
4056 {
4057 struct lwp_info *lwp = get_thread_lwp (thread);
4058
4059 if (lwp->suspended != 0)
4060 {
4061 internal_error (__FILE__, __LINE__,
4062 "LWP %ld is suspended, suspended=%d\n",
4063 lwpid_of (thread), lwp->suspended);
4064 }
4065 gdb_assert (lwp->stopped);
4066
4067 	  /* Allow debugging the jump pad, gdb_collect, etc.  */
4068 return (supports_fast_tracepoints ()
4069 && agent_loaded_p ()
4070 && (gdb_breakpoint_here (lwp->stop_pc)
4071 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4072 || thread->last_resume_kind == resume_step)
4073 && (linux_fast_tracepoint_collecting (lwp, NULL)
4074 != fast_tpoint_collect_result::not_collecting));
4075 }
4076
4077 static void
4078 move_out_of_jump_pad_callback (thread_info *thread)
4079 {
4080 struct thread_info *saved_thread;
4081 struct lwp_info *lwp = get_thread_lwp (thread);
4082 int *wstat;
4083
4084 if (lwp->suspended != 0)
4085 {
4086 internal_error (__FILE__, __LINE__,
4087 "LWP %ld is suspended, suspended=%d\n",
4088 lwpid_of (thread), lwp->suspended);
4089 }
4090 gdb_assert (lwp->stopped);
4091
4092 /* For gdb_breakpoint_here. */
4093 saved_thread = current_thread;
4094 current_thread = thread;
4095
4096 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4097
4098 /* Allow debugging the jump pad, gdb_collect, etc. */
4099 if (!gdb_breakpoint_here (lwp->stop_pc)
4100 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4101 && thread->last_resume_kind != resume_step
4102 && maybe_move_out_of_jump_pad (lwp, wstat))
4103 {
4104 if (debug_threads)
4105 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4106 lwpid_of (thread));
4107
4108 if (wstat)
4109 {
4110 lwp->status_pending_p = 0;
4111 enqueue_one_deferred_signal (lwp, wstat);
4112
4113 if (debug_threads)
4114 debug_printf ("Signal %d for LWP %ld deferred "
4115 "(in jump pad)\n",
4116 WSTOPSIG (*wstat), lwpid_of (thread));
4117 }
4118
4119 linux_resume_one_lwp (lwp, 0, 0, NULL);
4120 }
4121 else
4122 lwp_suspended_inc (lwp);
4123
4124 current_thread = saved_thread;
4125 }
4126
4127 static bool
4128 lwp_running (thread_info *thread)
4129 {
4130 struct lwp_info *lwp = get_thread_lwp (thread);
4131
4132 if (lwp_is_marked_dead (lwp))
4133 return false;
4134
4135 return !lwp->stopped;
4136 }
4137
4138 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4139 If SUSPEND, then also increase the suspend count of every LWP,
4140 except EXCEPT. */
4141
4142 static void
4143 stop_all_lwps (int suspend, struct lwp_info *except)
4144 {
4145 /* Should not be called recursively. */
4146 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4147
4148 if (debug_threads)
4149 {
4150 debug_enter ();
4151 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4152 suspend ? "stop-and-suspend" : "stop",
4153 except != NULL
4154 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4155 : "none");
4156 }
4157
4158 stopping_threads = (suspend
4159 ? STOPPING_AND_SUSPENDING_THREADS
4160 : STOPPING_THREADS);
4161
4162 if (suspend)
4163 for_each_thread ([&] (thread_info *thread)
4164 {
4165 suspend_and_send_sigstop (thread, except);
4166 });
4167 else
4168 for_each_thread ([&] (thread_info *thread)
4169 {
4170 send_sigstop (thread, except);
4171 });
4172
4173 wait_for_sigstop ();
4174 stopping_threads = NOT_STOPPING_THREADS;
4175
4176 if (debug_threads)
4177 {
4178 debug_printf ("stop_all_lwps done, setting stopping_threads "
4179 "back to !stopping\n");
4180 debug_exit ();
4181 }
4182 }
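
/* Condensed, standalone sketch of the stop-all protocol above: queue
   one SIGSTOP per still-running LWP, then reap that many stop
   notifications.  The toy_lwp list is a hypothetical stand-in for
   gdbserver's thread list, and unlike wait_for_sigstop this sketch
   does not preserve unrelated events.  */
#if 0
#define _GNU_SOURCE
#include <signal.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

struct toy_lwp
{
  struct toy_lwp *next;
  pid_t tid;
  int stopped;
  int stop_expected;
};

static void
stop_all_sketch (struct toy_lwp *lwps)
{
  int expected = 0;
  struct toy_lwp *l;

  for (l = lwps; l != NULL; l = l->next)
    if (!l->stopped && !l->stop_expected)
      {
	l->stop_expected = 1;
	syscall (SYS_tkill, l->tid, SIGSTOP);
	expected++;
      }

  /* __WALL reaps both clone and non-clone children.  */
  while (expected-- > 0)
    {
      int wstat;
      waitpid (-1, &wstat, __WALL);
    }
}
#endif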
4183
4184 /* Enqueue one signal in the chain of signals which need to be
4185 delivered to this process on next resume. */
4186
4187 static void
4188 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4189 {
4190 struct pending_signals *p_sig = XNEW (struct pending_signals);
4191
4192 p_sig->prev = lwp->pending_signals;
4193 p_sig->signal = signal;
4194 if (info == NULL)
4195 memset (&p_sig->info, 0, sizeof (siginfo_t));
4196 else
4197 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4198 lwp->pending_signals = p_sig;
4199 }
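
/* Standalone sketch of the queue discipline above, with toy types:
   enqueue pushes at the head and links the previous head via 'prev',
   so the oldest entry ends up deepest in the chain; the consumer (see
   linux_resume_one_lwp_throw below) walks to that end, which yields
   FIFO delivery.  */
#if 0
#include <stdlib.h>

struct toy_sig
{
  struct toy_sig *prev;
  int signal;
};

static void
toy_enqueue (struct toy_sig **head, int signal)
{
  struct toy_sig *p = (struct toy_sig *) malloc (sizeof *p);

  p->prev = *head;
  p->signal = signal;
  *head = p;
}

/* Caller ensures the chain is non-empty.  */
static int
toy_dequeue_oldest (struct toy_sig **head)
{
  struct toy_sig **p = head;
  int sig;

  while ((*p)->prev != NULL)	/* Walk to the oldest entry.  */
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif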
4200
4201 /* Install breakpoints for software single stepping. */
4202
4203 static void
4204 install_software_single_step_breakpoints (struct lwp_info *lwp)
4205 {
4206 struct thread_info *thread = get_lwp_thread (lwp);
4207 struct regcache *regcache = get_thread_regcache (thread, 1);
4208 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4209
4210 current_thread = thread;
4211 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4212
4213 for (CORE_ADDR pc : next_pcs)
4214 set_single_step_breakpoint (pc, current_ptid);
4215
4216 do_cleanups (old_chain);
4217 }
4218
4219 /* Single step via hardware or software single step.
4220    Return 1 if hardware single stepping, 0 if software single stepping
4221    or if the target cannot single step at all.  */
4222
4223 static int
4224 single_step (struct lwp_info* lwp)
4225 {
4226 int step = 0;
4227
4228 if (can_hardware_single_step ())
4229 {
4230 step = 1;
4231 }
4232 else if (can_software_single_step ())
4233 {
4234 install_software_single_step_breakpoints (lwp);
4235 step = 0;
4236 }
4237 else
4238 {
4239 if (debug_threads)
4240 debug_printf ("stepping is not implemented on this target");
4241 }
4242
4243 return step;
4244 }
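
/* At the ptrace level, "hardware single step" means PTRACE_SINGLESTEP:
   the kernel runs the tracee for exactly one instruction and stops it
   with a SIGTRAP.  Software single step instead plants breakpoints at
   every possible next pc (see install_software_single_step_breakpoints
   above) and resumes with PTRACE_CONT.  Standalone sketch of the
   hardware case, stepping a forked child once:  */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  int wstat;
  pid_t child = fork ();

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      raise (SIGSTOP);		/* Hand control to the parent.  */
      _exit (0);
    }

  waitpid (child, &wstat, 0);		    /* Initial stop.  */
  ptrace (PTRACE_SINGLESTEP, child, 0, 0);  /* Exactly one insn.  */
  waitpid (child, &wstat, 0);		    /* SIGTRAP after the step.  */
  ptrace (PTRACE_CONT, child, 0, 0);	    /* Let it run to exit.  */
  waitpid (child, &wstat, 0);
  return 0;
}
#endif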
4245
4246 /* The signal can be delivered to the inferior if we are not trying to
4247    finish a fast tracepoint collect.  Since a signal can be delivered
4248    during the step-over, the program may enter the signal handler and
4249    trap again after returning from it.  We can live with these spurious
4250    double traps.  */
4251
4252 static int
4253 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4254 {
4255 return (lwp->collecting_fast_tracepoint
4256 == fast_tpoint_collect_result::not_collecting);
4257 }
4258
4259 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4260 SIGNAL is nonzero, give it that signal. */
4261
4262 static void
4263 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4264 int step, int signal, siginfo_t *info)
4265 {
4266 struct thread_info *thread = get_lwp_thread (lwp);
4267 struct thread_info *saved_thread;
4268 int ptrace_request;
4269 struct process_info *proc = get_thread_process (thread);
4270
4271   /* Note that the target description may not be initialised
4272      (proc->tdesc == NULL) at this point because the program hasn't
4273      stopped at the first instruction yet.  It means GDBserver skips
4274      the extra traps from the wrapper program (see option --wrapper).
4275      Code in this function that requires register access should be
4276      guarded by a check that proc->tdesc is not NULL, or similar.  */
4277
4278 if (lwp->stopped == 0)
4279 return;
4280
4281 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4282
4283 fast_tpoint_collect_result fast_tp_collecting
4284 = lwp->collecting_fast_tracepoint;
4285
4286 gdb_assert (!stabilizing_threads
4287 || (fast_tp_collecting
4288 != fast_tpoint_collect_result::not_collecting));
4289
4290 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4291 user used the "jump" command, or "set $pc = foo"). */
4292 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4293 {
4294 /* Collecting 'while-stepping' actions doesn't make sense
4295 anymore. */
4296 release_while_stepping_state_list (thread);
4297 }
4298
4299 /* If we have pending signals or status, and a new signal, enqueue the
4300 signal. Also enqueue the signal if it can't be delivered to the
4301 inferior right now. */
4302 if (signal != 0
4303 && (lwp->status_pending_p
4304 || lwp->pending_signals != NULL
4305 || !lwp_signal_can_be_delivered (lwp)))
4306 {
4307 enqueue_pending_signal (lwp, signal, info);
4308
4309 /* Postpone any pending signal. It was enqueued above. */
4310 signal = 0;
4311 }
4312
4313 if (lwp->status_pending_p)
4314 {
4315 if (debug_threads)
4316 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4317 " has pending status\n",
4318 lwpid_of (thread), step ? "step" : "continue",
4319 lwp->stop_expected ? "expected" : "not expected");
4320 return;
4321 }
4322
4323 saved_thread = current_thread;
4324 current_thread = thread;
4325
4326 /* This bit needs some thinking about. If we get a signal that
4327 we must report while a single-step reinsert is still pending,
4328 we often end up resuming the thread. It might be better to
4329 (ew) allow a stack of pending events; then we could be sure that
4330 the reinsert happened right away and not lose any signals.
4331
4332 Making this stack would also shrink the window in which breakpoints are
4333 uninserted (see comment in linux_wait_for_lwp) but not enough for
4334 complete correctness, so it won't solve that problem. It may be
4335 worthwhile just to solve this one, however. */
4336 if (lwp->bp_reinsert != 0)
4337 {
4338 if (debug_threads)
4339 debug_printf (" pending reinsert at 0x%s\n",
4340 paddress (lwp->bp_reinsert));
4341
4342 if (can_hardware_single_step ())
4343 {
4344 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4345 {
4346 if (step == 0)
4347 warning ("BAD - reinserting but not stepping.");
4348 if (lwp->suspended)
4349 warning ("BAD - reinserting and suspended(%d).",
4350 lwp->suspended);
4351 }
4352 }
4353
4354 step = maybe_hw_step (thread);
4355 }
4356
4357 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4358 {
4359 if (debug_threads)
4360 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4361 " (exit-jump-pad-bkpt)\n",
4362 lwpid_of (thread));
4363 }
4364 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4365 {
4366 if (debug_threads)
4367 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4368 " single-stepping\n",
4369 lwpid_of (thread));
4370
4371 if (can_hardware_single_step ())
4372 step = 1;
4373 else
4374 {
4375 internal_error (__FILE__, __LINE__,
4376 "moving out of jump pad single-stepping"
4377 " not implemented on this target");
4378 }
4379 }
4380
4381   /* If we have while-stepping actions in this thread, set it stepping.
4382 If we have a signal to deliver, it may or may not be set to
4383 SIG_IGN, we don't know. Assume so, and allow collecting
4384 while-stepping into a signal handler. A possible smart thing to
4385 do would be to set an internal breakpoint at the signal return
4386 address, continue, and carry on catching this while-stepping
4387 action only when that breakpoint is hit. A future
4388 enhancement. */
4389 if (thread->while_stepping != NULL)
4390 {
4391 if (debug_threads)
4392 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4393 lwpid_of (thread));
4394
4395 step = single_step (lwp);
4396 }
4397
4398 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4399 {
4400 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4401
4402 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4403
4404 if (debug_threads)
4405 {
4406 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4407 (long) lwp->stop_pc);
4408 }
4409 }
4410
4411 /* If we have pending signals, consume one if it can be delivered to
4412 the inferior. */
4413 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4414 {
4415 struct pending_signals **p_sig;
4416
4417 p_sig = &lwp->pending_signals;
4418 while ((*p_sig)->prev != NULL)
4419 p_sig = &(*p_sig)->prev;
4420
4421 signal = (*p_sig)->signal;
4422 if ((*p_sig)->info.si_signo != 0)
4423 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4424 &(*p_sig)->info);
4425
4426 free (*p_sig);
4427 *p_sig = NULL;
4428 }
4429
4430 if (debug_threads)
4431 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4432 lwpid_of (thread), step ? "step" : "continue", signal,
4433 lwp->stop_expected ? "expected" : "not expected");
4434
4435 if (the_low_target.prepare_to_resume != NULL)
4436 the_low_target.prepare_to_resume (lwp);
4437
4438 regcache_invalidate_thread (thread);
4439 errno = 0;
4440 lwp->stepping = step;
4441 if (step)
4442 ptrace_request = PTRACE_SINGLESTEP;
4443 else if (gdb_catching_syscalls_p (lwp))
4444 ptrace_request = PTRACE_SYSCALL;
4445 else
4446 ptrace_request = PTRACE_CONT;
4447 ptrace (ptrace_request,
4448 lwpid_of (thread),
4449 (PTRACE_TYPE_ARG3) 0,
4450 /* Coerce to a uintptr_t first to avoid potential gcc warning
4451 of coercing an 8 byte integer to a 4 byte pointer. */
4452 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4453
4454 current_thread = saved_thread;
4455 if (errno)
4456 perror_with_name ("resuming thread");
4457
4458 /* Successfully resumed. Clear state that no longer makes sense,
4459 and mark the LWP as running. Must not do this before resuming
4460 otherwise if that fails other code will be confused. E.g., we'd
4461 later try to stop the LWP and hang forever waiting for a stop
4462 status. Note that we must not throw after this is cleared,
4463 otherwise handle_zombie_lwp_error would get confused. */
4464 lwp->stopped = 0;
4465 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4466 }
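
/* Standalone sketch of the resume dispatch just above: the ptrace
   request chooses *how* the tracee runs (one instruction, until the
   next syscall boundary, or freely), while the last argument injects a
   signal, zero meaning none.  Hypothetical helper; the real code also
   restores the precise siginfo first via PTRACE_SETSIGINFO.  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>

static long
resume_lwp_sketch (pid_t tid, int step, int catch_syscalls, int signo)
{
  if (step)
    return ptrace (PTRACE_SINGLESTEP, tid, (void *) 0, (void *) (long) signo);
  if (catch_syscalls)
    return ptrace (PTRACE_SYSCALL, tid, (void *) 0, (void *) (long) signo);
  return ptrace (PTRACE_CONT, tid, (void *) 0, (void *) (long) signo);
}
#endif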
4467
4468 /* Called when we try to resume a stopped LWP and that errors out. If
4469    the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4470    or about to become one), discard the error, clear any pending status
4471 the LWP may have, and return true (we'll collect the exit status
4472 soon enough). Otherwise, return false. */
4473
4474 static int
4475 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4476 {
4477 struct thread_info *thread = get_lwp_thread (lp);
4478
4479 /* If we get an error after resuming the LWP successfully, we'd
4480 confuse !T state for the LWP being gone. */
4481 gdb_assert (lp->stopped);
4482
4483 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4484 because even if ptrace failed with ESRCH, the tracee may be "not
4485 yet fully dead", but already refusing ptrace requests. In that
4486 case the tracee has 'R (Running)' state for a little bit
4487 (observed in Linux 3.18). See also the note on ESRCH in the
4488 ptrace(2) man page. Instead, check whether the LWP has any state
4489 other than ptrace-stopped. */
4490
4491 /* Don't assume anything if /proc/PID/status can't be read. */
4492 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4493 {
4494 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4495 lp->status_pending_p = 0;
4496 return 1;
4497 }
4498 return 0;
4499 }
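
/* Standalone sketch of the /proc probe behind
   linux_proc_pid_is_trace_stopped_nowarn: scan /proc/TID/status for
   the State: line and test for "(tracing stop)".  Hypothetical helper;
   the real implementation lives in nat/linux-procfs.c.  */
#if 0
#include <stdio.h>
#include <string.h>

static int
is_trace_stopped_sketch (int tid)
{
  char path[64], line[128];
  FILE *f;
  int result = -1;		/* -1: unreadable, assume nothing.  */

  snprintf (path, sizeof path, "/proc/%d/status", tid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;

  while (fgets (line, sizeof line, f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	result = strstr (line, "(tracing stop)") != NULL;
	break;
      }

  fclose (f);
  return result;
}
#endif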
4500
4501 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4502 disappears while we try to resume it. */
4503
4504 static void
4505 linux_resume_one_lwp (struct lwp_info *lwp,
4506 int step, int signal, siginfo_t *info)
4507 {
4508 TRY
4509 {
4510 linux_resume_one_lwp_throw (lwp, step, signal, info);
4511 }
4512 CATCH (ex, RETURN_MASK_ERROR)
4513 {
4514 if (!check_ptrace_stopped_lwp_gone (lwp))
4515 throw_exception (ex);
4516 }
4517 END_CATCH
4518 }
4519
4520 /* This function is called once per thread via for_each_thread.
4521 We look up which resume request applies to THREAD and mark it with a
4522 pointer to the appropriate resume request.
4523
4524 This algorithm is O(threads * resume elements), but resume elements
4525 is small (and will remain small at least until GDB supports thread
4526 suspension). */
4527
4528 static void
4529 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4530 {
4531 struct lwp_info *lwp = get_thread_lwp (thread);
4532
4533 for (int ndx = 0; ndx < n; ndx++)
4534 {
4535 ptid_t ptid = resume[ndx].thread;
4536 if (ptid_equal (ptid, minus_one_ptid)
4537 || ptid == thread->id
4538 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4539 of PID'. */
4540 || (ptid_get_pid (ptid) == pid_of (thread)
4541 && (ptid_is_pid (ptid)
4542 || ptid_get_lwp (ptid) == -1)))
4543 {
4544 if (resume[ndx].kind == resume_stop
4545 && thread->last_resume_kind == resume_stop)
4546 {
4547 if (debug_threads)
4548 debug_printf ("already %s LWP %ld at GDB's request\n",
4549 (thread->last_status.kind
4550 == TARGET_WAITKIND_STOPPED)
4551 ? "stopped"
4552 : "stopping",
4553 lwpid_of (thread));
4554
4555 continue;
4556 }
4557
4558 /* Ignore (wildcard) resume requests for already-resumed
4559 threads. */
4560 if (resume[ndx].kind != resume_stop
4561 && thread->last_resume_kind != resume_stop)
4562 {
4563 if (debug_threads)
4564 debug_printf ("already %s LWP %ld at GDB's request\n",
4565 (thread->last_resume_kind
4566 == resume_step)
4567 ? "stepping"
4568 : "continuing",
4569 lwpid_of (thread));
4570 continue;
4571 }
4572
4573 /* Don't let wildcard resumes resume fork children that GDB
4574 does not yet know are new fork children. */
4575 if (lwp->fork_relative != NULL)
4576 {
4577 struct lwp_info *rel = lwp->fork_relative;
4578
4579 if (rel->status_pending_p
4580 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4581 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4582 {
4583 if (debug_threads)
4584 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4585 lwpid_of (thread));
4586 continue;
4587 }
4588 }
4589
4590 /* If the thread has a pending event that has already been
4591 reported to GDBserver core, but GDB has not pulled the
4592 event out of the vStopped queue yet, likewise, ignore the
4593 (wildcard) resume request. */
4594 if (in_queued_stop_replies (thread->id))
4595 {
4596 if (debug_threads)
4597 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4598 lwpid_of (thread));
4599 continue;
4600 }
4601
4602 lwp->resume = &resume[ndx];
4603 thread->last_resume_kind = lwp->resume->kind;
4604
4605 lwp->step_range_start = lwp->resume->step_range_start;
4606 lwp->step_range_end = lwp->resume->step_range_end;
4607
4608 /* If we had a deferred signal to report, dequeue one now.
4609 This can happen if LWP gets more than one signal while
4610 trying to get out of a jump pad. */
4611 if (lwp->stopped
4612 && !lwp->status_pending_p
4613 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4614 {
4615 lwp->status_pending_p = 1;
4616
4617 if (debug_threads)
4618 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4619 "leaving status pending.\n",
4620 WSTOPSIG (lwp->status_pending),
4621 lwpid_of (thread));
4622 }
4623
4624 return;
4625 }
4626 }
4627
4628 /* No resume action for this thread. */
4629 lwp->resume = NULL;
4630 }
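
/* The matching rules above, restated as a standalone sketch with a toy
   ptid type: (-1, *) matches every thread; (PID, 0) and (PID, -1)
   match all threads of PID; (PID, LWP) matches one thread.  Toy types
   only, not gdbserver's ptid_t.  */
#if 0
struct toy_ptid
{
  int pid;
  long lwp;
};

static int
toy_resume_matches (struct toy_ptid req, struct toy_ptid thread)
{
  if (req.pid == -1)			     /* minus_one_ptid: everything.  */
    return 1;
  if (req.pid == thread.pid
      && (req.lwp == 0 || req.lwp == -1))    /* 'pPID' and 'pPID.-1'.  */
    return 1;
  return req.pid == thread.pid && req.lwp == thread.lwp;
}
#endif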
4631
4632 /* find_thread callback for linux_resume. Return true if this lwp has an
4633 interesting status pending. */
4634
4635 static bool
4636 resume_status_pending_p (thread_info *thread)
4637 {
4638 struct lwp_info *lwp = get_thread_lwp (thread);
4639
4640 /* LWPs which will not be resumed are not interesting, because
4641 we might not wait for them next time through linux_wait. */
4642 if (lwp->resume == NULL)
4643 return false;
4644
4645 return thread_still_has_status_pending_p (thread);
4646 }
4647
4648 /* Return true if this lwp that GDB wants running is stopped at an
4649 internal breakpoint that we need to step over. It assumes that any
4650 required STOP_PC adjustment has already been propagated to the
4651 inferior's regcache. */
4652
4653 static bool
4654 need_step_over_p (thread_info *thread)
4655 {
4656 struct lwp_info *lwp = get_thread_lwp (thread);
4657 struct thread_info *saved_thread;
4658 CORE_ADDR pc;
4659 struct process_info *proc = get_thread_process (thread);
4660
4661 /* GDBserver is skipping the extra traps from the wrapper program,
4662 don't have to do step over. */
4663 if (proc->tdesc == NULL)
4664 return false;
4665
4666 /* LWPs which will not be resumed are not interesting, because we
4667 might not wait for them next time through linux_wait. */
4668
4669 if (!lwp->stopped)
4670 {
4671 if (debug_threads)
4672 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4673 lwpid_of (thread));
4674 return false;
4675 }
4676
4677 if (thread->last_resume_kind == resume_stop)
4678 {
4679 if (debug_threads)
4680 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4681 " stopped\n",
4682 lwpid_of (thread));
4683 return false;
4684 }
4685
4686 gdb_assert (lwp->suspended >= 0);
4687
4688 if (lwp->suspended)
4689 {
4690 if (debug_threads)
4691 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4692 lwpid_of (thread));
4693 return false;
4694 }
4695
4696 if (lwp->status_pending_p)
4697 {
4698 if (debug_threads)
4699 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4700 " status.\n",
4701 lwpid_of (thread));
4702 return false;
4703 }
4704
4705 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4706 or we have. */
4707 pc = get_pc (lwp);
4708
4709 /* If the PC has changed since we stopped, then don't do anything,
4710 and let the breakpoint/tracepoint be hit. This happens if, for
4711 instance, GDB handled the decr_pc_after_break subtraction itself,
4712 GDB is OOL stepping this thread, or the user has issued a "jump"
4713      command, or poked the thread's registers herself.  */
4714 if (pc != lwp->stop_pc)
4715 {
4716 if (debug_threads)
4717 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4718 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4719 lwpid_of (thread),
4720 paddress (lwp->stop_pc), paddress (pc));
4721 return false;
4722 }
4723
4724   /* On a software single-step target, resume the inferior with the
4725      signal rather than stepping over.  */
4726 if (can_software_single_step ()
4727 && lwp->pending_signals != NULL
4728 && lwp_signal_can_be_delivered (lwp))
4729 {
4730 if (debug_threads)
4731 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4732 " signals.\n",
4733 lwpid_of (thread));
4734
4735 return false;
4736 }
4737
4738 saved_thread = current_thread;
4739 current_thread = thread;
4740
4741 /* We can only step over breakpoints we know about. */
4742 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4743 {
4744       /* Don't step over a breakpoint that GDB expects to hit
4745 	 though.  If the condition is being evaluated on the target's
4746 	 side and it evaluates to false, step over this breakpoint as well.  */
4747 if (gdb_breakpoint_here (pc)
4748 && gdb_condition_true_at_breakpoint (pc)
4749 && gdb_no_commands_at_breakpoint (pc))
4750 {
4751 if (debug_threads)
4752 debug_printf ("Need step over [LWP %ld]? yes, but found"
4753 " GDB breakpoint at 0x%s; skipping step over\n",
4754 lwpid_of (thread), paddress (pc));
4755
4756 current_thread = saved_thread;
4757 return false;
4758 }
4759 else
4760 {
4761 if (debug_threads)
4762 debug_printf ("Need step over [LWP %ld]? yes, "
4763 "found breakpoint at 0x%s\n",
4764 lwpid_of (thread), paddress (pc));
4765
4766 	  /* We've found an lwp that needs stepping over --- return true so
4767 	     that find_thread stops looking.  */
4768 current_thread = saved_thread;
4769
4770 return true;
4771 }
4772 }
4773
4774 current_thread = saved_thread;
4775
4776 if (debug_threads)
4777 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4778 " at 0x%s\n",
4779 lwpid_of (thread), paddress (pc));
4780
4781 return false;
4782 }
4783
4784 /* Start a step-over operation on LWP.  When LWP is stopped at a
4785    breakpoint, to make progress, we need to get the breakpoint out
4786 of the way. If we let other threads run while we do that, they may
4787 pass by the breakpoint location and miss hitting it. To avoid
4788 that, a step-over momentarily stops all threads while LWP is
4789 single-stepped by either hardware or software while the breakpoint
4790 is temporarily uninserted from the inferior. When the single-step
4791 finishes, we reinsert the breakpoint, and let all threads that are
4792 supposed to be running, run again. */
4793
4794 static int
4795 start_step_over (struct lwp_info *lwp)
4796 {
4797 struct thread_info *thread = get_lwp_thread (lwp);
4798 struct thread_info *saved_thread;
4799 CORE_ADDR pc;
4800 int step;
4801
4802 if (debug_threads)
4803 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4804 lwpid_of (thread));
4805
4806 stop_all_lwps (1, lwp);
4807
4808 if (lwp->suspended != 0)
4809 {
4810 internal_error (__FILE__, __LINE__,
4811 "LWP %ld suspended=%d\n", lwpid_of (thread),
4812 lwp->suspended);
4813 }
4814
4815 if (debug_threads)
4816 debug_printf ("Done stopping all threads for step-over.\n");
4817
4818 /* Note, we should always reach here with an already adjusted PC,
4819 either by GDB (if we're resuming due to GDB's request), or by our
4820 caller, if we just finished handling an internal breakpoint GDB
4821 shouldn't care about. */
4822 pc = get_pc (lwp);
4823
4824 saved_thread = current_thread;
4825 current_thread = thread;
4826
4827 lwp->bp_reinsert = pc;
4828 uninsert_breakpoints_at (pc);
4829 uninsert_fast_tracepoint_jumps_at (pc);
4830
4831 step = single_step (lwp);
4832
4833 current_thread = saved_thread;
4834
4835 linux_resume_one_lwp (lwp, step, 0, NULL);
4836
4837 /* Require next event from this LWP. */
4838 step_over_bkpt = thread->id;
4839 return 1;
4840 }
4841
4842 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4843 start_step_over, if still there, and delete any single-step
4844    breakpoints we've set, on non-hardware single-step targets.  */
4845
4846 static int
4847 finish_step_over (struct lwp_info *lwp)
4848 {
4849 if (lwp->bp_reinsert != 0)
4850 {
4851 struct thread_info *saved_thread = current_thread;
4852
4853 if (debug_threads)
4854 debug_printf ("Finished step over.\n");
4855
4856 current_thread = get_lwp_thread (lwp);
4857
4858 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4859 may be no breakpoint to reinsert there by now. */
4860 reinsert_breakpoints_at (lwp->bp_reinsert);
4861 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4862
4863 lwp->bp_reinsert = 0;
4864
4865 /* Delete any single-step breakpoints. No longer needed. We
4866 don't have to worry about other threads hitting this trap,
4867 and later not being able to explain it, because we were
4868 stepping over a breakpoint, and we hold all threads but
4869 LWP stopped while doing that. */
4870 if (!can_hardware_single_step ())
4871 {
4872 gdb_assert (has_single_step_breakpoints (current_thread));
4873 delete_single_step_breakpoints (current_thread);
4874 }
4875
4876 step_over_bkpt = null_ptid;
4877 current_thread = saved_thread;
4878 return 1;
4879 }
4880 else
4881 return 0;
4882 }
4883
4884 /* If there's a step over in progress, wait until all threads stop
4885 (that is, until the stepping thread finishes its step), and
4886 unsuspend all lwps. The stepping thread ends with its status
4887 pending, which is processed later when we get back to processing
4888 events. */
4889
4890 static void
4891 complete_ongoing_step_over (void)
4892 {
4893 if (!ptid_equal (step_over_bkpt, null_ptid))
4894 {
4895 struct lwp_info *lwp;
4896 int wstat;
4897 int ret;
4898
4899 if (debug_threads)
4900 debug_printf ("detach: step over in progress, finish it first\n");
4901
4902 /* Passing NULL_PTID as filter indicates we want all events to
4903 be left pending. Eventually this returns when there are no
4904 unwaited-for children left. */
4905 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4906 &wstat, __WALL);
4907 gdb_assert (ret == -1);
4908
4909 lwp = find_lwp_pid (step_over_bkpt);
4910 if (lwp != NULL)
4911 finish_step_over (lwp);
4912 step_over_bkpt = null_ptid;
4913 unsuspend_all_lwps (lwp);
4914 }
4915 }
4916
4917 /* This function is called once per thread. We check the thread's resume
4918 request, which will tell us whether to resume, step, or leave the thread
4919 stopped; and what signal, if any, it should be sent.
4920
4921 For threads which we aren't explicitly told otherwise, we preserve
4922 the stepping flag; this is used for stepping over gdbserver-placed
4923 breakpoints.
4924
4925 If pending_flags was set in any thread, we queue any needed
4926 signals, since we won't actually resume. We already have a pending
4927 event to report, so we don't need to preserve any step requests;
4928 they should be re-issued if necessary. */
4929
4930 static void
4931 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4932 {
4933 struct lwp_info *lwp = get_thread_lwp (thread);
4934 int leave_pending;
4935
4936 if (lwp->resume == NULL)
4937 return;
4938
4939 if (lwp->resume->kind == resume_stop)
4940 {
4941 if (debug_threads)
4942 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4943
4944 if (!lwp->stopped)
4945 {
4946 if (debug_threads)
4947 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4948
4949 /* Stop the thread, and wait for the event asynchronously,
4950 through the event loop. */
4951 send_sigstop (lwp);
4952 }
4953 else
4954 {
4955 if (debug_threads)
4956 debug_printf ("already stopped LWP %ld\n",
4957 lwpid_of (thread));
4958
4959 /* The LWP may have been stopped in an internal event that
4960 was not meant to be notified back to GDB (e.g., gdbserver
4961 breakpoint), so we should be reporting a stop event in
4962 this case too. */
4963
4964 /* If the thread already has a pending SIGSTOP, this is a
4965 no-op. Otherwise, something later will presumably resume
4966 the thread and this will cause it to cancel any pending
4967 operation, due to last_resume_kind == resume_stop. If
4968 the thread already has a pending status to report, we
4969 will still report it the next time we wait - see
4970 status_pending_p_callback. */
4971
4972 /* If we already have a pending signal to report, then
4973 there's no need to queue a SIGSTOP, as this means we're
4974 midway through moving the LWP out of the jumppad, and we
4975 will report the pending signal as soon as that is
4976 finished. */
4977 if (lwp->pending_signals_to_report == NULL)
4978 send_sigstop (lwp);
4979 }
4980
4981 /* For stop requests, we're done. */
4982 lwp->resume = NULL;
4983 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4984 return;
4985 }
4986
4987 /* If this thread which is about to be resumed has a pending status,
4988 then don't resume it - we can just report the pending status.
4989 Likewise if it is suspended, because e.g., another thread is
4990 stepping past a breakpoint. Make sure to queue any signals that
4991 would otherwise be sent. In all-stop mode, we do this decision
4992 based on if *any* thread has a pending status. If there's a
4993 thread that needs the step-over-breakpoint dance, then don't
4994 resume any other thread but that particular one. */
4995 leave_pending = (lwp->suspended
4996 || lwp->status_pending_p
4997 || leave_all_stopped);
4998
4999 /* If we have a new signal, enqueue the signal. */
5000 if (lwp->resume->sig != 0)
5001 {
5002 siginfo_t info, *info_p;
5003
5004 /* If this is the same signal we were previously stopped by,
5005 make sure to queue its siginfo. */
5006 if (WIFSTOPPED (lwp->last_status)
5007 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5008 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5009 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5010 info_p = &info;
5011 else
5012 info_p = NULL;
5013
5014 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5015 }
5016
5017 if (!leave_pending)
5018 {
5019 if (debug_threads)
5020 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5021
5022 proceed_one_lwp (thread, NULL);
5023 }
5024 else
5025 {
5026 if (debug_threads)
5027 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5028 }
5029
5030 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5031 lwp->resume = NULL;
5032 }
5033
5034 static void
5035 linux_resume (struct thread_resume *resume_info, size_t n)
5036 {
5037 struct thread_info *need_step_over = NULL;
5038
5039 if (debug_threads)
5040 {
5041 debug_enter ();
5042 debug_printf ("linux_resume:\n");
5043 }
5044
5045 for_each_thread ([&] (thread_info *thread)
5046 {
5047 linux_set_resume_request (thread, resume_info, n);
5048 });
5049
5050 /* If there is a thread which would otherwise be resumed, which has
5051 a pending status, then don't resume any threads - we can just
5052 report the pending status. Make sure to queue any signals that
5053 would otherwise be sent. In non-stop mode, we'll apply this
5054 logic to each thread individually. We consume all pending events
5055      before considering whether to start a step-over (in all-stop).  */
5056 bool any_pending = false;
5057 if (!non_stop)
5058 any_pending = find_thread (resume_status_pending_p) != NULL;
5059
5060 /* If there is a thread which would otherwise be resumed, which is
5061 stopped at a breakpoint that needs stepping over, then don't
5062 resume any threads - have it step over the breakpoint with all
5063 other threads stopped, then resume all threads again. Make sure
5064 to queue any signals that would otherwise be delivered or
5065 queued. */
5066 if (!any_pending && supports_breakpoints ())
5067 need_step_over = find_thread (need_step_over_p);
5068
5069 bool leave_all_stopped = (need_step_over != NULL || any_pending);
5070
5071 if (debug_threads)
5072 {
5073 if (need_step_over != NULL)
5074 debug_printf ("Not resuming all, need step over\n");
5075 else if (any_pending)
5076 debug_printf ("Not resuming, all-stop and found "
5077 "an LWP with pending status\n");
5078 else
5079 debug_printf ("Resuming, no pending status or step over needed\n");
5080 }
5081
5082 /* Even if we're leaving threads stopped, queue all signals we'd
5083 otherwise deliver. */
5084 for_each_thread ([&] (thread_info *thread)
5085 {
5086 linux_resume_one_thread (thread, leave_all_stopped);
5087 });
5088
5089 if (need_step_over)
5090 start_step_over (get_thread_lwp (need_step_over));
5091
5092 if (debug_threads)
5093 {
5094 debug_printf ("linux_resume done\n");
5095 debug_exit ();
5096 }
5097
5098 /* We may have events that were pending that can/should be sent to
5099 the client now. Trigger a linux_wait call. */
5100 if (target_is_async_p ())
5101 async_file_mark ();
5102 }
5103
5104 /* This function is called once per thread. We check the thread's
5105 last resume request, which will tell us whether to resume, step, or
5106 leave the thread stopped. Any signal the client requested to be
5107 delivered has already been enqueued at this point.
5108
5109 If any thread that GDB wants running is stopped at an internal
5110 breakpoint that needs stepping over, we start a step-over operation
5111 on that particular thread, and leave all others stopped. */
5112
5113 static void
5114 proceed_one_lwp (thread_info *thread, lwp_info *except)
5115 {
5116 struct lwp_info *lwp = get_thread_lwp (thread);
5117 int step;
5118
5119 if (lwp == except)
5120 return;
5121
5122 if (debug_threads)
5123 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5124
5125 if (!lwp->stopped)
5126 {
5127 if (debug_threads)
5128 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5129 return;
5130 }
5131
5132 if (thread->last_resume_kind == resume_stop
5133 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5134 {
5135 if (debug_threads)
5136 	debug_printf ("   client wants LWP %ld to remain stopped\n",
5137 lwpid_of (thread));
5138 return;
5139 }
5140
5141 if (lwp->status_pending_p)
5142 {
5143 if (debug_threads)
5144 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5145 lwpid_of (thread));
5146 return;
5147 }
5148
5149 gdb_assert (lwp->suspended >= 0);
5150
5151 if (lwp->suspended)
5152 {
5153 if (debug_threads)
5154 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5155 return;
5156 }
5157
5158 if (thread->last_resume_kind == resume_stop
5159 && lwp->pending_signals_to_report == NULL
5160 && (lwp->collecting_fast_tracepoint
5161 == fast_tpoint_collect_result::not_collecting))
5162 {
5163 /* We haven't reported this LWP as stopped yet (otherwise, the
5164 	 last_status.kind check above would catch it, and we wouldn't
5165 	 reach here).  This LWP may have been momentarily paused by a
5166 stop_all_lwps call while handling for example, another LWP's
5167 step-over. In that case, the pending expected SIGSTOP signal
5168 that was queued at vCont;t handling time will have already
5169 been consumed by wait_for_sigstop, and so we need to requeue
5170 another one here. Note that if the LWP already has a SIGSTOP
5171 pending, this is a no-op. */
5172
5173 if (debug_threads)
5174 debug_printf ("Client wants LWP %ld to stop. "
5175 "Making sure it has a SIGSTOP pending\n",
5176 lwpid_of (thread));
5177
5178 send_sigstop (lwp);
5179 }
5180
5181 if (thread->last_resume_kind == resume_step)
5182 {
5183 if (debug_threads)
5184 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5185 lwpid_of (thread));
5186
5187 /* If resume_step is requested by GDB, install single-step
5188 breakpoints when the thread is about to be actually resumed if
5189 the single-step breakpoints weren't removed. */
5190 if (can_software_single_step ()
5191 && !has_single_step_breakpoints (thread))
5192 install_software_single_step_breakpoints (lwp);
5193
5194 step = maybe_hw_step (thread);
5195 }
5196 else if (lwp->bp_reinsert != 0)
5197 {
5198 if (debug_threads)
5199 debug_printf (" stepping LWP %ld, reinsert set\n",
5200 lwpid_of (thread));
5201
5202 step = maybe_hw_step (thread);
5203 }
5204 else
5205 step = 0;
5206
5207 linux_resume_one_lwp (lwp, step, 0, NULL);
5208 }
5209
5210 static void
5211 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5212 {
5213 struct lwp_info *lwp = get_thread_lwp (thread);
5214
5215 if (lwp == except)
5216 return;
5217
5218 lwp_suspended_decr (lwp);
5219
5220 proceed_one_lwp (thread, except);
5221 }
5222
5223 /* When we finish a step-over, set threads running again. If there's
5224 another thread that may need a step-over, now's the time to start
5225 it. Eventually, we'll move all threads past their breakpoints. */
5226
5227 static void
5228 proceed_all_lwps (void)
5229 {
5230 struct thread_info *need_step_over;
5231
5232 /* If there is a thread which would otherwise be resumed, which is
5233 stopped at a breakpoint that needs stepping over, then don't
5234 resume any threads - have it step over the breakpoint with all
5235 other threads stopped, then resume all threads again. */
5236
5237 if (supports_breakpoints ())
5238 {
5239 need_step_over = find_thread (need_step_over_p);
5240
5241 if (need_step_over != NULL)
5242 {
5243 if (debug_threads)
5244 debug_printf ("proceed_all_lwps: found "
5245 "thread %ld needing a step-over\n",
5246 lwpid_of (need_step_over));
5247
5248 start_step_over (get_thread_lwp (need_step_over));
5249 return;
5250 }
5251 }
5252
5253 if (debug_threads)
5254 debug_printf ("Proceeding, no step-over needed\n");
5255
5256 for_each_thread ([] (thread_info *thread)
5257 {
5258 proceed_one_lwp (thread, NULL);
5259 });
5260 }
5261
5262 /* Stopped LWPs that the client wanted to be running, that don't have
5263 pending statuses, are set to run again, except for EXCEPT, if not
5264 NULL. This undoes a stop_all_lwps call. */
5265
5266 static void
5267 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5268 {
5269 if (debug_threads)
5270 {
5271 debug_enter ();
5272 if (except)
5273 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5274 lwpid_of (get_lwp_thread (except)));
5275 else
5276 debug_printf ("unstopping all lwps\n");
5277 }
5278
5279 if (unsuspend)
5280 for_each_thread ([&] (thread_info *thread)
5281 {
5282 unsuspend_and_proceed_one_lwp (thread, except);
5283 });
5284 else
5285 for_each_thread ([&] (thread_info *thread)
5286 {
5287 proceed_one_lwp (thread, except);
5288 });
5289
5290 if (debug_threads)
5291 {
5292 debug_printf ("unstop_all_lwps done\n");
5293 debug_exit ();
5294 }
5295 }
5296
5297
5298 #ifdef HAVE_LINUX_REGSETS
5299
5300 #define use_linux_regsets 1
5301
5302 /* Returns true if REGSET has been disabled. */
5303
5304 static int
5305 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5306 {
5307 return (info->disabled_regsets != NULL
5308 && info->disabled_regsets[regset - info->regsets]);
5309 }
5310
5311 /* Disable REGSET. */
5312
5313 static void
5314 disable_regset (struct regsets_info *info, struct regset_info *regset)
5315 {
5316 int dr_offset;
5317
5318 dr_offset = regset - info->regsets;
5319 if (info->disabled_regsets == NULL)
5320 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5321 info->disabled_regsets[dr_offset] = 1;
5322 }
5323
5324 static int
5325 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5326 struct regcache *regcache)
5327 {
5328 struct regset_info *regset;
5329 int saw_general_regs = 0;
5330 int pid;
5331 struct iovec iov;
5332
5333 pid = lwpid_of (current_thread);
5334 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5335 {
5336 void *buf, *data;
5337 int nt_type, res;
5338
5339 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5340 continue;
5341
5342 buf = xmalloc (regset->size);
5343
5344 nt_type = regset->nt_type;
5345 if (nt_type)
5346 {
5347 iov.iov_base = buf;
5348 iov.iov_len = regset->size;
5349 data = (void *) &iov;
5350 }
5351 else
5352 data = buf;
5353
5354 #ifndef __sparc__
5355 res = ptrace (regset->get_request, pid,
5356 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5357 #else
5358 res = ptrace (regset->get_request, pid, data, nt_type);
5359 #endif
5360 if (res < 0)
5361 {
5362 if (errno == EIO)
5363 {
5364 /* If we get EIO on a regset, do not try it again for
5365 this process mode. */
5366 disable_regset (regsets_info, regset);
5367 }
5368 else if (errno == ENODATA)
5369 {
5370 /* ENODATA may be returned if the regset is currently
5371 not "active". This can happen in normal operation,
5372 so suppress the warning in this case. */
5373 }
5374 else if (errno == ESRCH)
5375 {
5376 /* At this point, ESRCH should mean the process is
5377 already gone, in which case we simply ignore attempts
5378 to read its registers. */
5379 }
5380 else
5381 {
5382 char s[256];
5383 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5384 pid);
5385 perror (s);
5386 }
5387 }
5388 else
5389 {
5390 if (regset->type == GENERAL_REGS)
5391 saw_general_regs = 1;
5392 regset->store_function (regcache, buf);
5393 }
5394 free (buf);
5395 }
5396 if (saw_general_regs)
5397 return 0;
5398 else
5399 return 1;
5400 }
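
/* Standalone sketch of the regset fetch above for the common
   NT_PRSTATUS case: PTRACE_GETREGSET takes the note type where the
   address argument normally goes, plus an iovec describing the buffer;
   the kernel updates iov_len to the amount actually written.  Assumes
   x86-64 and glibc.  */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

static int
fetch_gregs_sketch (pid_t tid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof *regs;
  return ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov);
}
#endif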
5401
5402 static int
5403 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5404 struct regcache *regcache)
5405 {
5406 struct regset_info *regset;
5407 int saw_general_regs = 0;
5408 int pid;
5409 struct iovec iov;
5410
5411 pid = lwpid_of (current_thread);
5412 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5413 {
5414 void *buf, *data;
5415 int nt_type, res;
5416
5417 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5418 || regset->fill_function == NULL)
5419 continue;
5420
5421 buf = xmalloc (regset->size);
5422
5423 /* First fill the buffer with the current register set contents,
5424 in case there are any items in the kernel's regset that are
5425 not in gdbserver's regcache. */
5426
5427 nt_type = regset->nt_type;
5428 if (nt_type)
5429 {
5430 iov.iov_base = buf;
5431 iov.iov_len = regset->size;
5432 data = (void *) &iov;
5433 }
5434 else
5435 data = buf;
5436
5437 #ifndef __sparc__
5438 res = ptrace (regset->get_request, pid,
5439 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5440 #else
5441 res = ptrace (regset->get_request, pid, data, nt_type);
5442 #endif
5443
5444 if (res == 0)
5445 {
5446 /* Then overlay our cached registers on that. */
5447 regset->fill_function (regcache, buf);
5448
5449 /* Only now do we write the register set. */
5450 #ifndef __sparc__
5451 res = ptrace (regset->set_request, pid,
5452 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5453 #else
5454 res = ptrace (regset->set_request, pid, data, nt_type);
5455 #endif
5456 }
5457
5458 if (res < 0)
5459 {
5460 if (errno == EIO)
5461 {
5462 /* If we get EIO on a regset, do not try it again for
5463 this process mode. */
5464 disable_regset (regsets_info, regset);
5465 }
5466 else if (errno == ESRCH)
5467 {
5468 /* At this point, ESRCH should mean the process is
5469 already gone, in which case we simply ignore attempts
5470 to change its registers. See also the related
5471 comment in linux_resume_one_lwp. */
5472 free (buf);
5473 return 0;
5474 }
5475 else
5476 {
5477 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5478 }
5479 }
5480 else if (regset->type == GENERAL_REGS)
5481 saw_general_regs = 1;
5482 free (buf);
5483 }
5484 if (saw_general_regs)
5485 return 0;
5486 else
5487 return 1;
5488 }
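
/* The store path above is deliberately read-modify-write: fetch the
   kernel's current regset, overlay only what the regcache owns, and
   write the whole set back, so kernel-side fields gdbserver doesn't
   track survive.  Standalone sketch of that discipline, changing a
   single field (x86-64/glibc assumptions):  */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

static int
poke_one_reg_sketch (pid_t tid, unsigned long long new_rip)
{
  struct user_regs_struct regs;
  struct iovec iov = { &regs, sizeof regs };

  if (ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov) != 0)
    return -1;
  regs.rip = new_rip;		/* Modify just the field we own...  */
  /* ...then write the full, otherwise untouched set back.  */
  return ptrace (PTRACE_SETREGSET, tid, (void *) NT_PRSTATUS, &iov);
}
#endif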
5489
5490 #else /* !HAVE_LINUX_REGSETS */
5491
5492 #define use_linux_regsets 0
5493 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5494 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5495
5496 #endif
5497
5498 /* Return 1 if register REGNO is supported by one of the regset ptrace
5499 calls or 0 if it has to be transferred individually. */
5500
5501 static int
5502 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5503 {
5504 unsigned char mask = 1 << (regno % 8);
5505 size_t index = regno / 8;
5506
5507 return (use_linux_regsets
5508 && (regs_info->regset_bitmap == NULL
5509 || (regs_info->regset_bitmap[index] & mask) != 0));
5510 }
5511
5512 #ifdef HAVE_LINUX_USRREGS
5513
5514 static int
5515 register_addr (const struct usrregs_info *usrregs, int regnum)
5516 {
5517 int addr;
5518
5519 if (regnum < 0 || regnum >= usrregs->num_regs)
5520 error ("Invalid register number %d.", regnum);
5521
5522 addr = usrregs->regmap[regnum];
5523
5524 return addr;
5525 }
5526
5527 /* Fetch one register. */
5528 static void
5529 fetch_register (const struct usrregs_info *usrregs,
5530 struct regcache *regcache, int regno)
5531 {
5532 CORE_ADDR regaddr;
5533 int i, size;
5534 char *buf;
5535 int pid;
5536
5537 if (regno >= usrregs->num_regs)
5538 return;
5539 if ((*the_low_target.cannot_fetch_register) (regno))
5540 return;
5541
5542 regaddr = register_addr (usrregs, regno);
5543 if (regaddr == -1)
5544 return;
5545
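/* Round the register size up to a whole number of ptrace words; for
example, a 10-byte register with an 8-byte PTRACE_XFER_TYPE needs a
16-byte buffer. */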
5546 size = ((register_size (regcache->tdesc, regno)
5547 + sizeof (PTRACE_XFER_TYPE) - 1)
5548 & -sizeof (PTRACE_XFER_TYPE));
5549 buf = (char *) alloca (size);
5550
5551 pid = lwpid_of (current_thread);
5552 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5553 {
5554 errno = 0;
5555 *(PTRACE_XFER_TYPE *) (buf + i) =
5556 ptrace (PTRACE_PEEKUSER, pid,
5557 /* Coerce to a uintptr_t first to avoid potential gcc warning
5558 about coercing an 8 byte integer to a 4 byte pointer. */
5559 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5560 regaddr += sizeof (PTRACE_XFER_TYPE);
5561 if (errno != 0)
5562 {
5563 /* Mark register REGNO unavailable. */
5564 supply_register (regcache, regno, NULL);
5565 return;
5566 }
5567 }
5568
5569 if (the_low_target.supply_ptrace_register)
5570 the_low_target.supply_ptrace_register (regcache, regno, buf);
5571 else
5572 supply_register (regcache, regno, buf);
5573 }
5574
5575 /* Store one register. */
5576 static void
5577 store_register (const struct usrregs_info *usrregs,
5578 struct regcache *regcache, int regno)
5579 {
5580 CORE_ADDR regaddr;
5581 int i, size;
5582 char *buf;
5583 int pid;
5584
5585 if (regno >= usrregs->num_regs)
5586 return;
5587 if ((*the_low_target.cannot_store_register) (regno))
5588 return;
5589
5590 regaddr = register_addr (usrregs, regno);
5591 if (regaddr == -1)
5592 return;
5593
5594 size = ((register_size (regcache->tdesc, regno)
5595 + sizeof (PTRACE_XFER_TYPE) - 1)
5596 & -sizeof (PTRACE_XFER_TYPE));
5597 buf = (char *) alloca (size);
5598 memset (buf, 0, size);
5599
5600 if (the_low_target.collect_ptrace_register)
5601 the_low_target.collect_ptrace_register (regcache, regno, buf);
5602 else
5603 collect_register (regcache, regno, buf);
5604
5605 pid = lwpid_of (current_thread);
5606 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5607 {
5608 errno = 0;
5609 ptrace (PTRACE_POKEUSER, pid,
5610 /* Coerce to a uintptr_t first to avoid potential gcc warning
5611 about coercing an 8 byte integer to a 4 byte pointer. */
5612 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5613 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5614 if (errno != 0)
5615 {
5616 /* At this point, ESRCH should mean the process is
5617 already gone, in which case we simply ignore attempts
5618 to change its registers. See also the related
5619 comment in linux_resume_one_lwp. */
5620 if (errno == ESRCH)
5621 return;
5622
5623 if ((*the_low_target.cannot_store_register) (regno) == 0)
5624 error ("writing register %d: %s", regno, strerror (errno));
5625 }
5626 regaddr += sizeof (PTRACE_XFER_TYPE);
5627 }
5628 }
5629
5630 /* Fetch all registers, or just one, from the child process.
5631 If REGNO is -1, do this for all registers, skipping any that are
5632 assumed to have been retrieved by regsets_fetch_inferior_registers,
5633 unless ALL is non-zero.
5634 Otherwise, REGNO specifies which register (so we can save time). */
5635 static void
5636 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5637 struct regcache *regcache, int regno, int all)
5638 {
5639 struct usrregs_info *usr = regs_info->usrregs;
5640
5641 if (regno == -1)
5642 {
5643 for (regno = 0; regno < usr->num_regs; regno++)
5644 if (all || !linux_register_in_regsets (regs_info, regno))
5645 fetch_register (usr, regcache, regno);
5646 }
5647 else
5648 fetch_register (usr, regcache, regno);
5649 }
5650
5651 /* Store our register values back into the inferior.
5652 If REGNO is -1, do this for all registers, skipping any that are
5653 assumed to have been saved by regsets_store_inferior_registers,
5654 unless ALL is non-zero.
5655 Otherwise, REGNO specifies which register (so we can save time). */
5656 static void
5657 usr_store_inferior_registers (const struct regs_info *regs_info,
5658 struct regcache *regcache, int regno, int all)
5659 {
5660 struct usrregs_info *usr = regs_info->usrregs;
5661
5662 if (regno == -1)
5663 {
5664 for (regno = 0; regno < usr->num_regs; regno++)
5665 if (all || !linux_register_in_regsets (regs_info, regno))
5666 store_register (usr, regcache, regno);
5667 }
5668 else
5669 store_register (usr, regcache, regno);
5670 }
5671
5672 #else /* !HAVE_LINUX_USRREGS */
5673
5674 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5675 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5676
5677 #endif
5678
5679
5680 static void
5681 linux_fetch_registers (struct regcache *regcache, int regno)
5682 {
5683 int use_regsets;
5684 int all = 0;
5685 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5686
5687 if (regno == -1)
5688 {
5689 if (the_low_target.fetch_register != NULL
5690 && regs_info->usrregs != NULL)
5691 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5692 (*the_low_target.fetch_register) (regcache, regno);
5693
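/* A nonzero result means no GENERAL_REGS regset was usable, so the
usrregs fallback below must fetch every register rather than only
those outside the regsets. */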
5694 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5695 if (regs_info->usrregs != NULL)
5696 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5697 }
5698 else
5699 {
5700 if (the_low_target.fetch_register != NULL
5701 && (*the_low_target.fetch_register) (regcache, regno))
5702 return;
5703
5704 use_regsets = linux_register_in_regsets (regs_info, regno);
5705 if (use_regsets)
5706 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5707 regcache);
5708 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5709 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5710 }
5711 }
5712
5713 static void
5714 linux_store_registers (struct regcache *regcache, int regno)
5715 {
5716 int use_regsets;
5717 int all = 0;
5718 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5719
5720 if (regno == -1)
5721 {
5722 all = regsets_store_inferior_registers (regs_info->regsets_info,
5723 regcache);
5724 if (regs_info->usrregs != NULL)
5725 usr_store_inferior_registers (regs_info, regcache, regno, all);
5726 }
5727 else
5728 {
5729 use_regsets = linux_register_in_regsets (regs_info, regno);
5730 if (use_regsets)
5731 all = regsets_store_inferior_registers (regs_info->regsets_info,
5732 regcache);
5733 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5734 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5735 }
5736 }
5737
5738
5739 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5740 to debugger memory starting at MYADDR. */
5741
5742 static int
5743 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5744 {
5745 int pid = lwpid_of (current_thread);
5746 PTRACE_XFER_TYPE *buffer;
5747 CORE_ADDR addr;
5748 int count;
5749 char filename[64];
5750 int i;
5751 int ret;
5752 int fd;
5753
5754 /* Try using /proc. Don't bother for one word. */
5755 if (len >= 3 * sizeof (long))
5756 {
5757 int bytes;
5758
5759 /* We could keep this file open and cache it - possibly one per
5760 thread. That requires some juggling, but is even faster. */
5761 sprintf (filename, "/proc/%d/mem", pid);
5762 fd = open (filename, O_RDONLY | O_LARGEFILE);
5763 if (fd == -1)
5764 goto no_proc;
5765
5766 /* If pread64 is available, use it. It's faster if the kernel
5767 supports it (only one syscall), and it's 64-bit safe even on
5768 32-bit platforms (for instance, SPARC debugging a SPARC64
5769 application). */
5770 #ifdef HAVE_PREAD64
5771 bytes = pread64 (fd, myaddr, len, memaddr);
5772 #else
5773 bytes = -1;
5774 if (lseek (fd, memaddr, SEEK_SET) != -1)
5775 bytes = read (fd, myaddr, len);
5776 #endif
5777
5778 close (fd);
5779 if (bytes == len)
5780 return 0;
5781
5782 /* Some data was read; we'll try to get the rest with ptrace. */
5783 if (bytes > 0)
5784 {
5785 memaddr += bytes;
5786 myaddr += bytes;
5787 len -= bytes;
5788 }
5789 }
5790
5791 no_proc:
5792 /* Round starting address down to longword boundary. */
5793 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5794 /* Round ending address up; get number of longwords that makes. */
5795 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5796 / sizeof (PTRACE_XFER_TYPE));
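/* For example, with 4-byte ptrace words, MEMADDR 0x1003 and LEN 10
give ADDR 0x1000 and COUNT 4, covering 0x1000 through 0x100f. */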
5797 /* Allocate buffer of that many longwords. */
5798 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5799
5800 /* Read all the longwords. */
5801 errno = 0;
5802 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5803 {
5804 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5805 about coercing an 8 byte integer to a 4 byte pointer. */
5806 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5807 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5808 (PTRACE_TYPE_ARG4) 0);
5809 if (errno)
5810 break;
5811 }
5812 ret = errno;
5813
5814 /* Copy appropriate bytes out of the buffer. */
5815 if (i > 0)
5816 {
5817 i *= sizeof (PTRACE_XFER_TYPE);
5818 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5819 memcpy (myaddr,
5820 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5821 i < len ? i : len);
5822 }
5823
5824 return ret;
5825 }
5826
5827 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5828 memory at MEMADDR. On failure (cannot write to the inferior)
5829 returns the value of errno. Always succeeds if LEN is zero. */
5830
5831 static int
5832 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5833 {
5834 int i;
5835 /* Round starting address down to longword boundary. */
5836 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5837 /* Round ending address up; get number of longwords that makes. */
5838 int count
5839 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5840 / sizeof (PTRACE_XFER_TYPE);
5841
5842 /* Allocate buffer of that many longwords. */
5843 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5844
5845 int pid = lwpid_of (current_thread);
5846
5847 if (len == 0)
5848 {
5849 /* Zero length write always succeeds. */
5850 return 0;
5851 }
5852
5853 if (debug_threads)
5854 {
5855 /* Dump up to four bytes. */
5856 char str[4 * 2 + 1];
5857 char *p = str;
5858 int dump = len < 4 ? len : 4;
5859
5860 for (i = 0; i < dump; i++)
5861 {
5862 sprintf (p, "%02x", myaddr[i]);
5863 p += 2;
5864 }
5865 *p = '\0';
5866
5867 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5868 str, (long) memaddr, pid);
5869 }
5870
5871 /* Fill start and end extra bytes of buffer with existing memory data. */
5872
5873 errno = 0;
5874 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5875 about coercing an 8 byte integer to a 4 byte pointer. */
5876 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5877 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5878 (PTRACE_TYPE_ARG4) 0);
5879 if (errno)
5880 return errno;
5881
5882 if (count > 1)
5883 {
5884 errno = 0;
5885 buffer[count - 1]
5886 = ptrace (PTRACE_PEEKTEXT, pid,
5887 /* Coerce to a uintptr_t first to avoid potential gcc warning
5888 about coercing an 8 byte integer to a 4 byte pointer. */
5889 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5890 * sizeof (PTRACE_XFER_TYPE)),
5891 (PTRACE_TYPE_ARG4) 0);
5892 if (errno)
5893 return errno;
5894 }
5895
5896 /* Copy data to be written over corresponding part of buffer. */
5897
5898 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5899 myaddr, len);
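/* The bytes of the first and last words that lie outside
[MEMADDR, MEMADDR + LEN) keep the inferior contents read above, so
the word-granular POKETEXT writes below cannot clobber neighboring
memory. */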
5900
5901 /* Write the entire buffer. */
5902
5903 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5904 {
5905 errno = 0;
5906 ptrace (PTRACE_POKETEXT, pid,
5907 /* Coerce to a uintptr_t first to avoid potential gcc warning
5908 about coercing an 8 byte integer to a 4 byte pointer. */
5909 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5910 (PTRACE_TYPE_ARG4) buffer[i]);
5911 if (errno)
5912 return errno;
5913 }
5914
5915 return 0;
5916 }
5917
5918 static void
5919 linux_look_up_symbols (void)
5920 {
5921 #ifdef USE_THREAD_DB
5922 struct process_info *proc = current_process ();
5923
5924 if (proc->priv->thread_db != NULL)
5925 return;
5926
5927 thread_db_init ();
5928 #endif
5929 }
5930
5931 static void
5932 linux_request_interrupt (void)
5933 {
5934 /* Send a SIGINT to the process group. This acts just like the user
5935 typed a ^C on the controlling terminal. */
5936 kill (-signal_pid, SIGINT);
5937 }
5938
5939 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5940 to debugger memory starting at MYADDR. */
5941
5942 static int
5943 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5944 {
5945 char filename[PATH_MAX];
5946 int fd, n;
5947 int pid = lwpid_of (current_thread);
5948
5949 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5950
5951 fd = open (filename, O_RDONLY);
5952 if (fd < 0)
5953 return -1;
5954
5955 if (offset != (CORE_ADDR) 0
5956 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5957 n = -1;
5958 else
5959 n = read (fd, myaddr, len);
5960
5961 close (fd);
5962
5963 return n;
5964 }
5965
5966 /* These breakpoint and watchpoint related wrapper functions simply
5967 pass on the function call if the target has registered a
5968 corresponding function. */
5969
5970 static int
5971 linux_supports_z_point_type (char z_type)
5972 {
5973 return (the_low_target.supports_z_point_type != NULL
5974 && the_low_target.supports_z_point_type (z_type));
5975 }
5976
5977 static int
5978 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5979 int size, struct raw_breakpoint *bp)
5980 {
5981 if (type == raw_bkpt_type_sw)
5982 return insert_memory_breakpoint (bp);
5983 else if (the_low_target.insert_point != NULL)
5984 return the_low_target.insert_point (type, addr, size, bp);
5985 else
5986 /* Unsupported (see target.h). */
5987 return 1;
5988 }
5989
5990 static int
5991 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5992 int size, struct raw_breakpoint *bp)
5993 {
5994 if (type == raw_bkpt_type_sw)
5995 return remove_memory_breakpoint (bp);
5996 else if (the_low_target.remove_point != NULL)
5997 return the_low_target.remove_point (type, addr, size, bp);
5998 else
5999 /* Unsupported (see target.h). */
6000 return 1;
6001 }
6002
6003 /* Implement the to_stopped_by_sw_breakpoint target_ops
6004 method. */
6005
6006 static int
6007 linux_stopped_by_sw_breakpoint (void)
6008 {
6009 struct lwp_info *lwp = get_thread_lwp (current_thread);
6010
6011 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6012 }
6013
6014 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6015 method. */
6016
6017 static int
6018 linux_supports_stopped_by_sw_breakpoint (void)
6019 {
6020 return USE_SIGTRAP_SIGINFO;
6021 }
6022
6023 /* Implement the to_stopped_by_hw_breakpoint target_ops
6024 method. */
6025
6026 static int
6027 linux_stopped_by_hw_breakpoint (void)
6028 {
6029 struct lwp_info *lwp = get_thread_lwp (current_thread);
6030
6031 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6032 }
6033
6034 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6035 method. */
6036
6037 static int
6038 linux_supports_stopped_by_hw_breakpoint (void)
6039 {
6040 return USE_SIGTRAP_SIGINFO;
6041 }
6042
6043 /* Implement the supports_hardware_single_step target_ops method. */
6044
6045 static int
6046 linux_supports_hardware_single_step (void)
6047 {
6048 return can_hardware_single_step ();
6049 }
6050
6051 static int
6052 linux_supports_software_single_step (void)
6053 {
6054 return can_software_single_step ();
6055 }
6056
6057 static int
6058 linux_stopped_by_watchpoint (void)
6059 {
6060 struct lwp_info *lwp = get_thread_lwp (current_thread);
6061
6062 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6063 }
6064
6065 static CORE_ADDR
6066 linux_stopped_data_address (void)
6067 {
6068 struct lwp_info *lwp = get_thread_lwp (current_thread);
6069
6070 return lwp->stopped_data_address;
6071 }
6072
6073 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6074 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6075 && defined(PT_TEXT_END_ADDR)
6076
6077 /* This is only used for targets that define PT_TEXT_ADDR,
6078 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6079 target presumably has different ways of acquiring this information,
6080 such as loadmaps. */
6081
6082 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6083 to tell gdb about. */
6084
6085 static int
6086 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6087 {
6088 unsigned long text, text_end, data;
6089 int pid = lwpid_of (current_thread);
6090
6091 errno = 0;
6092
6093 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6094 (PTRACE_TYPE_ARG4) 0);
6095 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6096 (PTRACE_TYPE_ARG4) 0);
6097 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6098 (PTRACE_TYPE_ARG4) 0);
6099
6100 if (errno == 0)
6101 {
6102 /* Both text and data offsets produced at compile-time (and so
6103 used by gdb) are relative to the beginning of the program,
6104 with the data segment immediately following the text segment.
6105 However, the actual runtime layout in memory may put the data
6106 somewhere else, so when we send gdb a data base-address, we
6107 use the real data base address and subtract the compile-time
6108 data base-address from it (which is just the length of the
6109 text segment). BSS immediately follows data in both
6110 cases. */
6111 *text_p = text;
6112 *data_p = data - (text_end - text);
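/* For example, if text runs from 0x10000 to 0x18000 at runtime and
data was loaded at 0x40000, gdb is given a data base of
0x40000 - 0x8000 = 0x38000, so that compile-time data offsets
(which begin right after text) resolve to the correct runtime
addresses. */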
6113
6114 return 1;
6115 }
6116 return 0;
6117 }
6118 #endif
6119
6120 static int
6121 linux_qxfer_osdata (const char *annex,
6122 unsigned char *readbuf, unsigned const char *writebuf,
6123 CORE_ADDR offset, int len)
6124 {
6125 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6126 }
6127
6128 /* Convert a native/host siginfo object into/from the siginfo in the
6129 layout of the inferior's architecture. */
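/* DIRECTION is 1 to convert from the inferior layout in INF_SIGINFO
to the native layout in SIGINFO, and 0 to convert the other way
round, matching the calls in linux_xfer_siginfo below. */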
6130
6131 static void
6132 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6133 {
6134 int done = 0;
6135
6136 if (the_low_target.siginfo_fixup != NULL)
6137 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6138
6139 /* If there was no callback, or the callback didn't do anything,
6140 then just do a straight memcpy. */
6141 if (!done)
6142 {
6143 if (direction == 1)
6144 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6145 else
6146 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6147 }
6148 }
6149
6150 static int
6151 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6152 unsigned const char *writebuf, CORE_ADDR offset, int len)
6153 {
6154 int pid;
6155 siginfo_t siginfo;
6156 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6157
6158 if (current_thread == NULL)
6159 return -1;
6160
6161 pid = lwpid_of (current_thread);
6162
6163 if (debug_threads)
6164 debug_printf ("%s siginfo for lwp %d.\n",
6165 readbuf != NULL ? "Reading" : "Writing",
6166 pid);
6167
6168 if (offset >= sizeof (siginfo))
6169 return -1;
6170
6171 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6172 return -1;
6173
6174 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6175 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6176 inferior with a 64-bit GDBSERVER should look the same as debugging it
6177 with a 32-bit GDBSERVER, we need to convert it. */
6178 siginfo_fixup (&siginfo, inf_siginfo, 0);
6179
6180 if (offset + len > sizeof (siginfo))
6181 len = sizeof (siginfo) - offset;
6182
6183 if (readbuf != NULL)
6184 memcpy (readbuf, inf_siginfo + offset, len);
6185 else
6186 {
6187 memcpy (inf_siginfo + offset, writebuf, len);
6188
6189 /* Convert back to ptrace layout before flushing it out. */
6190 siginfo_fixup (&siginfo, inf_siginfo, 1);
6191
6192 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6193 return -1;
6194 }
6195
6196 return len;
6197 }
6198
6199 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6200 it notifies us when children change state; and it acts as the
6201 handler for the sigsuspend in my_waitpid. */
6202
6203 static void
6204 sigchld_handler (int signo)
6205 {
6206 int old_errno = errno;
6207
6208 if (debug_threads)
6209 {
6210 do
6211 {
6212 /* fprintf is not async-signal-safe, so call write
6213 directly. */
6214 if (write (2, "sigchld_handler\n",
6215 sizeof ("sigchld_handler\n") - 1) < 0)
6216 break; /* just ignore */
6217 } while (0);
6218 }
6219
6220 if (target_is_async_p ())
6221 async_file_mark (); /* trigger a linux_wait */
6222
6223 errno = old_errno;
6224 }
6225
6226 static int
6227 linux_supports_non_stop (void)
6228 {
6229 return 1;
6230 }
6231
6232 static int
6233 linux_async (int enable)
6234 {
6235 int previous = target_is_async_p ();
6236
6237 if (debug_threads)
6238 debug_printf ("linux_async (%d), previous=%d\n",
6239 enable, previous);
6240
6241 if (previous != enable)
6242 {
6243 sigset_t mask;
6244 sigemptyset (&mask);
6245 sigaddset (&mask, SIGCHLD);
6246
6247 sigprocmask (SIG_BLOCK, &mask, NULL);
6248
6249 if (enable)
6250 {
6251 if (pipe (linux_event_pipe) == -1)
6252 {
6253 linux_event_pipe[0] = -1;
6254 linux_event_pipe[1] = -1;
6255 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6256
6257 warning ("creating event pipe failed.");
6258 return previous;
6259 }
6260
6261 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6262 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
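/* Make both ends non-blocking, so that marking the pipe from the
SIGCHLD handler can never block, and neither can draining it from
the event loop. */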
6263
6264 /* Register the event loop handler. */
6265 add_file_handler (linux_event_pipe[0],
6266 handle_target_event, NULL);
6267
6268 /* Always trigger a linux_wait. */
6269 async_file_mark ();
6270 }
6271 else
6272 {
6273 delete_file_handler (linux_event_pipe[0]);
6274
6275 close (linux_event_pipe[0]);
6276 close (linux_event_pipe[1]);
6277 linux_event_pipe[0] = -1;
6278 linux_event_pipe[1] = -1;
6279 }
6280
6281 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6282 }
6283
6284 return previous;
6285 }
6286
6287 static int
6288 linux_start_non_stop (int nonstop)
6289 {
6290 /* Register or unregister from event-loop accordingly. */
6291 linux_async (nonstop);
6292
6293 if (target_is_async_p () != (nonstop != 0))
6294 return -1;
6295
6296 return 0;
6297 }
6298
6299 static int
6300 linux_supports_multi_process (void)
6301 {
6302 return 1;
6303 }
6304
6305 /* Check if fork events are supported. */
6306
6307 static int
6308 linux_supports_fork_events (void)
6309 {
6310 return linux_supports_tracefork ();
6311 }
6312
6313 /* Check if vfork events are supported. */
6314
6315 static int
6316 linux_supports_vfork_events (void)
6317 {
6318 return linux_supports_tracefork ();
6319 }
6320
6321 /* Check if exec events are supported. */
6322
6323 static int
6324 linux_supports_exec_events (void)
6325 {
6326 return linux_supports_traceexec ();
6327 }
6328
6329 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6330 ptrace flags for all inferiors. This is in case the new GDB connection
6331 doesn't support the same set of events that the previous one did. */
6332
6333 static void
6334 linux_handle_new_gdb_connection (void)
6335 {
6336 /* Request that all the lwps reset their ptrace options. */
6337 for_each_thread ([] (thread_info *thread)
6338 {
6339 struct lwp_info *lwp = get_thread_lwp (thread);
6340
6341 if (!lwp->stopped)
6342 {
6343 /* Stop the lwp so we can modify its ptrace options. */
6344 lwp->must_set_ptrace_flags = 1;
6345 linux_stop_lwp (lwp);
6346 }
6347 else
6348 {
6349 /* Already stopped; go ahead and set the ptrace options. */
6350 struct process_info *proc = find_process_pid (pid_of (thread));
6351 int options = linux_low_ptrace_options (proc->attached);
6352
6353 linux_enable_event_reporting (lwpid_of (thread), options);
6354 lwp->must_set_ptrace_flags = 0;
6355 }
6356 });
6357 }
6358
6359 static int
6360 linux_supports_disable_randomization (void)
6361 {
6362 #ifdef HAVE_PERSONALITY
6363 return 1;
6364 #else
6365 return 0;
6366 #endif
6367 }
6368
6369 static int
6370 linux_supports_agent (void)
6371 {
6372 return 1;
6373 }
6374
6375 static int
6376 linux_supports_range_stepping (void)
6377 {
6378 if (can_software_single_step ())
6379 return 1;
6380 if (*the_low_target.supports_range_stepping == NULL)
6381 return 0;
6382
6383 return (*the_low_target.supports_range_stepping) ();
6384 }
6385
6386 /* Enumerate spufs IDs for process PID. */
6387 static int
6388 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6389 {
6390 int pos = 0;
6391 int written = 0;
6392 char path[128];
6393 DIR *dir;
6394 struct dirent *entry;
6395
6396 sprintf (path, "/proc/%ld/fd", pid);
6397 dir = opendir (path);
6398 if (!dir)
6399 return -1;
6400
6401 rewinddir (dir);
6402 while ((entry = readdir (dir)) != NULL)
6403 {
6404 struct stat st;
6405 struct statfs stfs;
6406 int fd;
6407
6408 fd = atoi (entry->d_name);
6409 if (!fd)
6410 continue;
6411
6412 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6413 if (stat (path, &st) != 0)
6414 continue;
6415 if (!S_ISDIR (st.st_mode))
6416 continue;
6417
6418 if (statfs (path, &stfs) != 0)
6419 continue;
6420 if (stfs.f_type != SPUFS_MAGIC)
6421 continue;
6422
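/* Each spufs context directory contributes one 4-byte ID; copy out
only the IDs that fall entirely within the requested
[OFFSET, OFFSET + LEN) window. */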
6423 if (pos >= offset && pos + 4 <= offset + len)
6424 {
6425 *(unsigned int *)(buf + pos - offset) = fd;
6426 written += 4;
6427 }
6428 pos += 4;
6429 }
6430
6431 closedir (dir);
6432 return written;
6433 }
6434
6435 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6436 object type, using the /proc file system. */
6437 static int
6438 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6439 unsigned const char *writebuf,
6440 CORE_ADDR offset, int len)
6441 {
6442 long pid = lwpid_of (current_thread);
6443 char buf[128];
6444 int fd = 0;
6445 int ret = 0;
6446
6447 if (!writebuf && !readbuf)
6448 return -1;
6449
6450 if (!*annex)
6451 {
6452 if (!readbuf)
6453 return -1;
6454 else
6455 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6456 }
6457
6458 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6459 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
6460 if (fd <= 0)
6461 return -1;
6462
6463 if (offset != 0
6464 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6465 {
6466 close (fd);
6467 return 0;
6468 }
6469
6470 if (writebuf)
6471 ret = write (fd, writebuf, (size_t) len);
6472 else
6473 ret = read (fd, readbuf, (size_t) len);
6474
6475 close (fd);
6476 return ret;
6477 }
6478
6479 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6480 struct target_loadseg
6481 {
6482 /* Core address to which the segment is mapped. */
6483 Elf32_Addr addr;
6484 /* VMA recorded in the program header. */
6485 Elf32_Addr p_vaddr;
6486 /* Size of this segment in memory. */
6487 Elf32_Word p_memsz;
6488 };
6489
6490 # if defined PT_GETDSBT
6491 struct target_loadmap
6492 {
6493 /* Protocol version number, must be zero. */
6494 Elf32_Word version;
6495 /* Pointer to the DSBT table, its size, and the DSBT index. */
6496 unsigned *dsbt_table;
6497 unsigned dsbt_size, dsbt_index;
6498 /* Number of segments in this map. */
6499 Elf32_Word nsegs;
6500 /* The actual memory map. */
6501 struct target_loadseg segs[/*nsegs*/];
6502 };
6503 # define LINUX_LOADMAP PT_GETDSBT
6504 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6505 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6506 # else
6507 struct target_loadmap
6508 {
6509 /* Protocol version number, must be zero. */
6510 Elf32_Half version;
6511 /* Number of segments in this map. */
6512 Elf32_Half nsegs;
6513 /* The actual memory map. */
6514 struct target_loadseg segs[/*nsegs*/];
6515 };
6516 # define LINUX_LOADMAP PTRACE_GETFDPIC
6517 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6518 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6519 # endif
6520
6521 static int
6522 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6523 unsigned char *myaddr, unsigned int len)
6524 {
6525 int pid = lwpid_of (current_thread);
6526 int addr = -1;
6527 struct target_loadmap *data = NULL;
6528 unsigned int actual_length, copy_length;
6529
6530 if (strcmp (annex, "exec") == 0)
6531 addr = (int) LINUX_LOADMAP_EXEC;
6532 else if (strcmp (annex, "interp") == 0)
6533 addr = (int) LINUX_LOADMAP_INTERP;
6534 else
6535 return -1;
6536
6537 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6538 return -1;
6539
6540 if (data == NULL)
6541 return -1;
6542
6543 actual_length = sizeof (struct target_loadmap)
6544 + sizeof (struct target_loadseg) * data->nsegs;
6545
6546 if (offset < 0 || offset > actual_length)
6547 return -1;
6548
6549 copy_length = actual_length - offset < len ? actual_length - offset : len;
6550 memcpy (myaddr, (char *) data + offset, copy_length);
6551 return copy_length;
6552 }
6553 #else
6554 # define linux_read_loadmap NULL
6555 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6556
6557 static void
6558 linux_process_qsupported (char **features, int count)
6559 {
6560 if (the_low_target.process_qsupported != NULL)
6561 the_low_target.process_qsupported (features, count);
6562 }
6563
6564 static int
6565 linux_supports_catch_syscall (void)
6566 {
6567 return (the_low_target.get_syscall_trapinfo != NULL
6568 && linux_supports_tracesysgood ());
6569 }
6570
6571 static int
6572 linux_get_ipa_tdesc_idx (void)
6573 {
6574 if (the_low_target.get_ipa_tdesc_idx == NULL)
6575 return 0;
6576
6577 return (*the_low_target.get_ipa_tdesc_idx) ();
6578 }
6579
6580 static int
6581 linux_supports_tracepoints (void)
6582 {
6583 if (*the_low_target.supports_tracepoints == NULL)
6584 return 0;
6585
6586 return (*the_low_target.supports_tracepoints) ();
6587 }
6588
6589 static CORE_ADDR
6590 linux_read_pc (struct regcache *regcache)
6591 {
6592 if (the_low_target.get_pc == NULL)
6593 return 0;
6594
6595 return (*the_low_target.get_pc) (regcache);
6596 }
6597
6598 static void
6599 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6600 {
6601 gdb_assert (the_low_target.set_pc != NULL);
6602
6603 (*the_low_target.set_pc) (regcache, pc);
6604 }
6605
6606 static int
6607 linux_thread_stopped (struct thread_info *thread)
6608 {
6609 return get_thread_lwp (thread)->stopped;
6610 }
6611
6612 /* This exposes stop-all-threads functionality to other modules. */
6613
6614 static void
6615 linux_pause_all (int freeze)
6616 {
6617 stop_all_lwps (freeze, NULL);
6618 }
6619
6620 /* This exposes unstop-all-threads functionality to other gdbserver
6621 modules. */
6622
6623 static void
6624 linux_unpause_all (int unfreeze)
6625 {
6626 unstop_all_lwps (unfreeze, NULL);
6627 }
6628
6629 static int
6630 linux_prepare_to_access_memory (void)
6631 {
6632 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6633 running LWP. */
6634 if (non_stop)
6635 linux_pause_all (1);
6636 return 0;
6637 }
6638
6639 static void
6640 linux_done_accessing_memory (void)
6641 {
6642 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6643 running LWP. */
6644 if (non_stop)
6645 linux_unpause_all (1);
6646 }
6647
6648 static int
6649 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6650 CORE_ADDR collector,
6651 CORE_ADDR lockaddr,
6652 ULONGEST orig_size,
6653 CORE_ADDR *jump_entry,
6654 CORE_ADDR *trampoline,
6655 ULONGEST *trampoline_size,
6656 unsigned char *jjump_pad_insn,
6657 ULONGEST *jjump_pad_insn_size,
6658 CORE_ADDR *adjusted_insn_addr,
6659 CORE_ADDR *adjusted_insn_addr_end,
6660 char *err)
6661 {
6662 return (*the_low_target.install_fast_tracepoint_jump_pad)
6663 (tpoint, tpaddr, collector, lockaddr, orig_size,
6664 jump_entry, trampoline, trampoline_size,
6665 jjump_pad_insn, jjump_pad_insn_size,
6666 adjusted_insn_addr, adjusted_insn_addr_end,
6667 err);
6668 }
6669
6670 static struct emit_ops *
6671 linux_emit_ops (void)
6672 {
6673 if (the_low_target.emit_ops != NULL)
6674 return (*the_low_target.emit_ops) ();
6675 else
6676 return NULL;
6677 }
6678
6679 static int
6680 linux_get_min_fast_tracepoint_insn_len (void)
6681 {
6682 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6683 }
6684
6685 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6686
6687 static int
6688 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6689 CORE_ADDR *phdr_memaddr, int *num_phdr)
6690 {
6691 char filename[PATH_MAX];
6692 int fd;
6693 const int auxv_size = is_elf64
6694 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6695 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6696
6697 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6698
6699 fd = open (filename, O_RDONLY);
6700 if (fd < 0)
6701 return 1;
6702
6703 *phdr_memaddr = 0;
6704 *num_phdr = 0;
6705 while (read (fd, buf, auxv_size) == auxv_size
6706 && (*phdr_memaddr == 0 || *num_phdr == 0))
6707 {
6708 if (is_elf64)
6709 {
6710 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6711
6712 switch (aux->a_type)
6713 {
6714 case AT_PHDR:
6715 *phdr_memaddr = aux->a_un.a_val;
6716 break;
6717 case AT_PHNUM:
6718 *num_phdr = aux->a_un.a_val;
6719 break;
6720 }
6721 }
6722 else
6723 {
6724 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6725
6726 switch (aux->a_type)
6727 {
6728 case AT_PHDR:
6729 *phdr_memaddr = aux->a_un.a_val;
6730 break;
6731 case AT_PHNUM:
6732 *num_phdr = aux->a_un.a_val;
6733 break;
6734 }
6735 }
6736 }
6737
6738 close (fd);
6739
6740 if (*phdr_memaddr == 0 || *num_phdr == 0)
6741 {
6742 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6743 "phdr_memaddr = %ld, phdr_num = %d",
6744 (long) *phdr_memaddr, *num_phdr);
6745 return 2;
6746 }
6747
6748 return 0;
6749 }
6750
6751 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6752
6753 static CORE_ADDR
6754 get_dynamic (const int pid, const int is_elf64)
6755 {
6756 CORE_ADDR phdr_memaddr, relocation;
6757 int num_phdr, i;
6758 unsigned char *phdr_buf;
6759 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6760
6761 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6762 return 0;
6763
6764 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6765 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6766
6767 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6768 return 0;
6769
6770 /* Compute relocation: it is expected to be 0 for "regular" executables,
6771 non-zero for PIE ones. */
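/* For a PIE loaded with a bias of, say, 0x555555554000, AT_PHDR holds
the runtime address of the program headers while PT_PHDR's p_vaddr
holds their link-time address; the difference recovers the bias. */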
6772 relocation = -1;
6773 for (i = 0; relocation == -1 && i < num_phdr; i++)
6774 if (is_elf64)
6775 {
6776 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6777
6778 if (p->p_type == PT_PHDR)
6779 relocation = phdr_memaddr - p->p_vaddr;
6780 }
6781 else
6782 {
6783 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6784
6785 if (p->p_type == PT_PHDR)
6786 relocation = phdr_memaddr - p->p_vaddr;
6787 }
6788
6789 if (relocation == -1)
6790 {
6791 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6792 real world executables, including PIE executables, always have
6793 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6794 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6795 provides DT_DEBUG anyway (fpc binaries are statically linked).
6796 
6797 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6798 
6799 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6800
6801 return 0;
6802 }
6803
6804 for (i = 0; i < num_phdr; i++)
6805 {
6806 if (is_elf64)
6807 {
6808 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6809
6810 if (p->p_type == PT_DYNAMIC)
6811 return p->p_vaddr + relocation;
6812 }
6813 else
6814 {
6815 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6816
6817 if (p->p_type == PT_DYNAMIC)
6818 return p->p_vaddr + relocation;
6819 }
6820 }
6821
6822 return 0;
6823 }
6824
6825 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6826 can be 0 if the inferior does not yet have the library list initialized.
6827 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6828 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6829
6830 static CORE_ADDR
6831 get_r_debug (const int pid, const int is_elf64)
6832 {
6833 CORE_ADDR dynamic_memaddr;
6834 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6835 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6836 CORE_ADDR map = -1;
6837
6838 dynamic_memaddr = get_dynamic (pid, is_elf64);
6839 if (dynamic_memaddr == 0)
6840 return map;
6841
6842 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6843 {
6844 if (is_elf64)
6845 {
6846 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6847 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6848 union
6849 {
6850 Elf64_Xword map;
6851 unsigned char buf[sizeof (Elf64_Xword)];
6852 }
6853 rld_map;
6854 #endif
6855 #ifdef DT_MIPS_RLD_MAP
6856 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6857 {
6858 if (linux_read_memory (dyn->d_un.d_val,
6859 rld_map.buf, sizeof (rld_map.buf)) == 0)
6860 return rld_map.map;
6861 else
6862 break;
6863 }
6864 #endif /* DT_MIPS_RLD_MAP */
6865 #ifdef DT_MIPS_RLD_MAP_REL
6866 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6867 {
6868 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6869 rld_map.buf, sizeof (rld_map.buf)) == 0)
6870 return rld_map.map;
6871 else
6872 break;
6873 }
6874 #endif /* DT_MIPS_RLD_MAP_REL */
6875
6876 if (dyn->d_tag == DT_DEBUG && map == -1)
6877 map = dyn->d_un.d_val;
6878
6879 if (dyn->d_tag == DT_NULL)
6880 break;
6881 }
6882 else
6883 {
6884 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6885 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6886 union
6887 {
6888 Elf32_Word map;
6889 unsigned char buf[sizeof (Elf32_Word)];
6890 }
6891 rld_map;
6892 #endif
6893 #ifdef DT_MIPS_RLD_MAP
6894 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6895 {
6896 if (linux_read_memory (dyn->d_un.d_val,
6897 rld_map.buf, sizeof (rld_map.buf)) == 0)
6898 return rld_map.map;
6899 else
6900 break;
6901 }
6902 #endif /* DT_MIPS_RLD_MAP */
6903 #ifdef DT_MIPS_RLD_MAP_REL
6904 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6905 {
6906 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6907 rld_map.buf, sizeof (rld_map.buf)) == 0)
6908 return rld_map.map;
6909 else
6910 break;
6911 }
6912 #endif /* DT_MIPS_RLD_MAP_REL */
6913
6914 if (dyn->d_tag == DT_DEBUG && map == -1)
6915 map = dyn->d_un.d_val;
6916
6917 if (dyn->d_tag == DT_NULL)
6918 break;
6919 }
6920
6921 dynamic_memaddr += dyn_size;
6922 }
6923
6924 return map;
6925 }
6926
6927 /* Read one pointer from MEMADDR in the inferior. */
6928
6929 static int
6930 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6931 {
6932 int ret;
6933
6934 /* Go through a union so this works on either big or little endian
6935 hosts when the inferior's pointer size is smaller than the size
6936 of CORE_ADDR. It is assumed the inferior's endianness is the
6937 same as the superior's. */
6938 union
6939 {
6940 CORE_ADDR core_addr;
6941 unsigned int ui;
6942 unsigned char uc;
6943 } addr;
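/* For example, when an 8-byte CORE_ADDR host reads a 4-byte inferior
pointer, only the first four bytes of ADDR are filled in; reading
the value back through ADDR.UI uses exactly those four bytes, which
ADDR.CORE_ADDR would not. */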
6944
6945 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6946 if (ret == 0)
6947 {
6948 if (ptr_size == sizeof (CORE_ADDR))
6949 *ptr = addr.core_addr;
6950 else if (ptr_size == sizeof (unsigned int))
6951 *ptr = addr.ui;
6952 else
6953 gdb_assert_not_reached ("unhandled pointer size");
6954 }
6955 return ret;
6956 }
6957
6958 struct link_map_offsets
6959 {
6960 /* Offset and size of r_debug.r_version. */
6961 int r_version_offset;
6962
6963 /* Offset and size of r_debug.r_map. */
6964 int r_map_offset;
6965
6966 /* Offset to l_addr field in struct link_map. */
6967 int l_addr_offset;
6968
6969 /* Offset to l_name field in struct link_map. */
6970 int l_name_offset;
6971
6972 /* Offset to l_ld field in struct link_map. */
6973 int l_ld_offset;
6974
6975 /* Offset to l_next field in struct link_map. */
6976 int l_next_offset;
6977
6978 /* Offset to l_prev field in struct link_map. */
6979 int l_prev_offset;
6980 };
6981
6982 /* Construct qXfer:libraries-svr4:read reply. */
6983
6984 static int
6985 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6986 unsigned const char *writebuf,
6987 CORE_ADDR offset, int len)
6988 {
6989 char *document;
6990 unsigned document_len;
6991 struct process_info_private *const priv = current_process ()->priv;
6992 char filename[PATH_MAX];
6993 int pid, is_elf64;
6994
6995 static const struct link_map_offsets lmo_32bit_offsets =
6996 {
6997 0, /* r_version offset. */
6998 4, /* r_debug.r_map offset. */
6999 0, /* l_addr offset in link_map. */
7000 4, /* l_name offset in link_map. */
7001 8, /* l_ld offset in link_map. */
7002 12, /* l_next offset in link_map. */
7003 16 /* l_prev offset in link_map. */
7004 };
7005
7006 static const struct link_map_offsets lmo_64bit_offsets =
7007 {
7008 0, /* r_version offset. */
7009 8, /* r_debug.r_map offset. */
7010 0, /* l_addr offset in link_map. */
7011 8, /* l_name offset in link_map. */
7012 16, /* l_ld offset in link_map. */
7013 24, /* l_next offset in link_map. */
7014 32 /* l_prev offset in link_map. */
7015 };
7016 const struct link_map_offsets *lmo;
7017 unsigned int machine;
7018 int ptr_size;
7019 CORE_ADDR lm_addr = 0, lm_prev = 0;
7020 int allocated = 1024;
7021 char *p;
7022 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7023 int header_done = 0;
7024
7025 if (writebuf != NULL)
7026 return -2;
7027 if (readbuf == NULL)
7028 return -1;
7029
7030 pid = lwpid_of (current_thread);
7031 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7032 is_elf64 = elf_64_file_p (filename, &machine);
7033 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7034 ptr_size = is_elf64 ? 8 : 4;
7035
7036 while (annex[0] != '\0')
7037 {
7038 const char *sep;
7039 CORE_ADDR *addrp;
7040 int len;
7041
7042 sep = strchr (annex, '=');
7043 if (sep == NULL)
7044 break;
7045
7046 len = sep - annex;
7047 if (len == 5 && startswith (annex, "start"))
7048 addrp = &lm_addr;
7049 else if (len == 4 && startswith (annex, "prev"))
7050 addrp = &lm_prev;
7051 else
7052 {
7053 annex = strchr (sep, ';');
7054 if (annex == NULL)
7055 break;
7056 annex++;
7057 continue;
7058 }
7059
7060 annex = decode_address_to_semicolon (addrp, sep + 1);
7061 }
7062
7063 if (lm_addr == 0)
7064 {
7065 int r_version = 0;
7066
7067 if (priv->r_debug == 0)
7068 priv->r_debug = get_r_debug (pid, is_elf64);
7069
7070 /* We failed to find DT_DEBUG. This situation will not change
7071 for this inferior - do not retry it. Report it to GDB as
7072 E01; see GDB's solib-svr4.c for the reasons. */
7073 if (priv->r_debug == (CORE_ADDR) -1)
7074 return -1;
7075
7076 if (priv->r_debug != 0)
7077 {
7078 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7079 (unsigned char *) &r_version,
7080 sizeof (r_version)) != 0
7081 || r_version != 1)
7082 {
7083 warning ("unexpected r_debug version %d", r_version);
7084 }
7085 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7086 &lm_addr, ptr_size) != 0)
7087 {
7088 warning ("unable to read r_map from 0x%lx",
7089 (long) priv->r_debug + lmo->r_map_offset);
7090 }
7091 }
7092 }
7093
7094 document = (char *) xmalloc (allocated);
7095 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7096 p = document + strlen (document);
7097
7098 while (lm_addr
7099 && read_one_ptr (lm_addr + lmo->l_name_offset,
7100 &l_name, ptr_size) == 0
7101 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7102 &l_addr, ptr_size) == 0
7103 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7104 &l_ld, ptr_size) == 0
7105 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7106 &l_prev, ptr_size) == 0
7107 && read_one_ptr (lm_addr + lmo->l_next_offset,
7108 &l_next, ptr_size) == 0)
7109 {
7110 unsigned char libname[PATH_MAX];
7111
7112 if (lm_prev != l_prev)
7113 {
7114 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7115 (long) lm_prev, (long) l_prev);
7116 break;
7117 }
7118
7119 /* Ignore the first entry even if it has a valid name, as the first
7120 entry corresponds to the main executable. The first entry should
7121 not be skipped if the dynamic loader was loaded late by a static
7122 executable (see the solib-svr4.c parameter ignore_first). But in
7123 that case the main executable does not have PT_DYNAMIC present and
7124 this function has already exited above due to a failed get_r_debug. */
7125 if (lm_prev == 0)
7126 {
7127 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7128 p = p + strlen (p);
7129 }
7130 else
7131 {
7132 /* Not checking for error because reading may stop before
7133 we've got PATH_MAX worth of characters. */
7134 libname[0] = '\0';
7135 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7136 libname[sizeof (libname) - 1] = '\0';
7137 if (libname[0] != '\0')
7138 {
7139 /* 6x the size for xml_escape_text below. */
7140 size_t len = 6 * strlen ((char *) libname);
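/* The worst case is a character such as '"' escaping to the
six-byte entity "&quot;". */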
7141
7142 if (!header_done)
7143 {
7144 /* Terminate `<library-list-svr4'. */
7145 *p++ = '>';
7146 header_done = 1;
7147 }
7148
7149 while (allocated < p - document + len + 200)
7150 {
7151 /* Expand to guarantee sufficient storage. */
7152 uintptr_t document_len = p - document;
7153
7154 document = (char *) xrealloc (document, 2 * allocated);
7155 allocated *= 2;
7156 p = document + document_len;
7157 }
7158
7159 std::string name = xml_escape_text ((char *) libname);
7160 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7161 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7162 name.c_str (), (unsigned long) lm_addr,
7163 (unsigned long) l_addr, (unsigned long) l_ld);
7164 }
7165 }
7166
7167 lm_prev = lm_addr;
7168 lm_addr = l_next;
7169 }
7170
7171 if (!header_done)
7172 {
7173 /* Empty list; terminate `<library-list-svr4'. */
7174 strcpy (p, "/>");
7175 }
7176 else
7177 strcpy (p, "</library-list-svr4>");
7178
7179 document_len = strlen (document);
7180 if (offset < document_len)
7181 document_len -= offset;
7182 else
7183 document_len = 0;
7184 if (len > document_len)
7185 len = document_len;
7186
7187 memcpy (readbuf, document + offset, len);
7188 xfree (document);
7189
7190 return len;
7191 }
7192
7193 #ifdef HAVE_LINUX_BTRACE
7194
7195 /* See to_disable_btrace target method. */
7196
7197 static int
7198 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7199 {
7200 enum btrace_error err;
7201
7202 err = linux_disable_btrace (tinfo);
7203 return (err == BTRACE_ERR_NONE ? 0 : -1);
7204 }
7205
7206 /* Encode an Intel Processor Trace configuration. */
7207
7208 static void
7209 linux_low_encode_pt_config (struct buffer *buffer,
7210 const struct btrace_data_pt_config *config)
7211 {
7212 buffer_grow_str (buffer, "<pt-config>\n");
7213
7214 switch (config->cpu.vendor)
7215 {
7216 case CV_INTEL:
7217 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7218 "model=\"%u\" stepping=\"%u\"/>\n",
7219 config->cpu.family, config->cpu.model,
7220 config->cpu.stepping);
7221 break;
7222
7223 default:
7224 break;
7225 }
7226
7227 buffer_grow_str (buffer, "</pt-config>\n");
7228 }
7229
7230 /* Encode a raw buffer. */
7231
7232 static void
7233 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7234 unsigned int size)
7235 {
7236 if (size == 0)
7237 return;
7238
7239 /* We use hex encoding - see common/rsp-low.h. */
7240 buffer_grow_str (buffer, "<raw>\n");
7241
7242 while (size-- > 0)
7243 {
7244 char elem[2];
7245
7246 elem[0] = tohex ((*data >> 4) & 0xf);
7247 elem[1] = tohex (*data++ & 0xf);
7248
7249 buffer_grow (buffer, elem, 2);
7250 }
7251
7252 buffer_grow_str (buffer, "</raw>\n");
7253 }
7254
7255 /* See to_read_btrace target method. */
7256
7257 static int
7258 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7259 enum btrace_read_type type)
7260 {
7261 struct btrace_data btrace;
7262 struct btrace_block *block;
7263 enum btrace_error err;
7264 int i;
7265
7266 btrace_data_init (&btrace);
7267
7268 err = linux_read_btrace (&btrace, tinfo, type);
7269 if (err != BTRACE_ERR_NONE)
7270 {
7271 if (err == BTRACE_ERR_OVERFLOW)
7272 buffer_grow_str0 (buffer, "E.Overflow.");
7273 else
7274 buffer_grow_str0 (buffer, "E.Generic Error.");
7275
7276 goto err;
7277 }
7278
7279 switch (btrace.format)
7280 {
7281 case BTRACE_FORMAT_NONE:
7282 buffer_grow_str0 (buffer, "E.No Trace.");
7283 goto err;
7284
7285 case BTRACE_FORMAT_BTS:
7286 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7287 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7288
7289 for (i = 0;
7290 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7291 i++)
7292 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7293 paddress (block->begin), paddress (block->end));
7294
7295 buffer_grow_str0 (buffer, "</btrace>\n");
7296 break;
7297
7298 case BTRACE_FORMAT_PT:
7299 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7300 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7301 buffer_grow_str (buffer, "<pt>\n");
7302
7303 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7304
7305 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7306 btrace.variant.pt.size);
7307
7308 buffer_grow_str (buffer, "</pt>\n");
7309 buffer_grow_str0 (buffer, "</btrace>\n");
7310 break;
7311
7312 default:
7313 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7314 goto err;
7315 }
7316
7317 btrace_data_fini (&btrace);
7318 return 0;
7319
7320 err:
7321 btrace_data_fini (&btrace);
7322 return -1;
7323 }
7324
7325 /* See to_btrace_conf target method. */
7326
7327 static int
7328 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7329 struct buffer *buffer)
7330 {
7331 const struct btrace_config *conf;
7332
7333 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7334 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7335
7336 conf = linux_btrace_conf (tinfo);
7337 if (conf != NULL)
7338 {
7339 switch (conf->format)
7340 {
7341 case BTRACE_FORMAT_NONE:
7342 break;
7343
7344 case BTRACE_FORMAT_BTS:
7345 buffer_xml_printf (buffer, "<bts");
7346 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7347 buffer_xml_printf (buffer, " />\n");
7348 break;
7349
7350 case BTRACE_FORMAT_PT:
7351 buffer_xml_printf (buffer, "<pt");
7352 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7353 buffer_xml_printf (buffer, "/>\n");
7354 break;
7355 }
7356 }
7357
7358 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7359 return 0;
7360 }
7361 #endif /* HAVE_LINUX_BTRACE */
7362
7363 /* See nat/linux-nat.h. */
7364
7365 ptid_t
7366 current_lwp_ptid (void)
7367 {
7368 return ptid_of (current_thread);
7369 }
7370
7371 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7372
7373 static int
7374 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7375 {
7376 if (the_low_target.breakpoint_kind_from_pc != NULL)
7377 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7378 else
7379 return default_breakpoint_kind_from_pc (pcptr);
7380 }
7381
7382 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7383
7384 static const gdb_byte *
7385 linux_sw_breakpoint_from_kind (int kind, int *size)
7386 {
7387 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7388
7389 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7390 }
7391
7392 /* Implementation of the target_ops method
7393 "breakpoint_kind_from_current_state". */
7394
7395 static int
7396 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7397 {
7398 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7399 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7400 else
7401 return linux_breakpoint_kind_from_pc (pcptr);
7402 }
7403
7404 /* Default implementation of linux_target_ops method "set_pc" for
7405 32-bit pc register which is literally named "pc". */
7406
7407 void
7408 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7409 {
7410 uint32_t newpc = pc;
7411
7412 supply_register_by_name (regcache, "pc", &newpc);
7413 }
7414
7415 /* Default implementation of linux_target_ops method "get_pc" for
7416 32-bit pc register which is literally named "pc". */
7417
7418 CORE_ADDR
7419 linux_get_pc_32bit (struct regcache *regcache)
7420 {
7421 uint32_t pc;
7422
7423 collect_register_by_name (regcache, "pc", &pc);
7424 if (debug_threads)
7425 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7426 return pc;
7427 }
7428
7429 /* Default implementation of linux_target_ops method "set_pc" for
7430 64-bit pc register which is literally named "pc". */
7431
7432 void
7433 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7434 {
7435 uint64_t newpc = pc;
7436
7437 supply_register_by_name (regcache, "pc", &newpc);
7438 }
7439
7440 /* Default implementation of linux_target_ops method "get_pc" for
7441 64-bit pc register which is literally named "pc". */
7442
7443 CORE_ADDR
7444 linux_get_pc_64bit (struct regcache *regcache)
7445 {
7446 uint64_t pc;
7447
7448 collect_register_by_name (regcache, "pc", &pc);
7449 if (debug_threads)
7450 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7451 return pc;
7452 }
7453
7454
7455 static struct target_ops linux_target_ops = {
7456 linux_create_inferior,
7457 linux_post_create_inferior,
7458 linux_attach,
7459 linux_kill,
7460 linux_detach,
7461 linux_mourn,
7462 linux_join,
7463 linux_thread_alive,
7464 linux_resume,
7465 linux_wait,
7466 linux_fetch_registers,
7467 linux_store_registers,
7468 linux_prepare_to_access_memory,
7469 linux_done_accessing_memory,
7470 linux_read_memory,
7471 linux_write_memory,
7472 linux_look_up_symbols,
7473 linux_request_interrupt,
7474 linux_read_auxv,
7475 linux_supports_z_point_type,
7476 linux_insert_point,
7477 linux_remove_point,
7478 linux_stopped_by_sw_breakpoint,
7479 linux_supports_stopped_by_sw_breakpoint,
7480 linux_stopped_by_hw_breakpoint,
7481 linux_supports_stopped_by_hw_breakpoint,
7482 linux_supports_hardware_single_step,
7483 linux_stopped_by_watchpoint,
7484 linux_stopped_data_address,
7485 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7486 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7487 && defined(PT_TEXT_END_ADDR)
7488 linux_read_offsets,
7489 #else
7490 NULL,
7491 #endif
7492 #ifdef USE_THREAD_DB
7493 thread_db_get_tls_address,
7494 #else
7495 NULL,
7496 #endif
7497 linux_qxfer_spu,
7498 hostio_last_error_from_errno,
7499 linux_qxfer_osdata,
7500 linux_xfer_siginfo,
7501 linux_supports_non_stop,
7502 linux_async,
7503 linux_start_non_stop,
7504 linux_supports_multi_process,
7505 linux_supports_fork_events,
7506 linux_supports_vfork_events,
7507 linux_supports_exec_events,
7508 linux_handle_new_gdb_connection,
7509 #ifdef USE_THREAD_DB
7510 thread_db_handle_monitor_command,
7511 #else
7512 NULL,
7513 #endif
7514 linux_common_core_of_thread,
7515 linux_read_loadmap,
7516 linux_process_qsupported,
7517 linux_supports_tracepoints,
7518 linux_read_pc,
7519 linux_write_pc,
7520 linux_thread_stopped,
7521 NULL,
7522 linux_pause_all,
7523 linux_unpause_all,
7524 linux_stabilize_threads,
7525 linux_install_fast_tracepoint_jump_pad,
7526 linux_emit_ops,
7527 linux_supports_disable_randomization,
7528 linux_get_min_fast_tracepoint_insn_len,
7529 linux_qxfer_libraries_svr4,
7530 linux_supports_agent,
7531 #ifdef HAVE_LINUX_BTRACE
7532 linux_supports_btrace,
7533 linux_enable_btrace,
7534 linux_low_disable_btrace,
7535 linux_low_read_btrace,
7536 linux_low_btrace_conf,
7537 #else
7538 NULL,
7539 NULL,
7540 NULL,
7541 NULL,
7542 NULL,
7543 #endif
7544 linux_supports_range_stepping,
7545 linux_proc_pid_to_exec_file,
7546 linux_mntns_open_cloexec,
7547 linux_mntns_unlink,
7548 linux_mntns_readlink,
7549 linux_breakpoint_kind_from_pc,
7550 linux_sw_breakpoint_from_kind,
7551 linux_proc_tid_get_name,
7552 linux_breakpoint_kind_from_current_state,
7553 linux_supports_software_single_step,
7554 linux_supports_catch_syscall,
7555 linux_get_ipa_tdesc_idx,
7556 #if USE_THREAD_DB
7557 thread_db_thread_handle,
7558 #else
7559 NULL,
7560 #endif
7561 };
7562
7563 #ifdef HAVE_LINUX_REGSETS
7564 void
7565 initialize_regsets_info (struct regsets_info *info)
7566 {
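/* Count the regsets; the array is terminated by an entry whose
size is negative. */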
7567 for (info->num_regsets = 0;
7568 info->regsets[info->num_regsets].size >= 0;
7569 info->num_regsets++)
7570 ;
7571 }
7572 #endif
7573
7574 void
7575 initialize_low (void)
7576 {
7577 struct sigaction sigchld_action;
7578
7579 memset (&sigchld_action, 0, sizeof (sigchld_action));
7580 set_target_ops (&linux_target_ops);
7581
7582 linux_ptrace_init_warnings ();
7583
7584 sigchld_action.sa_handler = sigchld_handler;
7585 sigemptyset (&sigchld_action.sa_mask);
7586 sigchld_action.sa_flags = SA_RESTART;
7587 sigaction (SIGCHLD, &sigchld_action, NULL);
7588
7589 initialize_low_arch ();
7590
7591 linux_check_ptrace_features ();
7592 }