gdbserver: When attaching, add process before lwps
gdb/gdbserver/linux-low.c
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2019 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "common/agent.h"
#include "tdesc.h"
#include "common/rsp-low.h"
#include "common/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "common/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "common/filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#include "common/common-inferior.h"
#include "nat/fork-inferior.h"
#include "common/environ.h"
#include "common/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "common/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  -1 means we don't
   know yet; the arch-specific code sets it to 0 or 1 once it has
   probed for the feature.  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
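
/* The head of the list of new stopped processes described above.  */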
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
				    siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static void proceed_one_lwp (thread_info *thread, lwp_info *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

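/* A signal, together with its full siginfo, that was intercepted for
   an LWP and is queued for later (re-)delivery.  */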
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit ELF, and
   -1 if the ELF magic is not present.  Store the machine type in
   *MACHINE (EM_NONE if there is no valid ELF header).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file, 0 if it is not, and -1 if
   the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

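/* Delete LWP: remove its thread from gdbserver's tables, let the low
   target discard any per-thread data, and free the lwp_info itself.  */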
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_t (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, the child will
		 hit them, so uninsert single-step breakpoints from the
		 parent (and child).  Once the vfork child is done,
		 reinsert them back in the parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  tdesc = allocate_target_description ();
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing a step-over with single-step
	     breakpoints, the list of single-step breakpoints is cloned
	     from the parent's.  Remove them from the child process.
	     In the vfork case, we'll reinsert them once the vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
	 system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

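/* Add an LWP with id PTID to gdbserver's tables: allocate a
   zero-initialized lwp_info, register the corresponding thread, and
   give the low target a chance to set up per-thread data.  */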
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

static int
linux_create_inferior (const char *program,
		       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to the inferior LWP specified by PTID.  Returns 0 on
   success, an errno value on error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

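  /* Per this change's rationale ("add process before lwps"), register
     the process before attaching to its first LWP, so that the LWP
     and thread bookkeeping done during the attach can find it.  */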
  proc = linux_add_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

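/* Return non-zero if process PID has at most one thread left in
   gdbserver's tables, i.e. the thread we can see is the last thread
   of the process.  */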
static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL only, so
     we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

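/* Kill process PROCESS and all of its LWPs, then mourn it.  */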
static int
linux_kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  TRY
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for for_each_thread.  Detaches from non-leader threads of a
   given process.  */

static void
linux_detach_lwp_callback (thread_info *thread)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == thread->id.lwp ())
    return;

  lwp_info *lwp = get_thread_lwp (thread);
  linux_detach_one_lwp (lwp);
}

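/* Detach from process PROCESS and each of its LWPs, mourning it
   afterwards.  */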
static int
linux_detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there is a step over in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to PROCESS from our tables, free its
   private data, and remove the process itself.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Free all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

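/* Wait until process PID has actually exited (or been killed),
   reaping any intermediate stop statuses along the way.  */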
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return true if this lwp has an interesting status pending.  */
static bool
status_pending_p_callback (thread_info *thread, ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

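/* Find the lwp_info whose LWP id matches PTID.  If PTID's lwp field
   is zero, match on its pid field instead.  Return NULL if no LWP is
   found.  */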
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp, data);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  for_each_process ([] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
1903 all other threads are reaped, the execing thread changes
1904 its tid to the tgid, and the previous (zombie) leader
1905 vanishes, giving place to the "new" leader. We could try
1906 distinguishing the exit and exec cases, by waiting once
1907 more, and seeing if something comes out, but it doesn't
1908 sound useful. The previous leader _does_ go away, and
1909 we'll re-add the new one once we see the exec event
1910 (which is just the same as what would happen if the
1911 previous leader did exit voluntarily before some other
1912 thread execs). */
1913
1914 if (debug_threads)
1915 debug_printf ("CZL: Thread group leader %d zombie "
1916 "(it exited, or another thread execd).\n",
1917 leader_pid);
1918
1919 delete_lwp (leader_lp);
1920 }
1921 });
1922 }
1923
1924 /* Callback for `find_thread'. Returns the first LWP that is not
1925 stopped. */
1926
1927 static bool
1928 not_stopped_callback (thread_info *thread, ptid_t filter)
1929 {
1930 if (!thread->id.matches (filter))
1931 return false;
1932
1933 lwp_info *lwp = get_thread_lwp (thread);
1934
1935 return !lwp->stopped;
1936 }
1937
1938 /* Increment LWP's suspend count. */
1939
1940 static void
1941 lwp_suspended_inc (struct lwp_info *lwp)
1942 {
1943 lwp->suspended++;
1944
1945 if (debug_threads && lwp->suspended > 4)
1946 {
1947 struct thread_info *thread = get_lwp_thread (lwp);
1948
1949 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1950 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1951 }
1952 }
1953
1954 /* Decrement LWP's suspend count. */
1955
1956 static void
1957 lwp_suspended_decr (struct lwp_info *lwp)
1958 {
1959 lwp->suspended--;
1960
1961 if (lwp->suspended < 0)
1962 {
1963 struct thread_info *thread = get_lwp_thread (lwp);
1964
1965 internal_error (__FILE__, __LINE__,
1966 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1967 lwp->suspended);
1968 }
1969 }
1970
1971 /* This function should only be called if the LWP got a SIGTRAP.
1972
1973 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1974 event was handled, 0 otherwise. */
1975
1976 static int
1977 handle_tracepoints (struct lwp_info *lwp)
1978 {
1979 struct thread_info *tinfo = get_lwp_thread (lwp);
1980 int tpoint_related_event = 0;
1981
1982 gdb_assert (lwp->suspended == 0);
1983
1984 /* If this tracepoint hit causes a tracing stop, we'll immediately
1985 uninsert tracepoints. To do this, we temporarily pause all
1986 threads, unpatch away, and then unpause threads. We need to make
1987 sure the unpausing doesn't resume LWP too. */
1988 lwp_suspended_inc (lwp);
1989
1990 /* And we need to be sure that any all-threads-stopping doesn't try
1991 to move threads out of the jump pads, as it could deadlock the
1992 inferior (LWP could be in the jump pad, maybe even holding the
1993 lock.) */
1994
1995 /* Do any necessary step collect actions. */
1996 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1997
1998 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1999
2000 /* See if we just hit a tracepoint and do its main collect
2001 actions. */
2002 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2003
2004 lwp_suspended_decr (lwp);
2005
2006 gdb_assert (lwp->suspended == 0);
2007 gdb_assert (!stabilizing_threads
2008 || (lwp->collecting_fast_tracepoint
2009 != fast_tpoint_collect_result::not_collecting));
2010
2011 if (tpoint_related_event)
2012 {
2013 if (debug_threads)
2014 debug_printf ("got a tracepoint event\n");
2015 return 1;
2016 }
2017
2018 return 0;
2019 }
2020
2021 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2022 collection status. */
2023
2024 static fast_tpoint_collect_result
2025 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2026 struct fast_tpoint_collect_status *status)
2027 {
2028 CORE_ADDR thread_area;
2029 struct thread_info *thread = get_lwp_thread (lwp);
2030
2031 if (the_low_target.get_thread_area == NULL)
2032 return fast_tpoint_collect_result::not_collecting;
2033
2034 /* Get the thread area address. This is used to recognize which
2035 thread is which when tracing with the in-process agent library.
2036 We don't read anything from the address, and treat it as opaque;
2037 it's the address itself that we assume is unique per-thread. */
2038 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2039 return fast_tpoint_collect_result::not_collecting;
2040
2041 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2042 }
2043
2044 /* The reason we resume in the caller is that we want to be able
2045 to pass lwp->status_pending as WSTAT, and we need to clear
2046 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2047 refuses to resume. */
2048
2049 static int
2050 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2051 {
2052 struct thread_info *saved_thread;
2053
2054 saved_thread = current_thread;
2055 current_thread = get_lwp_thread (lwp);
2056
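/* Only consider moving the LWP out of the jump pad if we either have
   no wait status to look at, or the LWP stopped for something other
   than a SIGTRAP (breakpoint and tracepoint hits report SIGTRAP).  */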
2057 if ((wstat == NULL
2058 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2059 && supports_fast_tracepoints ()
2060 && agent_loaded_p ())
2061 {
2062 struct fast_tpoint_collect_status status;
2063
2064 if (debug_threads)
2065 debug_printf ("Checking whether LWP %ld needs to move out of the "
2066 "jump pad.\n",
2067 lwpid_of (current_thread));
2068
2069 fast_tpoint_collect_result r
2070 = linux_fast_tracepoint_collecting (lwp, &status);
2071
2072 if (wstat == NULL
2073 || (WSTOPSIG (*wstat) != SIGILL
2074 && WSTOPSIG (*wstat) != SIGFPE
2075 && WSTOPSIG (*wstat) != SIGSEGV
2076 && WSTOPSIG (*wstat) != SIGBUS))
2077 {
2078 lwp->collecting_fast_tracepoint = r;
2079
2080 if (r != fast_tpoint_collect_result::not_collecting)
2081 {
2082 if (r == fast_tpoint_collect_result::before_insn
2083 && lwp->exit_jump_pad_bkpt == NULL)
2084 {
2085 /* Haven't executed the original instruction yet.
2086 Set breakpoint there, and wait till it's hit,
2087 then single-step until exiting the jump pad. */
2088 lwp->exit_jump_pad_bkpt
2089 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2090 }
2091
2092 if (debug_threads)
2093 debug_printf ("Checking whether LWP %ld needs to move out of "
2094 "the jump pad...it does\n",
2095 lwpid_of (current_thread));
2096 current_thread = saved_thread;
2097
2098 return 1;
2099 }
2100 }
2101 else
2102 {
2103 /* If we get a synchronous signal while collecting, *and*
2104 while executing the (relocated) original instruction,
2105 reset the PC to point at the tpoint address, before
2106 reporting to GDB. Otherwise, it's an IPA lib bug: just
2107 report the signal to GDB, and pray for the best. */
2108
2109 lwp->collecting_fast_tracepoint
2110 = fast_tpoint_collect_result::not_collecting;
2111
2112 if (r != fast_tpoint_collect_result::not_collecting
2113 && (status.adjusted_insn_addr <= lwp->stop_pc
2114 && lwp->stop_pc < status.adjusted_insn_addr_end))
2115 {
2116 siginfo_t info;
2117 struct regcache *regcache;
2118
2119 /* The si_addr on a few signals references the address
2120 of the faulting instruction. Adjust that as
2121 well. */
2122 if ((WSTOPSIG (*wstat) == SIGILL
2123 || WSTOPSIG (*wstat) == SIGFPE
2124 || WSTOPSIG (*wstat) == SIGBUS
2125 || WSTOPSIG (*wstat) == SIGSEGV)
2126 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2127 (PTRACE_TYPE_ARG3) 0, &info) == 0
2128 /* Final check just to make sure we don't clobber
2129 the siginfo of non-kernel-sent signals. */
2130 && (uintptr_t) info.si_addr == lwp->stop_pc)
2131 {
2132 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2133 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2134 (PTRACE_TYPE_ARG3) 0, &info);
2135 }
2136
2137 regcache = get_thread_regcache (current_thread, 1);
2138 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2139 lwp->stop_pc = status.tpoint_addr;
2140
2141 /* Cancel any fast tracepoint lock this thread was
2142 holding. */
2143 force_unlock_trace_buffer ();
2144 }
2145
2146 if (lwp->exit_jump_pad_bkpt != NULL)
2147 {
2148 if (debug_threads)
2149 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2150 "stopping all threads momentarily.\n");
2151
2152 stop_all_lwps (1, lwp);
2153
2154 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2155 lwp->exit_jump_pad_bkpt = NULL;
2156
2157 unstop_all_lwps (1, lwp);
2158
2159 gdb_assert (lwp->suspended >= 0);
2160 }
2161 }
2162 }
2163
2164 if (debug_threads)
2165 debug_printf ("Checking whether LWP %ld needs to move out of the "
2166 "jump pad...no\n",
2167 lwpid_of (current_thread));
2168
2169 current_thread = saved_thread;
2170 return 0;
2171 }
2172
2173 /* Enqueue one signal in the "signals to report later when out of the
2174 jump pad" list. */
2175
2176 static void
2177 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2178 {
2179 struct pending_signals *p_sig;
2180 struct thread_info *thread = get_lwp_thread (lwp);
2181
2182 if (debug_threads)
2183 debug_printf ("Deferring signal %d for LWP %ld.\n",
2184 WSTOPSIG (*wstat), lwpid_of (thread));
2185
2186 if (debug_threads)
2187 {
2188 struct pending_signals *sig;
2189
2190 for (sig = lwp->pending_signals_to_report;
2191 sig != NULL;
2192 sig = sig->prev)
2193 debug_printf (" Already queued %d\n",
2194 sig->signal);
2195
2196 debug_printf (" (no more currently queued signals)\n");
2197 }
2198
2199 /* Don't enqueue non-RT signals if they are already in the deferred
2200 queue. (SIGSTOP being the easiest signal to see ending up here
2201 twice) */
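/* Real-time signals, by contrast, queue in the kernel, so each
   instance is significant and is enqueued unconditionally.  */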
2202 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2203 {
2204 struct pending_signals *sig;
2205
2206 for (sig = lwp->pending_signals_to_report;
2207 sig != NULL;
2208 sig = sig->prev)
2209 {
2210 if (sig->signal == WSTOPSIG (*wstat))
2211 {
2212 if (debug_threads)
2213 debug_printf ("Not requeuing already queued non-RT signal %d"
2214 " for LWP %ld\n",
2215 sig->signal,
2216 lwpid_of (thread));
2217 return;
2218 }
2219 }
2220 }
2221
2222 p_sig = XCNEW (struct pending_signals);
2223 p_sig->prev = lwp->pending_signals_to_report;
2224 p_sig->signal = WSTOPSIG (*wstat);
2225
2226 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2227 &p_sig->info);
2228
2229 lwp->pending_signals_to_report = p_sig;
2230 }
2231
2232 /* Dequeue one signal from the "signals to report later when out of
2233 the jump pad" list. */
2234
2235 static int
2236 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2237 {
2238 struct thread_info *thread = get_lwp_thread (lwp);
2239
2240 if (lwp->pending_signals_to_report != NULL)
2241 {
2242 struct pending_signals **p_sig;
2243
2244 p_sig = &lwp->pending_signals_to_report;
2245 while ((*p_sig)->prev != NULL)
2246 p_sig = &(*p_sig)->prev;
2247
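/* W_STOPCODE synthesizes a wait status encoding "stopped by this
   signal", i.e., what waitpid would have reported had the LWP just
   stopped with it.  */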
2248 *wstat = W_STOPCODE ((*p_sig)->signal);
2249 if ((*p_sig)->info.si_signo != 0)
2250 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2251 &(*p_sig)->info);
2252 free (*p_sig);
2253 *p_sig = NULL;
2254
2255 if (debug_threads)
2256 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2257 WSTOPSIG (*wstat), lwpid_of (thread));
2258
2259 if (debug_threads)
2260 {
2261 struct pending_signals *sig;
2262
2263 for (sig = lwp->pending_signals_to_report;
2264 sig != NULL;
2265 sig = sig->prev)
2266 debug_printf (" Still queued %d\n",
2267 sig->signal);
2268
2269 debug_printf (" (no more queued signals)\n");
2270 }
2271
2272 return 1;
2273 }
2274
2275 return 0;
2276 }
2277
2278 /* Fetch the possibly triggered data watchpoint info and store it in
2279 CHILD.
2280
2281 On some archs, like x86, that use debug registers to set
2282 watchpoints, it's possible that the way to know which watched
2283 address trapped is to check the register that is used to select
2284 which address to watch. Problem is, between setting the watchpoint
2285 and reading back which data address trapped, the user may change
2286 the set of watchpoints, and, as a consequence, GDB changes the
2287 debug registers in the inferior. To avoid reading back a stale
2288 stopped-data-address when that happens, we cache in LP the fact
2289 that a watchpoint trapped, and the corresponding data address, as
2290 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2291 registers meanwhile, we have the cached data we can rely on. */
2292
2293 static int
2294 check_stopped_by_watchpoint (struct lwp_info *child)
2295 {
2296 if (the_low_target.stopped_by_watchpoint != NULL)
2297 {
2298 struct thread_info *saved_thread;
2299
2300 saved_thread = current_thread;
2301 current_thread = get_lwp_thread (child);
2302
2303 if (the_low_target.stopped_by_watchpoint ())
2304 {
2305 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2306
2307 if (the_low_target.stopped_data_address != NULL)
2308 child->stopped_data_address
2309 = the_low_target.stopped_data_address ();
2310 else
2311 child->stopped_data_address = 0;
2312 }
2313
2314 current_thread = saved_thread;
2315 }
2316
2317 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2318 }
2319
2320 /* Return the ptrace options that we want to try to enable. */
2321
2322 static int
2323 linux_low_ptrace_options (int attached)
2324 {
2325 client_state &cs = get_client_state ();
2326 int options = 0;
2327
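/* PTRACE_O_EXITKILL makes the kernel SIGKILL the tracee if gdbserver
   itself exits; we only want that for inferiors we spawned, not for
   ones we attached to.  */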
2328 if (!attached)
2329 options |= PTRACE_O_EXITKILL;
2330
2331 if (cs.report_fork_events)
2332 options |= PTRACE_O_TRACEFORK;
2333
2334 if (cs.report_vfork_events)
2335 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2336
2337 if (cs.report_exec_events)
2338 options |= PTRACE_O_TRACEEXEC;
2339
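/* With PTRACE_O_TRACESYSGOOD, syscall stops report SIGTRAP | 0x80
   (SYSCALL_SIGTRAP), so they can be told apart from breakpoint
   traps.  */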
2340 options |= PTRACE_O_TRACESYSGOOD;
2341
2342 return options;
2343 }
2344
2345 /* Do low-level handling of the event, and check if we should go on
2346 and pass it to caller code. Return the affected lwp if we are, or
2347 NULL otherwise. */
2348
2349 static struct lwp_info *
2350 linux_low_filter_event (int lwpid, int wstat)
2351 {
2352 client_state &cs = get_client_state ();
2353 struct lwp_info *child;
2354 struct thread_info *thread;
2355 int have_stop_pc = 0;
2356
2357 child = find_lwp_pid (ptid_t (lwpid));
2358
2359 /* Check for stop events reported by a process we didn't already
2360 know about - anything not already in our LWP list.
2361
2362 If we're expecting to receive stopped processes after
2363 fork, vfork, and clone events, then we'll just add the
2364 new one to our list and go back to waiting for the event
2365 to be reported - the stopped process might be returned
2366 from waitpid before or after the event is.
2367
2368 But note the case of a non-leader thread exec'ing after the
2369 leader having exited, and gone from our lists (because
2370 check_zombie_leaders deleted it). The non-leader thread
2371 changes its tid to the tgid. */
2372
2373 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2374 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2375 {
2376 ptid_t child_ptid;
2377
2378 /* A multi-thread exec after we had seen the leader exiting. */
2379 if (debug_threads)
2380 {
2381 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2382 "after exec.\n", lwpid);
2383 }
2384
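/* After the exec, the execing thread's tid equals the tgid, so the
   new ptid uses LWPID for both the pid and lwp fields.  */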
2385 child_ptid = ptid_t (lwpid, lwpid, 0);
2386 child = add_lwp (child_ptid);
2387 child->stopped = 1;
2388 current_thread = child->thread;
2389 }
2390
2391 /* If we didn't find a process, one of two things presumably happened:
2392 - A process we started and then detached from has exited. Ignore it.
2393 - A process we are controlling has forked and the new child's stop
2394 was reported to us by the kernel. Save its PID. */
2395 if (child == NULL && WIFSTOPPED (wstat))
2396 {
2397 add_to_pid_list (&stopped_pids, lwpid, wstat);
2398 return NULL;
2399 }
2400 else if (child == NULL)
2401 return NULL;
2402
2403 thread = get_lwp_thread (child);
2404
2405 child->stopped = 1;
2406
2407 child->last_status = wstat;
2408
2409 /* Check if the thread has exited. */
2410 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2411 {
2412 if (debug_threads)
2413 debug_printf ("LLFE: %d exited.\n", lwpid);
2414
2415 if (finish_step_over (child))
2416 {
2417 /* Unsuspend all other LWPs, and set them back running again. */
2418 unsuspend_all_lwps (child);
2419 }
2420
2421 /* If there is at least one more LWP, then the exit signal was
2422 not the end of the debugged application and should be
2423 ignored, unless GDB wants to hear about thread exits. */
2424 if (cs.report_thread_events
2425 || last_thread_of_process_p (pid_of (thread)))
2426 {
2427 /* Events are serialized to the GDB core, and we can't report
2428 this one right now. Leave the status pending for
2429 the next time we're able to report it. */
2430 mark_lwp_dead (child, wstat);
2431 return child;
2432 }
2433 else
2434 {
2435 delete_lwp (child);
2436 return NULL;
2437 }
2438 }
2439
2440 gdb_assert (WIFSTOPPED (wstat));
2441
2442 if (WIFSTOPPED (wstat))
2443 {
2444 struct process_info *proc;
2445
2446 /* Architecture-specific setup after inferior is running. */
2447 proc = find_process_pid (pid_of (thread));
2448 if (proc->tdesc == NULL)
2449 {
2450 if (proc->attached)
2451 {
2452 /* This needs to happen after we have attached to the
2453 inferior and it is stopped for the first time, but
2454 before we access any inferior registers. */
2455 linux_arch_setup_thread (thread);
2456 }
2457 else
2458 {
2459 /* The process is started, but GDBserver will do
2460 architecture-specific setup after the program stops at
2461 the first instruction. */
2462 child->status_pending_p = 1;
2463 child->status_pending = wstat;
2464 return child;
2465 }
2466 }
2467 }
2468
2469 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2470 {
2471 struct process_info *proc = find_process_pid (pid_of (thread));
2472 int options = linux_low_ptrace_options (proc->attached);
2473
2474 linux_enable_event_reporting (lwpid, options);
2475 child->must_set_ptrace_flags = 0;
2476 }
2477
2478 /* Always update syscall_state, even if it will be filtered later. */
2479 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2480 {
2481 child->syscall_state
2482 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2483 ? TARGET_WAITKIND_SYSCALL_RETURN
2484 : TARGET_WAITKIND_SYSCALL_ENTRY);
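/* I.e., each SYSCALL_SIGTRAP toggles entry <-> return; any other
   stop resets the state to IGNORE below, so the next syscall stop
   is again treated as an entry.  */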
2485 }
2486 else
2487 {
2488 /* Almost all other ptrace-stops are known to be outside of system
2489 calls, with further exceptions in handle_extended_wait. */
2490 child->syscall_state = TARGET_WAITKIND_IGNORE;
2491 }
2492
2493 /* Be careful to not overwrite stop_pc until save_stop_reason is
2494 called. */
2495 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2496 && linux_is_extended_waitstatus (wstat))
2497 {
2498 child->stop_pc = get_pc (child);
2499 if (handle_extended_wait (&child, wstat))
2500 {
2501 /* The event has been handled, so just return without
2502 reporting it. */
2503 return NULL;
2504 }
2505 }
2506
2507 if (linux_wstatus_maybe_breakpoint (wstat))
2508 {
2509 if (save_stop_reason (child))
2510 have_stop_pc = 1;
2511 }
2512
2513 if (!have_stop_pc)
2514 child->stop_pc = get_pc (child);
2515
2516 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2517 && child->stop_expected)
2518 {
2519 if (debug_threads)
2520 debug_printf ("Expected stop.\n");
2521 child->stop_expected = 0;
2522
2523 if (thread->last_resume_kind == resume_stop)
2524 {
2525 /* We want to report the stop to the core. Treat the
2526 SIGSTOP as a normal event. */
2527 if (debug_threads)
2528 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2529 target_pid_to_str (ptid_of (thread)));
2530 }
2531 else if (stopping_threads != NOT_STOPPING_THREADS)
2532 {
2533 /* Stopping threads. We don't want this SIGSTOP to end up
2534 pending. */
2535 if (debug_threads)
2536 debug_printf ("LLW: SIGSTOP caught for %s "
2537 "while stopping threads.\n",
2538 target_pid_to_str (ptid_of (thread)));
2539 return NULL;
2540 }
2541 else
2542 {
2543 /* This is a delayed SIGSTOP. Filter out the event. */
2544 if (debug_threads)
2545 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2546 child->stepping ? "step" : "continue",
2547 target_pid_to_str (ptid_of (thread)));
2548
2549 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2550 return NULL;
2551 }
2552 }
2553
2554 child->status_pending_p = 1;
2555 child->status_pending = wstat;
2556 return child;
2557 }
2558
2559 /* Return true if THREAD is doing hardware single step. */
2560
2561 static int
2562 maybe_hw_step (struct thread_info *thread)
2563 {
2564 if (can_hardware_single_step ())
2565 return 1;
2566 else
2567 {
2568 /* GDBserver must insert single-step breakpoint for software
2569 single step. */
2570 gdb_assert (has_single_step_breakpoints (thread));
2571 return 0;
2572 }
2573 }
2574
2575 /* Resume LWPs that are currently stopped without any pending status
2576 to report, but are resumed from the core's perspective. */
2577
2578 static void
2579 resume_stopped_resumed_lwps (thread_info *thread)
2580 {
2581 struct lwp_info *lp = get_thread_lwp (thread);
2582
2583 if (lp->stopped
2584 && !lp->suspended
2585 && !lp->status_pending_p
2586 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2587 {
2588 int step = 0;
2589
2590 if (thread->last_resume_kind == resume_step)
2591 step = maybe_hw_step (thread);
2592
2593 if (debug_threads)
2594 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2595 target_pid_to_str (ptid_of (thread)),
2596 paddress (lp->stop_pc),
2597 step);
2598
2599 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2600 }
2601 }
2602
2603 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2604 match FILTER_PTID (leaving others pending). The PTIDs can be:
2605 minus_one_ptid, to specify any child; a pid PTID, specifying all
2606 lwps of a thread group; or a PTID representing a single lwp. Store
2607 the stop status through the status pointer WSTAT. OPTIONS is
2608 passed to the waitpid call. Return 0 if no event was found and
2609 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2610 was found. Return the PID of the stopped child otherwise. */
2611
2612 static int
2613 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2614 int *wstatp, int options)
2615 {
2616 struct thread_info *event_thread;
2617 struct lwp_info *event_child, *requested_child;
2618 sigset_t block_mask, prev_mask;
2619
2620 retry:
2621 /* N.B. event_thread points to the thread_info struct that contains
2622 event_child. Keep them in sync. */
2623 event_thread = NULL;
2624 event_child = NULL;
2625 requested_child = NULL;
2626
2627 /* Check for a lwp with a pending status. */
2628
2629 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2630 {
2631 event_thread = find_thread_in_random ([&] (thread_info *thread)
2632 {
2633 return status_pending_p_callback (thread, filter_ptid);
2634 });
2635
2636 if (event_thread != NULL)
2637 event_child = get_thread_lwp (event_thread);
2638 if (debug_threads && event_thread)
2639 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2640 }
2641 else if (filter_ptid != null_ptid)
2642 {
2643 requested_child = find_lwp_pid (filter_ptid);
2644
2645 if (stopping_threads == NOT_STOPPING_THREADS
2646 && requested_child->status_pending_p
2647 && (requested_child->collecting_fast_tracepoint
2648 != fast_tpoint_collect_result::not_collecting))
2649 {
2650 enqueue_one_deferred_signal (requested_child,
2651 &requested_child->status_pending);
2652 requested_child->status_pending_p = 0;
2653 requested_child->status_pending = 0;
2654 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2655 }
2656
2657 if (requested_child->suspended
2658 && requested_child->status_pending_p)
2659 {
2660 internal_error (__FILE__, __LINE__,
2661 "requesting an event out of a"
2662 " suspended child?");
2663 }
2664
2665 if (requested_child->status_pending_p)
2666 {
2667 event_child = requested_child;
2668 event_thread = get_lwp_thread (event_child);
2669 }
2670 }
2671
2672 if (event_child != NULL)
2673 {
2674 if (debug_threads)
2675 debug_printf ("Got an event from pending child %ld (%04x)\n",
2676 lwpid_of (event_thread), event_child->status_pending);
2677 *wstatp = event_child->status_pending;
2678 event_child->status_pending_p = 0;
2679 event_child->status_pending = 0;
2680 current_thread = event_thread;
2681 return lwpid_of (event_thread);
2682 }
2683
2684 /* But if we don't find a pending event, we'll have to wait.
2685
2686 We only enter this loop if no process has a pending wait status.
2687 Thus any action taken in response to a wait status inside this
2688 loop is responding as soon as we detect the status, not after any
2689 pending events. */
2690
2691 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2692 all signals while here. */
2693 sigfillset (&block_mask);
2694 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
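/* A SIGCHLD arriving from here on stays pending, and makes the
   sigsuspend below return immediately once the mask is restored, so
   no wakeup can be lost between the waitpid and the suspend.  */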
2695
2696 /* Always pull all events out of the kernel. We'll randomly select
2697 an event LWP out of all that have events, to prevent
2698 starvation. */
2699 while (event_child == NULL)
2700 {
2701 pid_t ret = 0;
2702
2703 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2704 quirks:
2705
2706 - If the thread group leader exits while other threads in the
2707 thread group still exist, waitpid(TGID, ...) hangs. That
2708 waitpid won't return an exit status until the other threads
2709 in the group are reaped.
2710
2711 - When a non-leader thread execs, that thread just vanishes
2712 without reporting an exit (so we'd hang if we waited for it
2713 explicitly in that case). The exec event is reported to
2714 the TGID pid. */
2715 errno = 0;
2716 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2717
2718 if (debug_threads)
2719 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2720 ret, errno ? strerror (errno) : "ERRNO-OK");
2721
2722 if (ret > 0)
2723 {
2724 if (debug_threads)
2725 {
2726 debug_printf ("LLW: waitpid %ld received %s\n",
2727 (long) ret, status_to_str (*wstatp));
2728 }
2729
2730 /* Filter all events. IOW, leave all events pending. We'll
2731 randomly select an event LWP out of all that have events
2732 below. */
2733 linux_low_filter_event (ret, *wstatp);
2734 /* Retry until nothing comes out of waitpid. A single
2735 SIGCHLD can indicate more than one child stopped. */
2736 continue;
2737 }
2738
2739 /* Now that we've pulled all events out of the kernel, resume
2740 LWPs that don't have an interesting event to report. */
2741 if (stopping_threads == NOT_STOPPING_THREADS)
2742 for_each_thread (resume_stopped_resumed_lwps);
2743
2744 /* ... and find an LWP with a status to report to the core, if
2745 any. */
2746 event_thread = find_thread_in_random ([&] (thread_info *thread)
2747 {
2748 return status_pending_p_callback (thread, filter_ptid);
2749 });
2750
2751 if (event_thread != NULL)
2752 {
2753 event_child = get_thread_lwp (event_thread);
2754 *wstatp = event_child->status_pending;
2755 event_child->status_pending_p = 0;
2756 event_child->status_pending = 0;
2757 break;
2758 }
2759
2760 /* Check for zombie thread group leaders. Those can't be reaped
2761 until all other threads in the thread group are. */
2762 check_zombie_leaders ();
2763
2764 auto not_stopped = [&] (thread_info *thread)
2765 {
2766 return not_stopped_callback (thread, wait_ptid);
2767 };
2768
2769 /* If there are no resumed children left in the set of LWPs we
2770 want to wait for, bail. We can't just block in
2771 waitpid/sigsuspend, because lwps might have been left stopped
2772 in trace-stop state, and we'd be stuck forever waiting for
2773 their status to change (which would only happen if we resumed
2774 them). Even if WNOHANG is set, this return code is preferred
2775 over 0 (below), as it is more detailed. */
2776 if (find_thread (not_stopped) == NULL)
2777 {
2778 if (debug_threads)
2779 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2780 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2781 return -1;
2782 }
2783
2784 /* No interesting event to report to the caller. */
2785 if ((options & WNOHANG))
2786 {
2787 if (debug_threads)
2788 debug_printf ("WNOHANG set, no event found\n");
2789
2790 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2791 return 0;
2792 }
2793
2794 /* Block until we get an event reported with SIGCHLD. */
2795 if (debug_threads)
2796 debug_printf ("sigsuspend'ing\n");
2797
2798 sigsuspend (&prev_mask);
2799 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2800 goto retry;
2801 }
2802
2803 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2804
2805 current_thread = event_thread;
2806
2807 return lwpid_of (event_thread);
2808 }
2809
2810 /* Wait for an event from child(ren) PTID. PTIDs can be:
2811 minus_one_ptid, to specify any child; a pid PTID, specifying all
2812 lwps of a thread group; or a PTID representing a single lwp. Store
2813 the stop status through the status pointer WSTAT. OPTIONS is
2814 passed to the waitpid call. Return 0 if no event was found and
2815 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2816 were found. Return the PID of the stopped child otherwise. */
2817
2818 static int
2819 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2820 {
2821 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2822 }
2823
2824 /* Select one LWP out of those that have events pending. */
2825
2826 static void
2827 select_event_lwp (struct lwp_info **orig_lp)
2828 {
2829 int random_selector;
2830 struct thread_info *event_thread = NULL;
2831
2832 /* In all-stop, give preference to the LWP that is being
2833 single-stepped. There will be at most one, and it's the LWP that
2834 the core is most interested in. If we didn't do this, then we'd
2835 have to handle pending step SIGTRAPs somehow in case the core
2836 later continues the previously-stepped thread, otherwise we'd
2837 report the pending SIGTRAP, and the core, not having stepped the
2838 thread, wouldn't understand what the trap was for, and therefore
2839 would report it to the user as a random signal. */
2840 if (!non_stop)
2841 {
2842 event_thread = find_thread ([] (thread_info *thread)
2843 {
2844 lwp_info *lp = get_thread_lwp (thread);
2845
2846 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2847 && thread->last_resume_kind == resume_step
2848 && lp->status_pending_p);
2849 });
2850
2851 if (event_thread != NULL)
2852 {
2853 if (debug_threads)
2854 debug_printf ("SEL: Select single-step %s\n",
2855 target_pid_to_str (ptid_of (event_thread)));
2856 }
2857 }
2858 if (event_thread == NULL)
2859 {
2860 /* No single-stepping LWP. Select one at random, out of those
2861 which have had events. */
2862
2863 /* First see how many events we have. */
2864 int num_events = 0;
2865 for_each_thread ([&] (thread_info *thread)
2866 {
2867 lwp_info *lp = get_thread_lwp (thread);
2868
2869 /* Count only resumed LWPs that have an event pending. */
2870 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2871 && lp->status_pending_p)
2872 num_events++;
2873 });
2874 gdb_assert (num_events > 0);
2875
2876 /* Now randomly pick a LWP out of those that have had
2877 events. */
2878 random_selector = (int)
2879 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
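/* The scaling above maps rand ()'s 0..RAND_MAX range uniformly onto
   0..num_events-1; e.g., assuming num_events == 3 and a 31-bit
   RAND_MAX, rand () == RAND_MAX / 2 selects event #1.  */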
2880
2881 if (debug_threads && num_events > 1)
2882 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2883 num_events, random_selector);
2884
2885 event_thread = find_thread ([&] (thread_info *thread)
2886 {
2887 lwp_info *lp = get_thread_lwp (thread);
2888
2889 /* Select only resumed LWPs that have an event pending. */
2890 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2891 && lp->status_pending_p)
2892 if (random_selector-- == 0)
2893 return true;
2894
2895 return false;
2896 });
2897 }
2898
2899 if (event_thread != NULL)
2900 {
2901 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2902
2903 /* Switch the event LWP. */
2904 *orig_lp = event_lp;
2905 }
2906 }
2907
2908 /* Decrement the suspend count of all LWPs, except EXCEPT, if it is
2909 non-NULL. */
2910
2911 static void
2912 unsuspend_all_lwps (struct lwp_info *except)
2913 {
2914 for_each_thread ([&] (thread_info *thread)
2915 {
2916 lwp_info *lwp = get_thread_lwp (thread);
2917
2918 if (lwp != except)
2919 lwp_suspended_decr (lwp);
2920 });
2921 }
2922
2923 static void move_out_of_jump_pad_callback (thread_info *thread);
2924 static bool stuck_in_jump_pad_callback (thread_info *thread);
2925 static bool lwp_running (thread_info *thread);
2926 static ptid_t linux_wait_1 (ptid_t ptid,
2927 struct target_waitstatus *ourstatus,
2928 int target_options);
2929
2930 /* Stabilize threads (move out of jump pads).
2931
2932 If a thread is midway through collecting a fast tracepoint, we need to
2933 finish the collection and move it out of the jump pad before
2934 reporting the signal.
2935
2936 This avoids recursion while collecting (when a signal arrives
2937 midway, and the signal handler itself collects), which would trash
2938 the trace buffer. In case the user set a breakpoint in a signal
2939 handler, this avoids the backtrace showing the jump pad, etc..
2940 Most importantly, there are certain things we can't do safely if
2941 threads are stopped in a jump pad (or in its callee's). For
2942 example:
2943
2944 - starting a new trace run. A thread still collecting the
2945 previous run could trash the trace buffer when resumed. The trace
2946 buffer control structures would have been reset but the thread had
2947 no way to tell. The thread could even be midway through memcpy'ing
2948 to the buffer, which when resumed would make it clobber the trace
2949 buffer that had been set up for the new run.
2950
2951 - we can't rewrite/reuse the jump pads for new tracepoints
2952 safely. Say you do tstart while a thread is stopped midway through
2953 collecting. When the thread is later resumed, it finishes the
2954 collection, and returns to the jump pad, to execute the original
2955 instruction that was under the tracepoint jump at the time the
2956 older run had been started. If the jump pad had been rewritten
2957 since then for something else in the new run, the thread would now
2958 execute the wrong / random instructions. */
2959
2960 static void
2961 linux_stabilize_threads (void)
2962 {
2963 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2964
2965 if (thread_stuck != NULL)
2966 {
2967 if (debug_threads)
2968 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2969 lwpid_of (thread_stuck));
2970 return;
2971 }
2972
2973 thread_info *saved_thread = current_thread;
2974
2975 stabilizing_threads = 1;
2976
2977 /* Kick 'em all. */
2978 for_each_thread (move_out_of_jump_pad_callback);
2979
2980 /* Loop until all are stopped out of the jump pads. */
2981 while (find_thread (lwp_running) != NULL)
2982 {
2983 struct target_waitstatus ourstatus;
2984 struct lwp_info *lwp;
2985 int wstat;
2986
2987 /* Note that we go through the full wait event loop. While
2988 moving threads out of jump pad, we need to be able to step
2989 over internal breakpoints and such. */
2990 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2991
2992 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2993 {
2994 lwp = get_thread_lwp (current_thread);
2995
2996 /* "Lock" the LWP, so it isn't resumed while we handle its status. */
2997 lwp_suspended_inc (lwp);
2998
2999 if (ourstatus.value.sig != GDB_SIGNAL_0
3000 || current_thread->last_resume_kind == resume_stop)
3001 {
3002 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3003 enqueue_one_deferred_signal (lwp, &wstat);
3004 }
3005 }
3006 }
3007
3008 unsuspend_all_lwps (NULL);
3009
3010 stabilizing_threads = 0;
3011
3012 current_thread = saved_thread;
3013
3014 if (debug_threads)
3015 {
3016 thread_stuck = find_thread (stuck_in_jump_pad_callback);
3017
3018 if (thread_stuck != NULL)
3019 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3020 lwpid_of (thread_stuck));
3021 }
3022 }
3023
3024 /* Convenience function that is called when the kernel reports an
3025 event that is not passed out to GDB. */
3026
3027 static ptid_t
3028 ignore_event (struct target_waitstatus *ourstatus)
3029 {
3030 /* If we got an event, there may still be others, as a single
3031 SIGCHLD can indicate more than one child stopped. This forces
3032 another target_wait call. */
3033 async_file_mark ();
3034
3035 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3036 return null_ptid;
3037 }
3038
3039 /* Convenience function that is called when the kernel reports an exit
3040 event. This decides whether to report the event to GDB as a
3041 process exit event, a thread exit event, or to suppress the
3042 event. */
3043
3044 static ptid_t
3045 filter_exit_event (struct lwp_info *event_child,
3046 struct target_waitstatus *ourstatus)
3047 {
3048 client_state &cs = get_client_state ();
3049 struct thread_info *thread = get_lwp_thread (event_child);
3050 ptid_t ptid = ptid_of (thread);
3051
3052 if (!last_thread_of_process_p (pid_of (thread)))
3053 {
3054 if (cs.report_thread_events)
3055 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3056 else
3057 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3058
3059 delete_lwp (event_child);
3060 }
3061 return ptid;
3062 }
3063
3064 /* Returns 1 if GDB is interested in any event_child syscalls. */
3065
3066 static int
3067 gdb_catching_syscalls_p (struct lwp_info *event_child)
3068 {
3069 struct thread_info *thread = get_lwp_thread (event_child);
3070 struct process_info *proc = get_thread_process (thread);
3071
3072 return !proc->syscalls_to_catch.empty ();
3073 }
3074
3075 /* Returns 1 if GDB is interested in the event_child syscall.
3076 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3077
3078 static int
3079 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3080 {
3081 int sysno;
3082 struct thread_info *thread = get_lwp_thread (event_child);
3083 struct process_info *proc = get_thread_process (thread);
3084
3085 if (proc->syscalls_to_catch.empty ())
3086 return 0;
3087
3088 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3089 return 1;
3090
3091 get_syscall_trapinfo (event_child, &sysno);
3092
3093 for (int iter : proc->syscalls_to_catch)
3094 if (iter == sysno)
3095 return 1;
3096
3097 return 0;
3098 }
3099
3100 /* Wait for process, returns status. */
3101
3102 static ptid_t
3103 linux_wait_1 (ptid_t ptid,
3104 struct target_waitstatus *ourstatus, int target_options)
3105 {
3106 client_state &cs = get_client_state ();
3107 int w;
3108 struct lwp_info *event_child;
3109 int options;
3110 int pid;
3111 int step_over_finished;
3112 int bp_explains_trap;
3113 int maybe_internal_trap;
3114 int report_to_gdb;
3115 int trace_event;
3116 int in_step_range;
3117 int any_resumed;
3118
3119 if (debug_threads)
3120 {
3121 debug_enter ();
3122 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3123 }
3124
3125 /* Translate generic target options into linux options. */
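/* __WALL asks waitpid to report both clone children (threads) and
   regular children, regardless of their termination signal.  */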
3126 options = __WALL;
3127 if (target_options & TARGET_WNOHANG)
3128 options |= WNOHANG;
3129
3130 bp_explains_trap = 0;
3131 trace_event = 0;
3132 in_step_range = 0;
3133 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3134
3135 auto status_pending_p_any = [&] (thread_info *thread)
3136 {
3137 return status_pending_p_callback (thread, minus_one_ptid);
3138 };
3139
3140 auto not_stopped = [&] (thread_info *thread)
3141 {
3142 return not_stopped_callback (thread, minus_one_ptid);
3143 };
3144
3145 /* Find a resumed LWP, if any. */
3146 if (find_thread (status_pending_p_any) != NULL)
3147 any_resumed = 1;
3148 else if (find_thread (not_stopped) != NULL)
3149 any_resumed = 1;
3150 else
3151 any_resumed = 0;
3152
3153 if (step_over_bkpt == null_ptid)
3154 pid = linux_wait_for_event (ptid, &w, options);
3155 else
3156 {
3157 if (debug_threads)
3158 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3159 target_pid_to_str (step_over_bkpt));
3160 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3161 }
3162
3163 if (pid == 0 || (pid == -1 && !any_resumed))
3164 {
3165 gdb_assert (target_options & TARGET_WNOHANG);
3166
3167 if (debug_threads)
3168 {
3169 debug_printf ("linux_wait_1 ret = null_ptid, "
3170 "TARGET_WAITKIND_IGNORE\n");
3171 debug_exit ();
3172 }
3173
3174 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3175 return null_ptid;
3176 }
3177 else if (pid == -1)
3178 {
3179 if (debug_threads)
3180 {
3181 debug_printf ("linux_wait_1 ret = null_ptid, "
3182 "TARGET_WAITKIND_NO_RESUMED\n");
3183 debug_exit ();
3184 }
3185
3186 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3187 return null_ptid;
3188 }
3189
3190 event_child = get_thread_lwp (current_thread);
3191
3192 /* linux_wait_for_event only returns an exit status for the last
3193 child of a process. Report it. */
3194 if (WIFEXITED (w) || WIFSIGNALED (w))
3195 {
3196 if (WIFEXITED (w))
3197 {
3198 ourstatus->kind = TARGET_WAITKIND_EXITED;
3199 ourstatus->value.integer = WEXITSTATUS (w);
3200
3201 if (debug_threads)
3202 {
3203 debug_printf ("linux_wait_1 ret = %s, exited with "
3204 "retcode %d\n",
3205 target_pid_to_str (ptid_of (current_thread)),
3206 WEXITSTATUS (w));
3207 debug_exit ();
3208 }
3209 }
3210 else
3211 {
3212 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3213 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3214
3215 if (debug_threads)
3216 {
3217 debug_printf ("linux_wait_1 ret = %s, terminated with "
3218 "signal %d\n",
3219 target_pid_to_str (ptid_of (current_thread)),
3220 WTERMSIG (w));
3221 debug_exit ();
3222 }
3223 }
3224
3225 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3226 return filter_exit_event (event_child, ourstatus);
3227
3228 return ptid_of (current_thread);
3229 }
3230
3231 /* If step-over executes a breakpoint instruction, in the case of a
3232 hardware single step it means a gdb/gdbserver breakpoint had been
3233 planted on top of a permanent breakpoint, in the case of a software
3234 single step it may just mean that gdbserver hit the reinsert breakpoint.
3235 The PC has been adjusted by save_stop_reason to point at
3236 the breakpoint address.
3237 So in the case of the hardware single step advance the PC manually
3238 past the breakpoint and in the case of software single step advance only
3239 if it's not the single_step_breakpoint we are hitting.
3240 This avoids the program trapping a permanent breakpoint
3241 forever. */
3242 if (step_over_bkpt != null_ptid
3243 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3244 && (event_child->stepping
3245 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3246 {
3247 int increment_pc = 0;
3248 int breakpoint_kind = 0;
3249 CORE_ADDR stop_pc = event_child->stop_pc;
3250
3251 breakpoint_kind =
3252 the_target->breakpoint_kind_from_current_state (&stop_pc);
3253 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3254
3255 if (debug_threads)
3256 {
3257 debug_printf ("step-over for %s executed software breakpoint\n",
3258 target_pid_to_str (ptid_of (current_thread)));
3259 }
3260
3261 if (increment_pc != 0)
3262 {
3263 struct regcache *regcache
3264 = get_thread_regcache (current_thread, 1);
3265
3266 event_child->stop_pc += increment_pc;
3267 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3268
3269 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3270 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3271 }
3272 }
3273
3274 /* If this event was not handled before, and is not a SIGTRAP, we
3275 report it. SIGILL and SIGSEGV are also treated as traps in case
3276 a breakpoint is inserted at the current PC. If this target does
3277 not support internal breakpoints at all, we also report the
3278 SIGTRAP without further processing; it's of no concern to us. */
3279 maybe_internal_trap
3280 = (supports_breakpoints ()
3281 && (WSTOPSIG (w) == SIGTRAP
3282 || ((WSTOPSIG (w) == SIGILL
3283 || WSTOPSIG (w) == SIGSEGV)
3284 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3285
3286 if (maybe_internal_trap)
3287 {
3288 /* Handle anything that requires bookkeeping before deciding to
3289 report the event or continue waiting. */
3290
3291 /* First check if we can explain the SIGTRAP with an internal
3292 breakpoint, or if we should possibly report the event to GDB.
3293 Do this before anything that may remove or insert a
3294 breakpoint. */
3295 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3296
3297 /* We have a SIGTRAP, possibly a step-over dance has just
3298 finished. If so, tweak the state machine accordingly,
3299 reinsert breakpoints and delete any single-step
3300 breakpoints. */
3301 step_over_finished = finish_step_over (event_child);
3302
3303 /* Now invoke the callbacks of any internal breakpoints there. */
3304 check_breakpoints (event_child->stop_pc);
3305
3306 /* Handle tracepoint data collecting. This may overflow the
3307 trace buffer, and cause a tracing stop, removing
3308 breakpoints. */
3309 trace_event = handle_tracepoints (event_child);
3310
3311 if (bp_explains_trap)
3312 {
3313 if (debug_threads)
3314 debug_printf ("Hit a gdbserver breakpoint.\n");
3315 }
3316 }
3317 else
3318 {
3319 /* We have some other signal, possibly a step-over dance was in
3320 progress, and it should be cancelled too. */
3321 step_over_finished = finish_step_over (event_child);
3322 }
3323
3324 /* We have all the data we need. Either report the event to GDB, or
3325 resume threads and keep waiting for more. */
3326
3327 /* If we're collecting a fast tracepoint, finish the collection and
3328 move out of the jump pad before delivering a signal. See
3329 linux_stabilize_threads. */
3330
3331 if (WIFSTOPPED (w)
3332 && WSTOPSIG (w) != SIGTRAP
3333 && supports_fast_tracepoints ()
3334 && agent_loaded_p ())
3335 {
3336 if (debug_threads)
3337 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3338 "to defer or adjust it.\n",
3339 WSTOPSIG (w), lwpid_of (current_thread));
3340
3341 /* Allow debugging the jump pad itself. */
3342 if (current_thread->last_resume_kind != resume_step
3343 && maybe_move_out_of_jump_pad (event_child, &w))
3344 {
3345 enqueue_one_deferred_signal (event_child, &w);
3346
3347 if (debug_threads)
3348 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3349 WSTOPSIG (w), lwpid_of (current_thread));
3350
3351 linux_resume_one_lwp (event_child, 0, 0, NULL);
3352
3353 if (debug_threads)
3354 debug_exit ();
3355 return ignore_event (ourstatus);
3356 }
3357 }
3358
3359 if (event_child->collecting_fast_tracepoint
3360 != fast_tpoint_collect_result::not_collecting)
3361 {
3362 if (debug_threads)
3363 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3364 "Check if we're already there.\n",
3365 lwpid_of (current_thread),
3366 (int) event_child->collecting_fast_tracepoint);
3367
3368 trace_event = 1;
3369
3370 event_child->collecting_fast_tracepoint
3371 = linux_fast_tracepoint_collecting (event_child, NULL);
3372
3373 if (event_child->collecting_fast_tracepoint
3374 != fast_tpoint_collect_result::before_insn)
3375 {
3376 /* No longer need this breakpoint. */
3377 if (event_child->exit_jump_pad_bkpt != NULL)
3378 {
3379 if (debug_threads)
3380 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3381 "stopping all threads momentarily.\n");
3382
3383 /* Other running threads could hit this breakpoint.
3384 We don't handle moribund locations like GDB does,
3385 instead we always pause all threads when removing
3386 breakpoints, so that any step-over or
3387 decr_pc_after_break adjustment is always taken
3388 care of while the breakpoint is still
3389 inserted. */
3390 stop_all_lwps (1, event_child);
3391
3392 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3393 event_child->exit_jump_pad_bkpt = NULL;
3394
3395 unstop_all_lwps (1, event_child);
3396
3397 gdb_assert (event_child->suspended >= 0);
3398 }
3399 }
3400
3401 if (event_child->collecting_fast_tracepoint
3402 == fast_tpoint_collect_result::not_collecting)
3403 {
3404 if (debug_threads)
3405 debug_printf ("fast tracepoint finished "
3406 "collecting successfully.\n");
3407
3408 /* We may have a deferred signal to report. */
3409 if (dequeue_one_deferred_signal (event_child, &w))
3410 {
3411 if (debug_threads)
3412 debug_printf ("dequeued one signal.\n");
3413 }
3414 else
3415 {
3416 if (debug_threads)
3417 debug_printf ("no deferred signals.\n");
3418
3419 if (stabilizing_threads)
3420 {
3421 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3422 ourstatus->value.sig = GDB_SIGNAL_0;
3423
3424 if (debug_threads)
3425 {
3426 debug_printf ("linux_wait_1 ret = %s, stopped "
3427 "while stabilizing threads\n",
3428 target_pid_to_str (ptid_of (current_thread)));
3429 debug_exit ();
3430 }
3431
3432 return ptid_of (current_thread);
3433 }
3434 }
3435 }
3436 }
3437
3438 /* Check whether GDB would be interested in this event. */
3439
3440 /* Check if GDB is interested in this syscall. */
3441 if (WIFSTOPPED (w)
3442 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3443 && !gdb_catch_this_syscall_p (event_child))
3444 {
3445 if (debug_threads)
3446 {
3447 debug_printf ("Ignored syscall for LWP %ld.\n",
3448 lwpid_of (current_thread));
3449 }
3450
3451 linux_resume_one_lwp (event_child, event_child->stepping,
3452 0, NULL);
3453
3454 if (debug_threads)
3455 debug_exit ();
3456 return ignore_event (ourstatus);
3457 }
3458
3459 /* If GDB is not interested in this signal, don't stop other
3460 threads, and don't report it to GDB. Just resume the inferior
3461 right away. We do this for threading-related signals as well as
3462 any that GDB specifically requested we ignore. But never ignore
3463 SIGSTOP if we sent it ourselves, and do not ignore signals when
3464 stepping - they may require special handling to skip the signal
3465 handler. Also never ignore signals that could be caused by a
3466 breakpoint. */
3467 if (WIFSTOPPED (w)
3468 && current_thread->last_resume_kind != resume_step
3469 && (
3470 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3471 (current_process ()->priv->thread_db != NULL
3472 && (WSTOPSIG (w) == __SIGRTMIN
3473 || WSTOPSIG (w) == __SIGRTMIN + 1))
3474 ||
3475 #endif
3476 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3477 && !(WSTOPSIG (w) == SIGSTOP
3478 && current_thread->last_resume_kind == resume_stop)
3479 && !linux_wstatus_maybe_breakpoint (w))))
3480 {
3481 siginfo_t info, *info_p;
3482
3483 if (debug_threads)
3484 debug_printf ("Ignored signal %d for LWP %ld.\n",
3485 WSTOPSIG (w), lwpid_of (current_thread));
3486
3487 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3488 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3489 info_p = &info;
3490 else
3491 info_p = NULL;
3492
3493 if (step_over_finished)
3494 {
3495 /* We cancelled this thread's step-over above. We still
3496 need to unsuspend all other LWPs, and set them back
3497 running again while the signal handler runs. */
3498 unsuspend_all_lwps (event_child);
3499
3500 /* Enqueue the pending signal info so that proceed_all_lwps
3501 doesn't lose it. */
3502 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3503
3504 proceed_all_lwps ();
3505 }
3506 else
3507 {
3508 linux_resume_one_lwp (event_child, event_child->stepping,
3509 WSTOPSIG (w), info_p);
3510 }
3511
3512 if (debug_threads)
3513 debug_exit ();
3514
3515 return ignore_event (ourstatus);
3516 }
3517
3518 /* Note that all addresses are always "out of the step range" when
3519 there's no range to begin with. */
3520 in_step_range = lwp_in_step_range (event_child);
3521
3522 /* If GDB wanted this thread to single step, and the thread is out
3523 of the step range, we always want to report the SIGTRAP, and let
3524 GDB handle it. Watchpoints should always be reported. So should
3525 signals we can't explain. A SIGTRAP we can't explain could be a
3526 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3527 do, we'll be able to handle GDB breakpoints on top of internal
3528 breakpoints, by handling the internal breakpoint and still
3529 reporting the event to GDB. If we don't, we're out of luck, GDB
3530 won't see the breakpoint hit. If we see a single-step event but
3531 the thread should be continuing, don't pass the trap to gdb.
3532 That indicates that we had previously finished a single-step but
3533 left the single-step pending -- see
3534 complete_ongoing_step_over. */
3535 report_to_gdb = (!maybe_internal_trap
3536 || (current_thread->last_resume_kind == resume_step
3537 && !in_step_range)
3538 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3539 || (!in_step_range
3540 && !bp_explains_trap
3541 && !trace_event
3542 && !step_over_finished
3543 && !(current_thread->last_resume_kind == resume_continue
3544 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3545 || (gdb_breakpoint_here (event_child->stop_pc)
3546 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3547 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3548 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3549
3550 run_breakpoint_commands (event_child->stop_pc);
3551
3552 /* We found no reason GDB would want us to stop. We either hit one
3553 of our own breakpoints, or finished an internal step GDB
3554 shouldn't know about. */
3555 if (!report_to_gdb)
3556 {
3557 if (debug_threads)
3558 {
3559 if (bp_explains_trap)
3560 debug_printf ("Hit a gdbserver breakpoint.\n");
3561 if (step_over_finished)
3562 debug_printf ("Step-over finished.\n");
3563 if (trace_event)
3564 debug_printf ("Tracepoint event.\n");
3565 if (lwp_in_step_range (event_child))
3566 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3567 paddress (event_child->stop_pc),
3568 paddress (event_child->step_range_start),
3569 paddress (event_child->step_range_end));
3570 }
3571
3572 /* We're not reporting this breakpoint to GDB, so apply the
3573 decr_pc_after_break adjustment to the inferior's regcache
3574 ourselves. */
3575
3576 if (the_low_target.set_pc != NULL)
3577 {
3578 struct regcache *regcache
3579 = get_thread_regcache (current_thread, 1);
3580 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3581 }
3582
3583 if (step_over_finished)
3584 {
3585 /* If we have finished stepping over a breakpoint, we've
3586 stopped and suspended all LWPs momentarily except the
3587 stepping one. This is where we resume them all again.
3588 We're going to keep waiting, so use proceed, which
3589 handles stepping over the next breakpoint. */
3590 unsuspend_all_lwps (event_child);
3591 }
3592 else
3593 {
3594 /* Remove the single-step breakpoints if any. Note that there
3595 are no single-step breakpoints left if we finished stepping
3596 over. */
3597 if (can_software_single_step ()
3598 && has_single_step_breakpoints (current_thread))
3599 {
3600 stop_all_lwps (0, event_child);
3601 delete_single_step_breakpoints (current_thread);
3602 unstop_all_lwps (0, event_child);
3603 }
3604 }
3605
3606 if (debug_threads)
3607 debug_printf ("proceeding all threads.\n");
3608 proceed_all_lwps ();
3609
3610 if (debug_threads)
3611 debug_exit ();
3612
3613 return ignore_event (ourstatus);
3614 }
3615
3616 if (debug_threads)
3617 {
3618 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3619 {
3620 std::string str
3621 = target_waitstatus_to_string (&event_child->waitstatus);
3622
3623 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3624 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3625 }
3626 if (current_thread->last_resume_kind == resume_step)
3627 {
3628 if (event_child->step_range_start == event_child->step_range_end)
3629 debug_printf ("GDB wanted to single-step, reporting event.\n");
3630 else if (!lwp_in_step_range (event_child))
3631 debug_printf ("Out of step range, reporting event.\n");
3632 }
3633 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3634 debug_printf ("Stopped by watchpoint.\n");
3635 else if (gdb_breakpoint_here (event_child->stop_pc))
3636 debug_printf ("Stopped by GDB breakpoint.\n");
3637 debug_printf ("Hit a non-gdbserver trap event.\n");
3639 }
3640
3641 /* Alright, we're going to report a stop. */
3642
3643 /* Remove single-step breakpoints. */
3644 if (can_software_single_step ())
3645 {
3646 /* Decide whether to remove single-step breakpoints. If so, stop
3647 all lwps first, so that other threads won't hit a breakpoint in
3648 stale memory. */
3649 int remove_single_step_breakpoints_p = 0;
3650
3651 if (non_stop)
3652 {
3653 remove_single_step_breakpoints_p
3654 = has_single_step_breakpoints (current_thread);
3655 }
3656 else
3657 {
3658 /* In all-stop, a stop reply cancels all previous resume
3659 requests. Delete all single-step breakpoints. */
3660
3661 find_thread ([&] (thread_info *thread) {
3662 if (has_single_step_breakpoints (thread))
3663 {
3664 remove_single_step_breakpoints_p = 1;
3665 return true;
3666 }
3667
3668 return false;
3669 });
3670 }
3671
3672 if (remove_single_step_breakpoints_p)
3673 {
3674 	  /* If we remove single-step breakpoints from memory, stop all
3675 	     lwps, so that other threads won't hit a breakpoint left
3676 	     behind in stale memory.  */
3677 stop_all_lwps (0, event_child);
3678
3679 if (non_stop)
3680 {
3681 gdb_assert (has_single_step_breakpoints (current_thread));
3682 delete_single_step_breakpoints (current_thread);
3683 }
3684 else
3685 {
3686 for_each_thread ([] (thread_info *thread){
3687 if (has_single_step_breakpoints (thread))
3688 delete_single_step_breakpoints (thread);
3689 });
3690 }
3691
3692 unstop_all_lwps (0, event_child);
3693 }
3694 }
3695
3696 if (!stabilizing_threads)
3697 {
3698 /* In all-stop, stop all threads. */
3699 if (!non_stop)
3700 stop_all_lwps (0, NULL);
3701
3702 if (step_over_finished)
3703 {
3704 if (!non_stop)
3705 {
3706 /* If we were doing a step-over, all other threads but
3707 the stepping one had been paused in start_step_over,
3708 with their suspend counts incremented. We don't want
3709 to do a full unstop/unpause, because we're in
3710 all-stop mode (so we want threads stopped), but we
3711 still need to unsuspend the other threads, to
3712 decrement their `suspended' count back. */
3713 unsuspend_all_lwps (event_child);
3714 }
3715 else
3716 {
3717 /* If we just finished a step-over, then all threads had
3718 been momentarily paused. In all-stop, that's fine,
3719 we want threads stopped by now anyway. In non-stop,
3720 we need to re-resume threads that GDB wanted to be
3721 running. */
3722 unstop_all_lwps (1, event_child);
3723 }
3724 }
3725
3726 /* If we're not waiting for a specific LWP, choose an event LWP
3727 from among those that have had events. Giving equal priority
3728 to all LWPs that have had events helps prevent
3729 starvation. */
3730 if (ptid == minus_one_ptid)
3731 {
3732 event_child->status_pending_p = 1;
3733 event_child->status_pending = w;
3734
3735 select_event_lwp (&event_child);
3736
3737 /* current_thread and event_child must stay in sync. */
3738 current_thread = get_lwp_thread (event_child);
3739
3740 event_child->status_pending_p = 0;
3741 w = event_child->status_pending;
3742 }
3743
3745 /* Stabilize threads (move out of jump pads). */
3746 if (!non_stop)
3747 stabilize_threads ();
3748 }
3749 else
3750 {
3751 /* If we just finished a step-over, then all threads had been
3752 momentarily paused. In all-stop, that's fine, we want
3753 threads stopped by now anyway. In non-stop, we need to
3754 re-resume threads that GDB wanted to be running. */
3755 if (step_over_finished)
3756 unstop_all_lwps (1, event_child);
3757 }
3758
3759 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3760 {
3761 /* If the reported event is an exit, fork, vfork or exec, let
3762 GDB know. */
3763
3764 /* Break the unreported fork relationship chain. */
3765 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3766 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3767 {
3768 event_child->fork_relative->fork_relative = NULL;
3769 event_child->fork_relative = NULL;
3770 }
3771
3772 *ourstatus = event_child->waitstatus;
3773 /* Clear the event lwp's waitstatus since we handled it already. */
3774 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3775 }
3776 else
3777 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3778
3779 /* Now that we've selected our final event LWP, un-adjust its PC if
3780 it was a software breakpoint, and the client doesn't know we can
3781 adjust the breakpoint ourselves. */
3782 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3783 && !cs.swbreak_feature)
3784 {
3785 int decr_pc = the_low_target.decr_pc_after_break;
3786
3787 if (decr_pc != 0)
3788 {
3789 struct regcache *regcache
3790 = get_thread_regcache (current_thread, 1);
3791 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3792 }
3793 }
3794
3795 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3796 {
3797 get_syscall_trapinfo (event_child,
3798 &ourstatus->value.syscall_number);
3799 ourstatus->kind = event_child->syscall_state;
3800 }
3801 else if (current_thread->last_resume_kind == resume_stop
3802 && WSTOPSIG (w) == SIGSTOP)
3803 {
3804       /* GDB requested this thread to stop with vCont;t, and it
3805 	 stopped cleanly, so report it as SIG0.  The use of SIGSTOP
3806 	 is an implementation detail.  */
3807 ourstatus->value.sig = GDB_SIGNAL_0;
3808 }
3809 else if (current_thread->last_resume_kind == resume_stop
3810 && WSTOPSIG (w) != SIGSTOP)
3811 {
3812       /* GDB requested this thread to stop with vCont;t, but it
3813 	 stopped for some other reason.  */
3814 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3815 }
3816 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3817 {
3818 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3819 }
3820
3821 gdb_assert (step_over_bkpt == null_ptid);
3822
3823 if (debug_threads)
3824 {
3825 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3826 target_pid_to_str (ptid_of (current_thread)),
3827 ourstatus->kind, ourstatus->value.sig);
3828 debug_exit ();
3829 }
3830
3831 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3832 return filter_exit_event (event_child, ourstatus);
3833
3834 return ptid_of (current_thread);
3835 }
3836
3837 /* Get rid of any pending event in the pipe. */
3838 static void
3839 async_file_flush (void)
3840 {
3841 int ret;
3842 char buf;
3843
3844 do
3845 ret = read (linux_event_pipe[0], &buf, 1);
3846 while (ret >= 0 || (ret == -1 && errno == EINTR));
3847 }
3848
3849 /* Put something in the pipe, so the event loop wakes up. */
3850 static void
3851 async_file_mark (void)
3852 {
3853 int ret;
3854
3855 async_file_flush ();
3856
3857 do
3858 ret = write (linux_event_pipe[1], "+", 1);
3859 while (ret == 0 || (ret == -1 && errno == EINTR));
3860
3861 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3862 be awakened anyway. */
3863 }
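
/* Illustration (not compiled): async_file_flush and async_file_mark
   above are the two halves of the classic self-pipe trick.  A minimal
   sketch, under the assumption that the event pipe is created with
   both ends non-blocking -- the real setup of linux_event_pipe lives
   elsewhere in this file:  */
#if 0
static int
create_event_pipe_sketch (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;

  /* Non-blocking ends ensure neither the flush loop nor the mark
     write can ever stall the event loop.  */
  for (int i = 0; i < 2; i++)
    {
      int flags = fcntl (fds[i], F_GETFL, 0);

      if (flags == -1 || fcntl (fds[i], F_SETFL, flags | O_NONBLOCK) == -1)
	return -1;
    }
  return 0;
}
#endif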
3864
3865 static ptid_t
3866 linux_wait (ptid_t ptid,
3867 struct target_waitstatus *ourstatus, int target_options)
3868 {
3869 ptid_t event_ptid;
3870
3871 /* Flush the async file first. */
3872 if (target_is_async_p ())
3873 async_file_flush ();
3874
3875 do
3876 {
3877 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3878 }
3879 while ((target_options & TARGET_WNOHANG) == 0
3880 && event_ptid == null_ptid
3881 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3882
3883 /* If at least one stop was reported, there may be more. A single
3884 SIGCHLD can signal more than one child stop. */
3885 if (target_is_async_p ()
3886 && (target_options & TARGET_WNOHANG) != 0
3887 && event_ptid != null_ptid)
3888 async_file_mark ();
3889
3890 return event_ptid;
3891 }
3892
3893 /* Send a signal to an LWP. */
3894
3895 static int
3896 kill_lwp (unsigned long lwpid, int signo)
3897 {
3898 int ret;
3899
3900 errno = 0;
3901 ret = syscall (__NR_tkill, lwpid, signo);
3902 if (errno == ENOSYS)
3903 {
3904 /* If tkill fails, then we are not using nptl threads, a
3905 configuration we no longer support. */
3906 perror_with_name (("tkill"));
3907 }
3908 return ret;
3909 }
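
/* Illustration (not compiled): kill(2) targets a whole thread group,
   so it cannot direct SIGSTOP at one LWP; that is why kill_lwp above
   uses the tkill syscall.  tgkill(2) is the variant that also checks
   the thread-group id, protecting against tid reuse.  A sketch,
   assuming __NR_tgkill is defined by <sys/syscall.h>:  */
#if 0
static int
kill_lwp_tgkill_sketch (unsigned long pid, unsigned long lwpid, int signo)
{
  /* Fails with ESRCH if LWPID no longer belongs to PID.  */
  return syscall (__NR_tgkill, pid, lwpid, signo);
}
#endif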
3910
3911 void
3912 linux_stop_lwp (struct lwp_info *lwp)
3913 {
3914 send_sigstop (lwp);
3915 }
3916
3917 static void
3918 send_sigstop (struct lwp_info *lwp)
3919 {
3920 int pid;
3921
3922 pid = lwpid_of (get_lwp_thread (lwp));
3923
3924 /* If we already have a pending stop signal for this process, don't
3925 send another. */
3926 if (lwp->stop_expected)
3927 {
3928 if (debug_threads)
3929 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3930
3931 return;
3932 }
3933
3934 if (debug_threads)
3935 debug_printf ("Sending sigstop to lwp %d\n", pid);
3936
3937 lwp->stop_expected = 1;
3938 kill_lwp (pid, SIGSTOP);
3939 }
3940
3941 static void
3942 send_sigstop (thread_info *thread, lwp_info *except)
3943 {
3944 struct lwp_info *lwp = get_thread_lwp (thread);
3945
3946 /* Ignore EXCEPT. */
3947 if (lwp == except)
3948 return;
3949
3950 if (lwp->stopped)
3951 return;
3952
3953 send_sigstop (lwp);
3954 }
3955
3956 /* Increment the suspend count of an LWP, and stop it, if not stopped
3957 yet. */
3958 static void
3959 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3960 {
3961 struct lwp_info *lwp = get_thread_lwp (thread);
3962
3963 /* Ignore EXCEPT. */
3964 if (lwp == except)
3965 return;
3966
3967 lwp_suspended_inc (lwp);
3968
3969 send_sigstop (thread, except);
3970 }
3971
3972 static void
3973 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3974 {
3975 /* Store the exit status for later. */
3976 lwp->status_pending_p = 1;
3977 lwp->status_pending = wstat;
3978
3979 /* Store in waitstatus as well, as there's nothing else to process
3980 for this event. */
3981 if (WIFEXITED (wstat))
3982 {
3983 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3984 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3985 }
3986 else if (WIFSIGNALED (wstat))
3987 {
3988 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3989 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3990 }
3991
3992 /* Prevent trying to stop it. */
3993 lwp->stopped = 1;
3994
3995 /* No further stops are expected from a dead lwp. */
3996 lwp->stop_expected = 0;
3997 }
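
/* Illustration (not compiled): how a raw wait status decodes into the
   two cases handled by mark_lwp_dead above.  For a process that
   called exit (3), WIFEXITED is true and WEXITSTATUS yields 3; for
   one killed by SIGKILL, WIFSIGNALED is true and WTERMSIG yields 9.  */
#if 0
static void
describe_exit_status_sketch (int wstat)
{
  if (WIFEXITED (wstat))
    fprintf (stderr, "exited with code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    fprintf (stderr, "killed by signal %d\n", WTERMSIG (wstat));
}
#endif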
3998
3999 /* Return true if LWP has exited already, and has a pending exit event
4000 to report to GDB. */
4001
4002 static int
4003 lwp_is_marked_dead (struct lwp_info *lwp)
4004 {
4005 return (lwp->status_pending_p
4006 && (WIFEXITED (lwp->status_pending)
4007 || WIFSIGNALED (lwp->status_pending)));
4008 }
4009
4010 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4011
4012 static void
4013 wait_for_sigstop (void)
4014 {
4015 struct thread_info *saved_thread;
4016 ptid_t saved_tid;
4017 int wstat;
4018 int ret;
4019
4020 saved_thread = current_thread;
4021 if (saved_thread != NULL)
4022 saved_tid = saved_thread->id;
4023 else
4024 saved_tid = null_ptid; /* avoid bogus unused warning */
4025
4026 if (debug_threads)
4027 debug_printf ("wait_for_sigstop: pulling events\n");
4028
4029 /* Passing NULL_PTID as filter indicates we want all events to be
4030 left pending. Eventually this returns when there are no
4031 unwaited-for children left. */
4032 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4033 &wstat, __WALL);
4034 gdb_assert (ret == -1);
4035
4036 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4037 current_thread = saved_thread;
4038 else
4039 {
4040 if (debug_threads)
4041 debug_printf ("Previously current thread died.\n");
4042
4043 /* We can't change the current inferior behind GDB's back,
4044 otherwise, a subsequent command may apply to the wrong
4045 process. */
4046 current_thread = NULL;
4047 }
4048 }
4049
4050 /* Returns true if THREAD is stopped in a jump pad, and we can't
4051 move it out, because we need to report the stop event to GDB. For
4052 example, if the user puts a breakpoint in the jump pad, it's
4053 because she wants to debug it. */
4054
4055 static bool
4056 stuck_in_jump_pad_callback (thread_info *thread)
4057 {
4058 struct lwp_info *lwp = get_thread_lwp (thread);
4059
4060 if (lwp->suspended != 0)
4061 {
4062 internal_error (__FILE__, __LINE__,
4063 "LWP %ld is suspended, suspended=%d\n",
4064 lwpid_of (thread), lwp->suspended);
4065 }
4066 gdb_assert (lwp->stopped);
4067
4068   /* Allow debugging the jump pad, gdb_collect, etc.  */
4069 return (supports_fast_tracepoints ()
4070 && agent_loaded_p ()
4071 && (gdb_breakpoint_here (lwp->stop_pc)
4072 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4073 || thread->last_resume_kind == resume_step)
4074 && (linux_fast_tracepoint_collecting (lwp, NULL)
4075 != fast_tpoint_collect_result::not_collecting));
4076 }
4077
4078 static void
4079 move_out_of_jump_pad_callback (thread_info *thread)
4080 {
4081 struct thread_info *saved_thread;
4082 struct lwp_info *lwp = get_thread_lwp (thread);
4083 int *wstat;
4084
4085 if (lwp->suspended != 0)
4086 {
4087 internal_error (__FILE__, __LINE__,
4088 "LWP %ld is suspended, suspended=%d\n",
4089 lwpid_of (thread), lwp->suspended);
4090 }
4091 gdb_assert (lwp->stopped);
4092
4093 /* For gdb_breakpoint_here. */
4094 saved_thread = current_thread;
4095 current_thread = thread;
4096
4097 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4098
4099 /* Allow debugging the jump pad, gdb_collect, etc. */
4100 if (!gdb_breakpoint_here (lwp->stop_pc)
4101 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4102 && thread->last_resume_kind != resume_step
4103 && maybe_move_out_of_jump_pad (lwp, wstat))
4104 {
4105 if (debug_threads)
4106 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4107 lwpid_of (thread));
4108
4109 if (wstat)
4110 {
4111 lwp->status_pending_p = 0;
4112 enqueue_one_deferred_signal (lwp, wstat);
4113
4114 if (debug_threads)
4115 debug_printf ("Signal %d for LWP %ld deferred "
4116 "(in jump pad)\n",
4117 WSTOPSIG (*wstat), lwpid_of (thread));
4118 }
4119
4120 linux_resume_one_lwp (lwp, 0, 0, NULL);
4121 }
4122 else
4123 lwp_suspended_inc (lwp);
4124
4125 current_thread = saved_thread;
4126 }
4127
4128 static bool
4129 lwp_running (thread_info *thread)
4130 {
4131 struct lwp_info *lwp = get_thread_lwp (thread);
4132
4133 if (lwp_is_marked_dead (lwp))
4134 return false;
4135
4136 return !lwp->stopped;
4137 }
4138
4139 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4140 If SUSPEND, then also increase the suspend count of every LWP,
4141 except EXCEPT. */
4142
4143 static void
4144 stop_all_lwps (int suspend, struct lwp_info *except)
4145 {
4146 /* Should not be called recursively. */
4147 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4148
4149 if (debug_threads)
4150 {
4151 debug_enter ();
4152 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4153 suspend ? "stop-and-suspend" : "stop",
4154 except != NULL
4155 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4156 : "none");
4157 }
4158
4159 stopping_threads = (suspend
4160 ? STOPPING_AND_SUSPENDING_THREADS
4161 : STOPPING_THREADS);
4162
4163 if (suspend)
4164 for_each_thread ([&] (thread_info *thread)
4165 {
4166 suspend_and_send_sigstop (thread, except);
4167 });
4168 else
4169 for_each_thread ([&] (thread_info *thread)
4170 {
4171 send_sigstop (thread, except);
4172 });
4173
4174 wait_for_sigstop ();
4175 stopping_threads = NOT_STOPPING_THREADS;
4176
4177 if (debug_threads)
4178 {
4179 debug_printf ("stop_all_lwps done, setting stopping_threads "
4180 "back to !stopping\n");
4181 debug_exit ();
4182 }
4183 }
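
/* Illustration (not compiled): the usual pairing of stop_all_lwps
   with its inverse.  Callers that must mutate inferior memory (for
   example, removing breakpoints) bracket the operation, as
   linux_wait_1 does when deleting single-step breakpoints:  */
#if 0
  stop_all_lwps (0, event_child);	/* stop, but don't suspend */
  delete_single_step_breakpoints (current_thread);
  unstop_all_lwps (0, event_child);	/* set them running again */
#endif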
4184
4185 /* Enqueue one signal in the chain of signals which need to be
4186 delivered to this process on next resume. */
4187
4188 static void
4189 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4190 {
4191 struct pending_signals *p_sig = XNEW (struct pending_signals);
4192
4193 p_sig->prev = lwp->pending_signals;
4194 p_sig->signal = signal;
4195 if (info == NULL)
4196 memset (&p_sig->info, 0, sizeof (siginfo_t));
4197 else
4198 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4199 lwp->pending_signals = p_sig;
4200 }
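
/* Illustration (not compiled): the chain above is pushed at the head,
   but consumers walk to the tail, so signals are delivered in FIFO
   order.  A sketch of the walk done in linux_resume_one_lwp_throw
   below:  */
#if 0
static struct pending_signals *
oldest_pending_signal_sketch (struct lwp_info *lwp)
{
  struct pending_signals *p = lwp->pending_signals;

  if (p == NULL)
    return NULL;
  while (p->prev != NULL)
    p = p->prev;		/* walk to the oldest entry */
  return p;
}
#endif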
4201
4202 /* Install breakpoints for software single stepping. */
4203
4204 static void
4205 install_software_single_step_breakpoints (struct lwp_info *lwp)
4206 {
4207 struct thread_info *thread = get_lwp_thread (lwp);
4208 struct regcache *regcache = get_thread_regcache (thread, 1);
4209
4210 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4211
4212 current_thread = thread;
4213 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4214
4215 for (CORE_ADDR pc : next_pcs)
4216 set_single_step_breakpoint (pc, current_ptid);
4217 }
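
/* Illustration (not compiled): the_low_target.get_next_pcs computes
   every address the instruction at PC could transfer control to.  A
   hypothetical sketch for a simple fixed-width ISA; the helpers named
   here (read_insn_at, insn_is_cond_branch, branch_target) are made up
   for illustration only:  */
#if 0
static std::vector<CORE_ADDR>
simple_get_next_pcs_sketch (struct regcache *regcache)
{
  std::vector<CORE_ADDR> next_pcs;
  CORE_ADDR pc = regcache_read_pc (regcache);
  uint32_t insn = read_insn_at (pc);		/* hypothetical */

  /* The fall-through successor.  */
  next_pcs.push_back (pc + 4);

  /* A conditional branch may instead reach its target.  */
  if (insn_is_cond_branch (insn))		/* hypothetical */
    next_pcs.push_back (branch_target (pc, insn));
  return next_pcs;
}
#endif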
4218
4219 /* Single step via hardware or software single step.
4220    Return 1 if hardware single-stepping, or 0 if software
4221    single-stepping or if the target can't single-step at all.  */
4222
4223 static int
4224 single_step (struct lwp_info* lwp)
4225 {
4226 int step = 0;
4227
4228 if (can_hardware_single_step ())
4229 {
4230 step = 1;
4231 }
4232 else if (can_software_single_step ())
4233 {
4234 install_software_single_step_breakpoints (lwp);
4235 step = 0;
4236 }
4237 else
4238 {
4239 if (debug_threads)
4240 debug_printf ("stepping is not implemented on this target");
4241 }
4242
4243 return step;
4244 }
4245
4246 /* The signal can be delivered to the inferior if we are not trying to
4247    finish a fast tracepoint collect.  Since a signal can be delivered
4248    during a step-over, the program may enter the signal handler and
4249    trap again after returning from it.  We can live with the spurious
4250    double traps.  */
4251
4252 static int
4253 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4254 {
4255 return (lwp->collecting_fast_tracepoint
4256 == fast_tpoint_collect_result::not_collecting);
4257 }
4258
4259 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4260 SIGNAL is nonzero, give it that signal. */
4261
4262 static void
4263 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4264 int step, int signal, siginfo_t *info)
4265 {
4266 struct thread_info *thread = get_lwp_thread (lwp);
4267 struct thread_info *saved_thread;
4268 int ptrace_request;
4269 struct process_info *proc = get_thread_process (thread);
4270
4271   /* Note that the target description may not be initialised
4272      (proc->tdesc == NULL) at this point, because the program hasn't
4273      stopped at its first instruction yet (GDBserver is skipping the
4274      extra traps from the wrapper program; see option --wrapper).
4275      Code in this function that requires register access should be
4276      guarded by a proc->tdesc != NULL check, or similar.  */
4277
4278 if (lwp->stopped == 0)
4279 return;
4280
4281 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4282
4283 fast_tpoint_collect_result fast_tp_collecting
4284 = lwp->collecting_fast_tracepoint;
4285
4286 gdb_assert (!stabilizing_threads
4287 || (fast_tp_collecting
4288 != fast_tpoint_collect_result::not_collecting));
4289
4290 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4291 user used the "jump" command, or "set $pc = foo"). */
4292 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4293 {
4294 /* Collecting 'while-stepping' actions doesn't make sense
4295 anymore. */
4296 release_while_stepping_state_list (thread);
4297 }
4298
4299 /* If we have pending signals or status, and a new signal, enqueue the
4300 signal. Also enqueue the signal if it can't be delivered to the
4301 inferior right now. */
4302 if (signal != 0
4303 && (lwp->status_pending_p
4304 || lwp->pending_signals != NULL
4305 || !lwp_signal_can_be_delivered (lwp)))
4306 {
4307 enqueue_pending_signal (lwp, signal, info);
4308
4309 /* Postpone any pending signal. It was enqueued above. */
4310 signal = 0;
4311 }
4312
4313 if (lwp->status_pending_p)
4314 {
4315 if (debug_threads)
4316 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4317 " has pending status\n",
4318 lwpid_of (thread), step ? "step" : "continue",
4319 lwp->stop_expected ? "expected" : "not expected");
4320 return;
4321 }
4322
4323 saved_thread = current_thread;
4324 current_thread = thread;
4325
4326 /* This bit needs some thinking about. If we get a signal that
4327 we must report while a single-step reinsert is still pending,
4328 we often end up resuming the thread. It might be better to
4329 (ew) allow a stack of pending events; then we could be sure that
4330 the reinsert happened right away and not lose any signals.
4331
4332 Making this stack would also shrink the window in which breakpoints are
4333 uninserted (see comment in linux_wait_for_lwp) but not enough for
4334 complete correctness, so it won't solve that problem. It may be
4335 worthwhile just to solve this one, however. */
4336 if (lwp->bp_reinsert != 0)
4337 {
4338 if (debug_threads)
4339 debug_printf (" pending reinsert at 0x%s\n",
4340 paddress (lwp->bp_reinsert));
4341
4342 if (can_hardware_single_step ())
4343 {
4344 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4345 {
4346 if (step == 0)
4347 warning ("BAD - reinserting but not stepping.");
4348 if (lwp->suspended)
4349 warning ("BAD - reinserting and suspended(%d).",
4350 lwp->suspended);
4351 }
4352 }
4353
4354 step = maybe_hw_step (thread);
4355 }
4356
4357 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4358 {
4359 if (debug_threads)
4360 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4361 " (exit-jump-pad-bkpt)\n",
4362 lwpid_of (thread));
4363 }
4364 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4365 {
4366 if (debug_threads)
4367 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4368 " single-stepping\n",
4369 lwpid_of (thread));
4370
4371 if (can_hardware_single_step ())
4372 step = 1;
4373 else
4374 {
4375 internal_error (__FILE__, __LINE__,
4376 "moving out of jump pad single-stepping"
4377 " not implemented on this target");
4378 }
4379 }
4380
4381 /* If we have while-stepping actions in this thread set it stepping.
4382 If we have a signal to deliver, it may or may not be set to
4383 SIG_IGN, we don't know. Assume so, and allow collecting
4384 while-stepping into a signal handler. A possible smart thing to
4385 do would be to set an internal breakpoint at the signal return
4386 address, continue, and carry on catching this while-stepping
4387 action only when that breakpoint is hit. A future
4388 enhancement. */
4389 if (thread->while_stepping != NULL)
4390 {
4391 if (debug_threads)
4392 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4393 lwpid_of (thread));
4394
4395 step = single_step (lwp);
4396 }
4397
4398 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4399 {
4400 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4401
4402 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4403
4404 if (debug_threads)
4405 {
4406 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4407 (long) lwp->stop_pc);
4408 }
4409 }
4410
4411 /* If we have pending signals, consume one if it can be delivered to
4412 the inferior. */
4413 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4414 {
4415 struct pending_signals **p_sig;
4416
4417 p_sig = &lwp->pending_signals;
4418 while ((*p_sig)->prev != NULL)
4419 p_sig = &(*p_sig)->prev;
4420
4421 signal = (*p_sig)->signal;
4422 if ((*p_sig)->info.si_signo != 0)
4423 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4424 &(*p_sig)->info);
4425
4426 free (*p_sig);
4427 *p_sig = NULL;
4428 }
4429
4430 if (debug_threads)
4431 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4432 lwpid_of (thread), step ? "step" : "continue", signal,
4433 lwp->stop_expected ? "expected" : "not expected");
4434
4435 if (the_low_target.prepare_to_resume != NULL)
4436 the_low_target.prepare_to_resume (lwp);
4437
4438 regcache_invalidate_thread (thread);
4439 errno = 0;
4440 lwp->stepping = step;
4441 if (step)
4442 ptrace_request = PTRACE_SINGLESTEP;
4443 else if (gdb_catching_syscalls_p (lwp))
4444 ptrace_request = PTRACE_SYSCALL;
4445 else
4446 ptrace_request = PTRACE_CONT;
4447 ptrace (ptrace_request,
4448 lwpid_of (thread),
4449 (PTRACE_TYPE_ARG3) 0,
4450 /* Coerce to a uintptr_t first to avoid potential gcc warning
4451 of coercing an 8 byte integer to a 4 byte pointer. */
4452 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4453
4454 current_thread = saved_thread;
4455 if (errno)
4456 perror_with_name ("resuming thread");
4457
4458 /* Successfully resumed. Clear state that no longer makes sense,
4459 and mark the LWP as running. Must not do this before resuming
4460 otherwise if that fails other code will be confused. E.g., we'd
4461 later try to stop the LWP and hang forever waiting for a stop
4462 status. Note that we must not throw after this is cleared,
4463 otherwise handle_zombie_lwp_error would get confused. */
4464 lwp->stopped = 0;
4465 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4466 }
4467
4468 /* Called when we try to resume a stopped LWP and that errors out. If
4469 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4470 or about to become), discard the error, clear any pending status
4471 the LWP may have, and return true (we'll collect the exit status
4472 soon enough). Otherwise, return false. */
4473
4474 static int
4475 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4476 {
4477 struct thread_info *thread = get_lwp_thread (lp);
4478
4479 /* If we get an error after resuming the LWP successfully, we'd
4480 confuse !T state for the LWP being gone. */
4481 gdb_assert (lp->stopped);
4482
4483 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4484 because even if ptrace failed with ESRCH, the tracee may be "not
4485 yet fully dead", but already refusing ptrace requests. In that
4486 case the tracee has 'R (Running)' state for a little bit
4487 (observed in Linux 3.18). See also the note on ESRCH in the
4488 ptrace(2) man page. Instead, check whether the LWP has any state
4489 other than ptrace-stopped. */
4490
4491 /* Don't assume anything if /proc/PID/status can't be read. */
4492 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4493 {
4494 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4495 lp->status_pending_p = 0;
4496 return 1;
4497 }
4498 return 0;
4499 }
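
/* Illustration (not compiled): linux_proc_pid_is_trace_stopped_nowarn
   boils down to reading the "State:" line of /proc/PID/status and
   checking for 't' (tracing stop).  A rough sketch of the idea -- not
   the actual implementation, which lives in nat/linux-procfs.c:  */
#if 0
static int
pid_is_trace_stopped_sketch (int pid)
{
  char path[64], line[128];
  FILE *f;
  int stopped = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;			/* Can't tell.  */
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* E.g. "State:\tt (tracing stop)".  */
	stopped = strstr (line, "t (tracing stop") != NULL;
	break;
      }
  fclose (f);
  return stopped;
}
#endif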
4500
4501 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4502 disappears while we try to resume it. */
4503
4504 static void
4505 linux_resume_one_lwp (struct lwp_info *lwp,
4506 int step, int signal, siginfo_t *info)
4507 {
4508 TRY
4509 {
4510 linux_resume_one_lwp_throw (lwp, step, signal, info);
4511 }
4512 CATCH (ex, RETURN_MASK_ERROR)
4513 {
4514 if (!check_ptrace_stopped_lwp_gone (lwp))
4515 throw_exception (ex);
4516 }
4517 END_CATCH
4518 }
4519
4520 /* This function is called once per thread via for_each_thread.
4521 We look up which resume request applies to THREAD and mark it with a
4522 pointer to the appropriate resume request.
4523
4524 This algorithm is O(threads * resume elements), but resume elements
4525 is small (and will remain small at least until GDB supports thread
4526 suspension). */
4527
4528 static void
4529 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4530 {
4531 struct lwp_info *lwp = get_thread_lwp (thread);
4532
4533 for (int ndx = 0; ndx < n; ndx++)
4534 {
4535 ptid_t ptid = resume[ndx].thread;
4536 if (ptid == minus_one_ptid
4537 || ptid == thread->id
4538 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4539 of PID'. */
4540 || (ptid.pid () == pid_of (thread)
4541 && (ptid.is_pid ()
4542 || ptid.lwp () == -1)))
4543 {
4544 if (resume[ndx].kind == resume_stop
4545 && thread->last_resume_kind == resume_stop)
4546 {
4547 if (debug_threads)
4548 debug_printf ("already %s LWP %ld at GDB's request\n",
4549 (thread->last_status.kind
4550 == TARGET_WAITKIND_STOPPED)
4551 ? "stopped"
4552 : "stopping",
4553 lwpid_of (thread));
4554
4555 continue;
4556 }
4557
4558 /* Ignore (wildcard) resume requests for already-resumed
4559 threads. */
4560 if (resume[ndx].kind != resume_stop
4561 && thread->last_resume_kind != resume_stop)
4562 {
4563 if (debug_threads)
4564 debug_printf ("already %s LWP %ld at GDB's request\n",
4565 (thread->last_resume_kind
4566 == resume_step)
4567 ? "stepping"
4568 : "continuing",
4569 lwpid_of (thread));
4570 continue;
4571 }
4572
4573 /* Don't let wildcard resumes resume fork children that GDB
4574 does not yet know are new fork children. */
4575 if (lwp->fork_relative != NULL)
4576 {
4577 struct lwp_info *rel = lwp->fork_relative;
4578
4579 if (rel->status_pending_p
4580 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4581 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4582 {
4583 if (debug_threads)
4584 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4585 lwpid_of (thread));
4586 continue;
4587 }
4588 }
4589
4590 /* If the thread has a pending event that has already been
4591 reported to GDBserver core, but GDB has not pulled the
4592 event out of the vStopped queue yet, likewise, ignore the
4593 (wildcard) resume request. */
4594 if (in_queued_stop_replies (thread->id))
4595 {
4596 if (debug_threads)
4597 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4598 lwpid_of (thread));
4599 continue;
4600 }
4601
4602 lwp->resume = &resume[ndx];
4603 thread->last_resume_kind = lwp->resume->kind;
4604
4605 lwp->step_range_start = lwp->resume->step_range_start;
4606 lwp->step_range_end = lwp->resume->step_range_end;
4607
4608 /* If we had a deferred signal to report, dequeue one now.
4609 This can happen if LWP gets more than one signal while
4610 trying to get out of a jump pad. */
4611 if (lwp->stopped
4612 && !lwp->status_pending_p
4613 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4614 {
4615 lwp->status_pending_p = 1;
4616
4617 if (debug_threads)
4618 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4619 "leaving status pending.\n",
4620 WSTOPSIG (lwp->status_pending),
4621 lwpid_of (thread));
4622 }
4623
4624 return;
4625 }
4626 }
4627
4628 /* No resume action for this thread. */
4629 lwp->resume = NULL;
4630 }
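
/* Illustration (not compiled): how the ptid test in
   linux_set_resume_request matches.  For a thread whose id is
   (pid = 1234, lwp = 1237), each of these vCont action thread-ids
   selects it (1234 == 0x4d2, 1237 == 0x4d5):  */
#if 0
  minus_one_ptid;		/* vCont;c           -> all threads */
  ptid_t (1234);		/* vCont;c:p4d2      -> all threads of 1234 */
  ptid_t (1234, -1, 0);		/* vCont;c:p4d2.-1   -> likewise */
  ptid_t (1234, 1237, 0);	/* vCont;c:p4d2.4d5  -> exactly this LWP */
#endif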
4631
4632 /* find_thread callback for linux_resume. Return true if this lwp has an
4633 interesting status pending. */
4634
4635 static bool
4636 resume_status_pending_p (thread_info *thread)
4637 {
4638 struct lwp_info *lwp = get_thread_lwp (thread);
4639
4640 /* LWPs which will not be resumed are not interesting, because
4641 we might not wait for them next time through linux_wait. */
4642 if (lwp->resume == NULL)
4643 return false;
4644
4645 return thread_still_has_status_pending_p (thread);
4646 }
4647
4648 /* Return true if this lwp, which GDB wants running, is stopped at an
4649    internal breakpoint that we need to step over.  It assumes that
4650    any required STOP_PC adjustment has already been propagated to the
4651    inferior's regcache.  */
4652
4653 static bool
4654 need_step_over_p (thread_info *thread)
4655 {
4656 struct lwp_info *lwp = get_thread_lwp (thread);
4657 struct thread_info *saved_thread;
4658 CORE_ADDR pc;
4659 struct process_info *proc = get_thread_process (thread);
4660
4661   /* GDBserver is skipping the extra traps from the wrapper program,
4662      so there is no need to step over.  */
4663 if (proc->tdesc == NULL)
4664 return false;
4665
4666 /* LWPs which will not be resumed are not interesting, because we
4667 might not wait for them next time through linux_wait. */
4668
4669 if (!lwp->stopped)
4670 {
4671 if (debug_threads)
4672 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4673 lwpid_of (thread));
4674 return false;
4675 }
4676
4677 if (thread->last_resume_kind == resume_stop)
4678 {
4679 if (debug_threads)
4680 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4681 " stopped\n",
4682 lwpid_of (thread));
4683 return false;
4684 }
4685
4686 gdb_assert (lwp->suspended >= 0);
4687
4688 if (lwp->suspended)
4689 {
4690 if (debug_threads)
4691 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4692 lwpid_of (thread));
4693 return false;
4694 }
4695
4696 if (lwp->status_pending_p)
4697 {
4698 if (debug_threads)
4699 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4700 " status.\n",
4701 lwpid_of (thread));
4702 return false;
4703 }
4704
4705 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4706 or we have. */
4707 pc = get_pc (lwp);
4708
4709 /* If the PC has changed since we stopped, then don't do anything,
4710 and let the breakpoint/tracepoint be hit. This happens if, for
4711 instance, GDB handled the decr_pc_after_break subtraction itself,
4712 GDB is OOL stepping this thread, or the user has issued a "jump"
4713 command, or poked thread's registers herself. */
4714 if (pc != lwp->stop_pc)
4715 {
4716 if (debug_threads)
4717 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4718 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4719 lwpid_of (thread),
4720 paddress (lwp->stop_pc), paddress (pc));
4721 return false;
4722 }
4723
4724 /* On software single step target, resume the inferior with signal
4725 rather than stepping over. */
4726 if (can_software_single_step ()
4727 && lwp->pending_signals != NULL
4728 && lwp_signal_can_be_delivered (lwp))
4729 {
4730 if (debug_threads)
4731 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4732 " signals.\n",
4733 lwpid_of (thread));
4734
4735 return false;
4736 }
4737
4738 saved_thread = current_thread;
4739 current_thread = thread;
4740
4741 /* We can only step over breakpoints we know about. */
4742 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4743 {
4744       /* Don't step over a breakpoint that GDB expects to hit,
4745 	 though.  But if the condition is evaluated on the target's
4746 	 side and evaluates to false, step over this breakpoint as well.  */
4747 if (gdb_breakpoint_here (pc)
4748 && gdb_condition_true_at_breakpoint (pc)
4749 && gdb_no_commands_at_breakpoint (pc))
4750 {
4751 if (debug_threads)
4752 debug_printf ("Need step over [LWP %ld]? yes, but found"
4753 " GDB breakpoint at 0x%s; skipping step over\n",
4754 lwpid_of (thread), paddress (pc));
4755
4756 current_thread = saved_thread;
4757 return false;
4758 }
4759 else
4760 {
4761 if (debug_threads)
4762 debug_printf ("Need step over [LWP %ld]? yes, "
4763 "found breakpoint at 0x%s\n",
4764 lwpid_of (thread), paddress (pc));
4765
4766 	  /* We've found an lwp that needs stepping over --- return true
4767 	     so that find_thread stops looking.  */
4768 current_thread = saved_thread;
4769
4770 return true;
4771 }
4772 }
4773
4774 current_thread = saved_thread;
4775
4776 if (debug_threads)
4777 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4778 " at 0x%s\n",
4779 lwpid_of (thread), paddress (pc));
4780
4781 return false;
4782 }
4783
4784 /* Start a step-over operation on LWP.  When LWP is stopped at a
4785    breakpoint, to make progress we need to get the breakpoint out
4786    of the way.  If we let other threads run while we do that, they may
4787 pass by the breakpoint location and miss hitting it. To avoid
4788 that, a step-over momentarily stops all threads while LWP is
4789 single-stepped by either hardware or software while the breakpoint
4790 is temporarily uninserted from the inferior. When the single-step
4791 finishes, we reinsert the breakpoint, and let all threads that are
4792 supposed to be running, run again. */
4793
4794 static int
4795 start_step_over (struct lwp_info *lwp)
4796 {
4797 struct thread_info *thread = get_lwp_thread (lwp);
4798 struct thread_info *saved_thread;
4799 CORE_ADDR pc;
4800 int step;
4801
4802 if (debug_threads)
4803 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4804 lwpid_of (thread));
4805
4806 stop_all_lwps (1, lwp);
4807
4808 if (lwp->suspended != 0)
4809 {
4810 internal_error (__FILE__, __LINE__,
4811 "LWP %ld suspended=%d\n", lwpid_of (thread),
4812 lwp->suspended);
4813 }
4814
4815 if (debug_threads)
4816 debug_printf ("Done stopping all threads for step-over.\n");
4817
4818 /* Note, we should always reach here with an already adjusted PC,
4819 either by GDB (if we're resuming due to GDB's request), or by our
4820 caller, if we just finished handling an internal breakpoint GDB
4821 shouldn't care about. */
4822 pc = get_pc (lwp);
4823
4824 saved_thread = current_thread;
4825 current_thread = thread;
4826
4827 lwp->bp_reinsert = pc;
4828 uninsert_breakpoints_at (pc);
4829 uninsert_fast_tracepoint_jumps_at (pc);
4830
4831 step = single_step (lwp);
4832
4833 current_thread = saved_thread;
4834
4835 linux_resume_one_lwp (lwp, step, 0, NULL);
4836
4837 /* Require next event from this LWP. */
4838 step_over_bkpt = thread->id;
4839 return 1;
4840 }
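
/* Illustration (not compiled): the step-over dance end to end.
   start_step_over above performs steps 1-3; once the step completes,
   linux_wait_1 calls finish_step_over (below) for steps 4-5:  */
#if 0
  /* 1 */ stop_all_lwps (1, lwp);	/* stop and suspend the others */
  /* 2 */ uninsert_breakpoints_at (pc);	/* move the bkpt out of the way */
  /* 3 */ linux_resume_one_lwp (lwp, step, 0, NULL);	/* hw or sw step */
  /* ... wait until the stepping LWP reports its stop ...  */
  /* 4 */ reinsert_breakpoints_at (pc);	/* put the breakpoint back */
  /* 5 */ unsuspend_all_lwps (lwp);	/* let the other threads go */
#endif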
4841
4842 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4843 start_step_over, if still there, and delete any single-step
4844 breakpoints we've set, on non hardware single-step targets. */
4845
4846 static int
4847 finish_step_over (struct lwp_info *lwp)
4848 {
4849 if (lwp->bp_reinsert != 0)
4850 {
4851 struct thread_info *saved_thread = current_thread;
4852
4853 if (debug_threads)
4854 debug_printf ("Finished step over.\n");
4855
4856 current_thread = get_lwp_thread (lwp);
4857
4858 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4859 may be no breakpoint to reinsert there by now. */
4860 reinsert_breakpoints_at (lwp->bp_reinsert);
4861 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4862
4863 lwp->bp_reinsert = 0;
4864
4865 /* Delete any single-step breakpoints. No longer needed. We
4866 don't have to worry about other threads hitting this trap,
4867 and later not being able to explain it, because we were
4868 stepping over a breakpoint, and we hold all threads but
4869 LWP stopped while doing that. */
4870 if (!can_hardware_single_step ())
4871 {
4872 gdb_assert (has_single_step_breakpoints (current_thread));
4873 delete_single_step_breakpoints (current_thread);
4874 }
4875
4876 step_over_bkpt = null_ptid;
4877 current_thread = saved_thread;
4878 return 1;
4879 }
4880 else
4881 return 0;
4882 }
4883
4884 /* If there's a step over in progress, wait until all threads stop
4885 (that is, until the stepping thread finishes its step), and
4886 unsuspend all lwps. The stepping thread ends with its status
4887 pending, which is processed later when we get back to processing
4888 events. */
4889
4890 static void
4891 complete_ongoing_step_over (void)
4892 {
4893 if (step_over_bkpt != null_ptid)
4894 {
4895 struct lwp_info *lwp;
4896 int wstat;
4897 int ret;
4898
4899 if (debug_threads)
4900 debug_printf ("detach: step over in progress, finish it first\n");
4901
4902 /* Passing NULL_PTID as filter indicates we want all events to
4903 be left pending. Eventually this returns when there are no
4904 unwaited-for children left. */
4905 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4906 &wstat, __WALL);
4907 gdb_assert (ret == -1);
4908
4909 lwp = find_lwp_pid (step_over_bkpt);
4910 if (lwp != NULL)
4911 finish_step_over (lwp);
4912 step_over_bkpt = null_ptid;
4913 unsuspend_all_lwps (lwp);
4914 }
4915 }
4916
4917 /* This function is called once per thread. We check the thread's resume
4918 request, which will tell us whether to resume, step, or leave the thread
4919 stopped; and what signal, if any, it should be sent.
4920
4921 For threads which we aren't explicitly told otherwise, we preserve
4922 the stepping flag; this is used for stepping over gdbserver-placed
4923 breakpoints.
4924
4925 If pending_flags was set in any thread, we queue any needed
4926 signals, since we won't actually resume. We already have a pending
4927 event to report, so we don't need to preserve any step requests;
4928 they should be re-issued if necessary. */
4929
4930 static void
4931 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4932 {
4933 struct lwp_info *lwp = get_thread_lwp (thread);
4934 int leave_pending;
4935
4936 if (lwp->resume == NULL)
4937 return;
4938
4939 if (lwp->resume->kind == resume_stop)
4940 {
4941 if (debug_threads)
4942 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4943
4944 if (!lwp->stopped)
4945 {
4946 if (debug_threads)
4947 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4948
4949 /* Stop the thread, and wait for the event asynchronously,
4950 through the event loop. */
4951 send_sigstop (lwp);
4952 }
4953 else
4954 {
4955 if (debug_threads)
4956 debug_printf ("already stopped LWP %ld\n",
4957 lwpid_of (thread));
4958
4959 /* The LWP may have been stopped in an internal event that
4960 was not meant to be notified back to GDB (e.g., gdbserver
4961 breakpoint), so we should be reporting a stop event in
4962 this case too. */
4963
4964 /* If the thread already has a pending SIGSTOP, this is a
4965 no-op. Otherwise, something later will presumably resume
4966 the thread and this will cause it to cancel any pending
4967 operation, due to last_resume_kind == resume_stop. If
4968 the thread already has a pending status to report, we
4969 will still report it the next time we wait - see
4970 status_pending_p_callback. */
4971
4972 /* If we already have a pending signal to report, then
4973 there's no need to queue a SIGSTOP, as this means we're
4974 	 midway through moving the LWP out of the jump pad, and we
4975 will report the pending signal as soon as that is
4976 finished. */
4977 if (lwp->pending_signals_to_report == NULL)
4978 send_sigstop (lwp);
4979 }
4980
4981 /* For stop requests, we're done. */
4982 lwp->resume = NULL;
4983 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4984 return;
4985 }
4986
4987 /* If this thread which is about to be resumed has a pending status,
4988 then don't resume it - we can just report the pending status.
4989 Likewise if it is suspended, because e.g., another thread is
4990 stepping past a breakpoint. Make sure to queue any signals that
4991 would otherwise be sent. In all-stop mode, we do this decision
4992 based on if *any* thread has a pending status. If there's a
4993 thread that needs the step-over-breakpoint dance, then don't
4994 resume any other thread but that particular one. */
4995 leave_pending = (lwp->suspended
4996 || lwp->status_pending_p
4997 || leave_all_stopped);
4998
4999 /* If we have a new signal, enqueue the signal. */
5000 if (lwp->resume->sig != 0)
5001 {
5002 siginfo_t info, *info_p;
5003
5004 /* If this is the same signal we were previously stopped by,
5005 make sure to queue its siginfo. */
5006 if (WIFSTOPPED (lwp->last_status)
5007 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5008 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5009 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5010 info_p = &info;
5011 else
5012 info_p = NULL;
5013
5014 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5015 }
5016
5017 if (!leave_pending)
5018 {
5019 if (debug_threads)
5020 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5021
5022 proceed_one_lwp (thread, NULL);
5023 }
5024 else
5025 {
5026 if (debug_threads)
5027 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5028 }
5029
5030 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5031 lwp->resume = NULL;
5032 }
5033
5034 static void
5035 linux_resume (struct thread_resume *resume_info, size_t n)
5036 {
5037 struct thread_info *need_step_over = NULL;
5038
5039 if (debug_threads)
5040 {
5041 debug_enter ();
5042 debug_printf ("linux_resume:\n");
5043 }
5044
5045 for_each_thread ([&] (thread_info *thread)
5046 {
5047 linux_set_resume_request (thread, resume_info, n);
5048 });
5049
5050 /* If there is a thread which would otherwise be resumed, which has
5051 a pending status, then don't resume any threads - we can just
5052 report the pending status. Make sure to queue any signals that
5053 would otherwise be sent. In non-stop mode, we'll apply this
5054 logic to each thread individually. We consume all pending events
5055 before considering to start a step-over (in all-stop). */
5056 bool any_pending = false;
5057 if (!non_stop)
5058 any_pending = find_thread (resume_status_pending_p) != NULL;
5059
5060 /* If there is a thread which would otherwise be resumed, which is
5061 stopped at a breakpoint that needs stepping over, then don't
5062 resume any threads - have it step over the breakpoint with all
5063 other threads stopped, then resume all threads again. Make sure
5064 to queue any signals that would otherwise be delivered or
5065 queued. */
5066 if (!any_pending && supports_breakpoints ())
5067 need_step_over = find_thread (need_step_over_p);
5068
5069 bool leave_all_stopped = (need_step_over != NULL || any_pending);
5070
5071 if (debug_threads)
5072 {
5073 if (need_step_over != NULL)
5074 debug_printf ("Not resuming all, need step over\n");
5075 else if (any_pending)
5076 debug_printf ("Not resuming, all-stop and found "
5077 "an LWP with pending status\n");
5078 else
5079 debug_printf ("Resuming, no pending status or step over needed\n");
5080 }
5081
5082 /* Even if we're leaving threads stopped, queue all signals we'd
5083 otherwise deliver. */
5084 for_each_thread ([&] (thread_info *thread)
5085 {
5086 linux_resume_one_thread (thread, leave_all_stopped);
5087 });
5088
5089 if (need_step_over)
5090 start_step_over (get_thread_lwp (need_step_over));
5091
5092 if (debug_threads)
5093 {
5094 debug_printf ("linux_resume done\n");
5095 debug_exit ();
5096 }
5097
5098 /* We may have events that were pending that can/should be sent to
5099 the client now. Trigger a linux_wait call. */
5100 if (target_is_async_p ())
5101 async_file_mark ();
5102 }
5103
5104 /* This function is called once per thread. We check the thread's
5105 last resume request, which will tell us whether to resume, step, or
5106 leave the thread stopped. Any signal the client requested to be
5107 delivered has already been enqueued at this point.
5108
5109 If any thread that GDB wants running is stopped at an internal
5110 breakpoint that needs stepping over, we start a step-over operation
5111 on that particular thread, and leave all others stopped. */
5112
5113 static void
5114 proceed_one_lwp (thread_info *thread, lwp_info *except)
5115 {
5116 struct lwp_info *lwp = get_thread_lwp (thread);
5117 int step;
5118
5119 if (lwp == except)
5120 return;
5121
5122 if (debug_threads)
5123 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5124
5125 if (!lwp->stopped)
5126 {
5127 if (debug_threads)
5128 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5129 return;
5130 }
5131
5132 if (thread->last_resume_kind == resume_stop
5133 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5134 {
5135 if (debug_threads)
5136 	debug_printf ("   client wants LWP %ld to remain stopped\n",
5137 lwpid_of (thread));
5138 return;
5139 }
5140
5141 if (lwp->status_pending_p)
5142 {
5143 if (debug_threads)
5144 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5145 lwpid_of (thread));
5146 return;
5147 }
5148
5149 gdb_assert (lwp->suspended >= 0);
5150
5151 if (lwp->suspended)
5152 {
5153 if (debug_threads)
5154 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5155 return;
5156 }
5157
5158 if (thread->last_resume_kind == resume_stop
5159 && lwp->pending_signals_to_report == NULL
5160 && (lwp->collecting_fast_tracepoint
5161 == fast_tpoint_collect_result::not_collecting))
5162 {
5163       /* We haven't reported this LWP as stopped yet (otherwise, the
5164 	 last_status.kind check above would have caught it, and we
5165 	 wouldn't reach here).  This LWP may have been momentarily
5166 	 paused by a stop_all_lwps call while handling, for example, another LWP's
5167 step-over. In that case, the pending expected SIGSTOP signal
5168 that was queued at vCont;t handling time will have already
5169 been consumed by wait_for_sigstop, and so we need to requeue
5170 another one here. Note that if the LWP already has a SIGSTOP
5171 pending, this is a no-op. */
5172
5173 if (debug_threads)
5174 debug_printf ("Client wants LWP %ld to stop. "
5175 "Making sure it has a SIGSTOP pending\n",
5176 lwpid_of (thread));
5177
5178 send_sigstop (lwp);
5179 }
5180
5181 if (thread->last_resume_kind == resume_step)
5182 {
5183 if (debug_threads)
5184 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5185 lwpid_of (thread));
5186
5187 /* If resume_step is requested by GDB, install single-step
5188 breakpoints when the thread is about to be actually resumed if
5189 the single-step breakpoints weren't removed. */
5190 if (can_software_single_step ()
5191 && !has_single_step_breakpoints (thread))
5192 install_software_single_step_breakpoints (lwp);
5193
5194 step = maybe_hw_step (thread);
5195 }
5196 else if (lwp->bp_reinsert != 0)
5197 {
5198 if (debug_threads)
5199 debug_printf (" stepping LWP %ld, reinsert set\n",
5200 lwpid_of (thread));
5201
5202 step = maybe_hw_step (thread);
5203 }
5204 else
5205 step = 0;
5206
5207 linux_resume_one_lwp (lwp, step, 0, NULL);
5208 }
5209
5210 static void
5211 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5212 {
5213 struct lwp_info *lwp = get_thread_lwp (thread);
5214
5215 if (lwp == except)
5216 return;
5217
5218 lwp_suspended_decr (lwp);
5219
5220 proceed_one_lwp (thread, except);
5221 }
5222
5223 /* When we finish a step-over, set threads running again. If there's
5224 another thread that may need a step-over, now's the time to start
5225 it. Eventually, we'll move all threads past their breakpoints. */
5226
5227 static void
5228 proceed_all_lwps (void)
5229 {
5230 struct thread_info *need_step_over;
5231
5232 /* If there is a thread which would otherwise be resumed, which is
5233 stopped at a breakpoint that needs stepping over, then don't
5234 resume any threads - have it step over the breakpoint with all
5235 other threads stopped, then resume all threads again. */
5236
5237 if (supports_breakpoints ())
5238 {
5239 need_step_over = find_thread (need_step_over_p);
5240
5241 if (need_step_over != NULL)
5242 {
5243 if (debug_threads)
5244 debug_printf ("proceed_all_lwps: found "
5245 "thread %ld needing a step-over\n",
5246 lwpid_of (need_step_over));
5247
5248 start_step_over (get_thread_lwp (need_step_over));
5249 return;
5250 }
5251 }
5252
5253 if (debug_threads)
5254 debug_printf ("Proceeding, no step-over needed\n");
5255
5256 for_each_thread ([] (thread_info *thread)
5257 {
5258 proceed_one_lwp (thread, NULL);
5259 });
5260 }
5261
5262 /* Stopped LWPs that the client wanted to be running, that don't have
5263 pending statuses, are set to run again, except for EXCEPT, if not
5264 NULL. This undoes a stop_all_lwps call. */
5265
5266 static void
5267 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5268 {
5269 if (debug_threads)
5270 {
5271 debug_enter ();
5272 if (except)
5273 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5274 lwpid_of (get_lwp_thread (except)));
5275 else
5276 debug_printf ("unstopping all lwps\n");
5277 }
5278
5279 if (unsuspend)
5280 for_each_thread ([&] (thread_info *thread)
5281 {
5282 unsuspend_and_proceed_one_lwp (thread, except);
5283 });
5284 else
5285 for_each_thread ([&] (thread_info *thread)
5286 {
5287 proceed_one_lwp (thread, except);
5288 });
5289
5290 if (debug_threads)
5291 {
5292 debug_printf ("unstop_all_lwps done\n");
5293 debug_exit ();
5294 }
5295 }
5296
5297
5298 #ifdef HAVE_LINUX_REGSETS
5299
5300 #define use_linux_regsets 1
5301
5302 /* Returns true if REGSET has been disabled. */
5303
5304 static int
5305 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5306 {
5307 return (info->disabled_regsets != NULL
5308 && info->disabled_regsets[regset - info->regsets]);
5309 }
5310
5311 /* Disable REGSET. */
5312
5313 static void
5314 disable_regset (struct regsets_info *info, struct regset_info *regset)
5315 {
5316 int dr_offset;
5317
5318 dr_offset = regset - info->regsets;
5319 if (info->disabled_regsets == NULL)
5320 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5321 info->disabled_regsets[dr_offset] = 1;
5322 }
5323
5324 static int
5325 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5326 struct regcache *regcache)
5327 {
5328 struct regset_info *regset;
5329 int saw_general_regs = 0;
5330 int pid;
5331 struct iovec iov;
5332
5333 pid = lwpid_of (current_thread);
5334 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5335 {
5336 void *buf, *data;
5337 int nt_type, res;
5338
5339 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5340 continue;
5341
5342 buf = xmalloc (regset->size);
5343
5344 nt_type = regset->nt_type;
5345 if (nt_type)
5346 {
5347 iov.iov_base = buf;
5348 iov.iov_len = regset->size;
5349 data = (void *) &iov;
5350 }
5351 else
5352 data = buf;
5353
5354 #ifndef __sparc__
5355 res = ptrace (regset->get_request, pid,
5356 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5357 #else
5358 res = ptrace (regset->get_request, pid, data, nt_type);
5359 #endif
5360 if (res < 0)
5361 {
5362 if (errno == EIO)
5363 {
5364 /* If we get EIO on a regset, do not try it again for
5365 this process mode. */
5366 disable_regset (regsets_info, regset);
5367 }
5368 else if (errno == ENODATA)
5369 {
5370 /* ENODATA may be returned if the regset is currently
5371 not "active". This can happen in normal operation,
5372 so suppress the warning in this case. */
5373 }
5374 else if (errno == ESRCH)
5375 {
5376 /* At this point, ESRCH should mean the process is
5377 already gone, in which case we simply ignore attempts
5378 to read its registers. */
5379 }
5380 else
5381 {
5382 char s[256];
5383 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5384 pid);
5385 perror (s);
5386 }
5387 }
5388 else
5389 {
5390 if (regset->type == GENERAL_REGS)
5391 saw_general_regs = 1;
5392 regset->store_function (regcache, buf);
5393 }
5394 free (buf);
5395 }
5396 if (saw_general_regs)
5397 return 0;
5398 else
5399 return 1;
5400 }
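
/* Illustration (not compiled): on targets where REGSET->GET_REQUEST
   is PTRACE_GETREGSET, the nt_type/iovec dance above corresponds to a
   call like this one.  NT_PRSTATUS comes from <elf.h>; elf_gregset_t
   is assumed to come from <sys/procfs.h>:  */
#if 0
  elf_gregset_t regs;
  struct iovec iov;

  iov.iov_base = &regs;
  iov.iov_len = sizeof (regs);
  if (ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov) == 0)
    {
      /* Success: the kernel shrank iov.iov_len to the number of
	 bytes actually written.  */
    }
#endif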
5401
5402 static int
5403 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5404 struct regcache *regcache)
5405 {
5406 struct regset_info *regset;
5407 int saw_general_regs = 0;
5408 int pid;
5409 struct iovec iov;
5410
5411 pid = lwpid_of (current_thread);
5412 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5413 {
5414 void *buf, *data;
5415 int nt_type, res;
5416
5417 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5418 || regset->fill_function == NULL)
5419 continue;
5420
5421 buf = xmalloc (regset->size);
5422
5423 /* First fill the buffer with the current register set contents,
5424 in case there are any items in the kernel's regset that are
5425 not in gdbserver's regcache. */
5426
5427 nt_type = regset->nt_type;
5428 if (nt_type)
5429 {
5430 iov.iov_base = buf;
5431 iov.iov_len = regset->size;
5432 data = (void *) &iov;
5433 }
5434 else
5435 data = buf;
5436
5437 #ifndef __sparc__
5438 res = ptrace (regset->get_request, pid,
5439 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5440 #else
5441 res = ptrace (regset->get_request, pid, data, nt_type);
5442 #endif
5443
5444 if (res == 0)
5445 {
5446 /* Then overlay our cached registers on that. */
5447 regset->fill_function (regcache, buf);
5448
5449 /* Only now do we write the register set. */
5450 #ifndef __sparc__
5451 res = ptrace (regset->set_request, pid,
5452 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5453 #else
5454 res = ptrace (regset->set_request, pid, data, nt_type);
5455 #endif
5456 }
5457
5458 if (res < 0)
5459 {
5460 if (errno == EIO)
5461 {
5462 /* If we get EIO on a regset, do not try it again for
5463 this process mode. */
5464 disable_regset (regsets_info, regset);
5465 }
5466 else if (errno == ESRCH)
5467 {
5468 /* At this point, ESRCH should mean the process is
5469 already gone, in which case we simply ignore attempts
5470 to change its registers. See also the related
5471 comment in linux_resume_one_lwp. */
5472 free (buf);
5473 return 0;
5474 }
5475 else
5476 {
5477 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5478 }
5479 }
5480 else if (regset->type == GENERAL_REGS)
5481 saw_general_regs = 1;
5482 free (buf);
5483 }
5484 if (saw_general_regs)
5485 return 0;
5486 else
5487 return 1;
5488 }
5489
5490 #else /* !HAVE_LINUX_REGSETS */
5491
5492 #define use_linux_regsets 0
5493 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5494 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5495
5496 #endif
5497
5498 /* Return 1 if register REGNO is supported by one of the regset ptrace
5499 calls or 0 if it has to be transferred individually. */
5500
5501 static int
5502 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5503 {
5504 unsigned char mask = 1 << (regno % 8);
5505 size_t index = regno / 8;
5506
5507 return (use_linux_regsets
5508 && (regs_info->regset_bitmap == NULL
5509 || (regs_info->regset_bitmap[index] & mask) != 0));
5510 }
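
/* Illustration: for regno 10, index is 10 / 8 == 1 and mask is
   1 << (10 % 8) == 0x04, so the test above reads bit 2 of
   regset_bitmap[1].  A NULL bitmap means every register is covered
   by some regset.  */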
5511
5512 #ifdef HAVE_LINUX_USRREGS
5513
5514 static int
5515 register_addr (const struct usrregs_info *usrregs, int regnum)
5516 {
5517 int addr;
5518
5519 if (regnum < 0 || regnum >= usrregs->num_regs)
5520 error ("Invalid register number %d.", regnum);
5521
5522 addr = usrregs->regmap[regnum];
5523
5524 return addr;
5525 }
5526
5527 /* Fetch one register. */
5528 static void
5529 fetch_register (const struct usrregs_info *usrregs,
5530 struct regcache *regcache, int regno)
5531 {
5532 CORE_ADDR regaddr;
5533 int i, size;
5534 char *buf;
5535 int pid;
5536
5537 if (regno >= usrregs->num_regs)
5538 return;
5539 if ((*the_low_target.cannot_fetch_register) (regno))
5540 return;
5541
5542 regaddr = register_addr (usrregs, regno);
5543 if (regaddr == -1)
5544 return;
5545
5546 size = ((register_size (regcache->tdesc, regno)
5547 + sizeof (PTRACE_XFER_TYPE) - 1)
5548 & -sizeof (PTRACE_XFER_TYPE));
5549 buf = (char *) alloca (size);
5550
5551 pid = lwpid_of (current_thread);
5552 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5553 {
5554 errno = 0;
5555 *(PTRACE_XFER_TYPE *) (buf + i) =
5556 ptrace (PTRACE_PEEKUSER, pid,
5557 /* Coerce to a uintptr_t first to avoid potential gcc warning
5558 about coercing an 8 byte integer to a 4 byte pointer. */
5559 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5560 regaddr += sizeof (PTRACE_XFER_TYPE);
5561 if (errno != 0)
5562 {
5563 /* Mark register REGNO unavailable. */
5564 supply_register (regcache, regno, NULL);
5565 return;
5566 }
5567 }
5568
5569 if (the_low_target.supply_ptrace_register)
5570 the_low_target.supply_ptrace_register (regcache, regno, buf);
5571 else
5572 supply_register (regcache, regno, buf);
5573 }
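
/* Editor's sketch (not part of the original source): the errno
   protocol used by the loop above, reduced to a single word.  A peek
   can legitimately return -1, so errno must be cleared before the
   call and tested after it.  REGADDR is a hypothetical offset into
   the USER area.  */

static long
example_peek_user_word (int pid, long regaddr, int *errp)
{
  long val;

  errno = 0;
  val = ptrace (PTRACE_PEEKUSER, pid, (void *) (uintptr_t) regaddr,
		(void *) 0);
  *errp = errno;		/* 0 on success; e.g. EIO for a bad offset.  */
  return val;
}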
5574
5575 /* Store one register. */
5576 static void
5577 store_register (const struct usrregs_info *usrregs,
5578 struct regcache *regcache, int regno)
5579 {
5580 CORE_ADDR regaddr;
5581 int i, size;
5582 char *buf;
5583 int pid;
5584
5585 if (regno >= usrregs->num_regs)
5586 return;
5587 if ((*the_low_target.cannot_store_register) (regno))
5588 return;
5589
5590 regaddr = register_addr (usrregs, regno);
5591 if (regaddr == -1)
5592 return;
5593
5594 size = ((register_size (regcache->tdesc, regno)
5595 + sizeof (PTRACE_XFER_TYPE) - 1)
5596 & -sizeof (PTRACE_XFER_TYPE));
5597 buf = (char *) alloca (size);
5598 memset (buf, 0, size);
5599
5600 if (the_low_target.collect_ptrace_register)
5601 the_low_target.collect_ptrace_register (regcache, regno, buf);
5602 else
5603 collect_register (regcache, regno, buf);
5604
5605 pid = lwpid_of (current_thread);
5606 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5607 {
5608 errno = 0;
5609 ptrace (PTRACE_POKEUSER, pid,
5610 /* Coerce to a uintptr_t first to avoid potential gcc warning
5611 about coercing an 8 byte integer to a 4 byte pointer. */
5612 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5613 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5614 if (errno != 0)
5615 {
5616 /* At this point, ESRCH should mean the process is
5617 already gone, in which case we simply ignore attempts
5618 to change its registers. See also the related
5619 comment in linux_resume_one_lwp. */
5620 if (errno == ESRCH)
5621 return;
5622
5623 if ((*the_low_target.cannot_store_register) (regno) == 0)
5624 error ("writing register %d: %s", regno, strerror (errno));
5625 }
5626 regaddr += sizeof (PTRACE_XFER_TYPE);
5627 }
5628 }
5629
5630 /* Fetch all registers, or just one, from the child process.
5631 If REGNO is -1, do this for all registers, skipping any that are
5632 assumed to have been retrieved by regsets_fetch_inferior_registers,
5633 unless ALL is non-zero.
5634 Otherwise, REGNO specifies which register (so we can save time). */
5635 static void
5636 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5637 struct regcache *regcache, int regno, int all)
5638 {
5639 struct usrregs_info *usr = regs_info->usrregs;
5640
5641 if (regno == -1)
5642 {
5643 for (regno = 0; regno < usr->num_regs; regno++)
5644 if (all || !linux_register_in_regsets (regs_info, regno))
5645 fetch_register (usr, regcache, regno);
5646 }
5647 else
5648 fetch_register (usr, regcache, regno);
5649 }
5650
5651 /* Store our register values back into the inferior.
5652 If REGNO is -1, do this for all registers, skipping any that are
5653 assumed to have been saved by regsets_store_inferior_registers,
5654 unless ALL is non-zero.
5655 Otherwise, REGNO specifies which register (so we can save time). */
5656 static void
5657 usr_store_inferior_registers (const struct regs_info *regs_info,
5658 struct regcache *regcache, int regno, int all)
5659 {
5660 struct usrregs_info *usr = regs_info->usrregs;
5661
5662 if (regno == -1)
5663 {
5664 for (regno = 0; regno < usr->num_regs; regno++)
5665 if (all || !linux_register_in_regsets (regs_info, regno))
5666 store_register (usr, regcache, regno);
5667 }
5668 else
5669 store_register (usr, regcache, regno);
5670 }
5671
5672 #else /* !HAVE_LINUX_USRREGS */
5673
5674 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5675 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5676
5677 #endif
5678
5679
5680 static void
5681 linux_fetch_registers (struct regcache *regcache, int regno)
5682 {
5683 int use_regsets;
5684 int all = 0;
5685 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5686
5687 if (regno == -1)
5688 {
5689 if (the_low_target.fetch_register != NULL
5690 && regs_info->usrregs != NULL)
5691 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5692 (*the_low_target.fetch_register) (regcache, regno);
5693
5694 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5695 if (regs_info->usrregs != NULL)
5696 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5697 }
5698 else
5699 {
5700 if (the_low_target.fetch_register != NULL
5701 && (*the_low_target.fetch_register) (regcache, regno))
5702 return;
5703
5704 use_regsets = linux_register_in_regsets (regs_info, regno);
5705 if (use_regsets)
5706 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5707 regcache);
5708 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5709 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5710 }
5711 }
5712
5713 static void
5714 linux_store_registers (struct regcache *regcache, int regno)
5715 {
5716 int use_regsets;
5717 int all = 0;
5718 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5719
5720 if (regno == -1)
5721 {
5722 all = regsets_store_inferior_registers (regs_info->regsets_info,
5723 regcache);
5724 if (regs_info->usrregs != NULL)
5725 usr_store_inferior_registers (regs_info, regcache, regno, all);
5726 }
5727 else
5728 {
5729 use_regsets = linux_register_in_regsets (regs_info, regno);
5730 if (use_regsets)
5731 all = regsets_store_inferior_registers (regs_info->regsets_info,
5732 regcache);
5733 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5734 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5735 }
5736 }
5737
5738
5739 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5740 to debugger memory starting at MYADDR. */
5741
5742 static int
5743 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5744 {
5745 int pid = lwpid_of (current_thread);
5746 PTRACE_XFER_TYPE *buffer;
5747 CORE_ADDR addr;
5748 int count;
5749 char filename[64];
5750 int i;
5751 int ret;
5752 int fd;
5753
5754 /* Try using /proc. Don't bother for short transfers (under three words). */
5755 if (len >= 3 * sizeof (long))
5756 {
5757 int bytes;
5758
5759 /* We could keep this file open and cache it - possibly one per
5760 thread. That requires some juggling, but is even faster. */
5761 sprintf (filename, "/proc/%d/mem", pid);
5762 fd = open (filename, O_RDONLY | O_LARGEFILE);
5763 if (fd == -1)
5764 goto no_proc;
5765
5766 /* If pread64 is available, use it. It's faster if the kernel
5767 supports it (only one syscall), and it's 64-bit safe even on
5768 32-bit platforms (for instance, SPARC debugging a SPARC64
5769 application). */
5770 #ifdef HAVE_PREAD64
5771 bytes = pread64 (fd, myaddr, len, memaddr);
5772 #else
5773 bytes = -1;
5774 if (lseek (fd, memaddr, SEEK_SET) != -1)
5775 bytes = read (fd, myaddr, len);
5776 #endif
5777
5778 close (fd);
5779 if (bytes == len)
5780 return 0;
5781
5782 /* Some data was read, we'll try to get the rest with ptrace. */
5783 if (bytes > 0)
5784 {
5785 memaddr += bytes;
5786 myaddr += bytes;
5787 len -= bytes;
5788 }
5789 }
5790
5791 no_proc:
5792 /* Round starting address down to longword boundary. */
5793 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5794 /* Round ending address up; get number of longwords that makes. */
5795 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5796 / sizeof (PTRACE_XFER_TYPE));
5797 /* Allocate buffer of that many longwords. */
5798 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5799
5800 /* Read all the longwords. */
5801 errno = 0;
5802 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5803 {
5804 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5805 about coercing an 8 byte integer to a 4 byte pointer. */
5806 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5807 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5808 (PTRACE_TYPE_ARG4) 0);
5809 if (errno)
5810 break;
5811 }
5812 ret = errno;
5813
5814 /* Copy appropriate bytes out of the buffer. */
5815 if (i > 0)
5816 {
5817 i *= sizeof (PTRACE_XFER_TYPE);
5818 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5819 memcpy (myaddr,
5820 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5821 i < len ? i : len);
5822 }
5823
5824 return ret;
5825 }
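
/* Editor's sketch (not part of the original source): the /proc fast
   path above in miniature.  It uses plain pread instead of the
   pread64/lseek dance, and relies on <fcntl.h> and <unistd.h>, which
   are already included.  */

static ssize_t
example_read_inferior_mem (int pid, unsigned long addr, void *out,
			   size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  sprintf (filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;			/* The caller falls back to ptrace.  */

  n = pread (fd, out, len, (off_t) addr);
  close (fd);
  return n;			/* A short read means a partial mapping.  */
}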
5826
5827 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5828 memory at MEMADDR. On failure (cannot write to the inferior)
5829 returns the value of errno. Always succeeds if LEN is zero. */
5830
5831 static int
5832 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5833 {
5834 int i;
5835 /* Round starting address down to longword boundary. */
5836 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5837 /* Round ending address up; get number of longwords that makes. */
5838 int count
5839 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5840 / sizeof (PTRACE_XFER_TYPE);
5841
5842 /* Allocate buffer of that many longwords. */
5843 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5844
5845 int pid = lwpid_of (current_thread);
5846
5847 if (len == 0)
5848 {
5849 /* Zero length write always succeeds. */
5850 return 0;
5851 }
5852
5853 if (debug_threads)
5854 {
5855 /* Dump up to four bytes. */
5856 char str[4 * 2 + 1];
5857 char *p = str;
5858 int dump = len < 4 ? len : 4;
5859
5860 for (i = 0; i < dump; i++)
5861 {
5862 sprintf (p, "%02x", myaddr[i]);
5863 p += 2;
5864 }
5865 *p = '\0';
5866
5867 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5868 str, (long) memaddr, pid);
5869 }
5870
5871 /* Fill start and end extra bytes of buffer with existing memory data. */
5872
5873 errno = 0;
5874 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5875 about coercing an 8 byte integer to a 4 byte pointer. */
5876 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5877 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5878 (PTRACE_TYPE_ARG4) 0);
5879 if (errno)
5880 return errno;
5881
5882 if (count > 1)
5883 {
5884 errno = 0;
5885 buffer[count - 1]
5886 = ptrace (PTRACE_PEEKTEXT, pid,
5887 /* Coerce to a uintptr_t first to avoid potential gcc warning
5888 about coercing an 8 byte integer to a 4 byte pointer. */
5889 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5890 * sizeof (PTRACE_XFER_TYPE)),
5891 (PTRACE_TYPE_ARG4) 0);
5892 if (errno)
5893 return errno;
5894 }
5895
5896 /* Copy data to be written over corresponding part of buffer. */
5897
5898 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5899 myaddr, len);
5900
5901 /* Write the entire buffer. */
5902
5903 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5904 {
5905 errno = 0;
5906 ptrace (PTRACE_POKETEXT, pid,
5907 /* Coerce to a uintptr_t first to avoid potential gcc warning
5908 about coercing an 8 byte integer to a 4 byte pointer. */
5909 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5910 (PTRACE_TYPE_ARG4) buffer[i]);
5911 if (errno)
5912 return errno;
5913 }
5914
5915 return 0;
5916 }
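
/* Editor's sketch (not part of the original source): the
   edge-preserving write above, reduced to a region that fits inside a
   single word.  The neighbouring bytes survive because the whole word
   is peeked before being poked back.  */

static int
example_poke_bytes (int pid, unsigned long addr,
		    const unsigned char *data, size_t len)
{
  unsigned long base = addr & ~(sizeof (long) - 1);
  long word;

  /* Assumes ADDR + LEN does not cross the word at BASE.  */
  errno = 0;
  word = ptrace (PTRACE_PEEKTEXT, pid, (void *) base, (void *) 0);
  if (errno != 0)
    return errno;

  /* Overlay the new bytes on the existing word.  */
  memcpy ((char *) &word + (addr - base), data, len);

  errno = 0;
  ptrace (PTRACE_POKETEXT, pid, (void *) base, (void *) word);
  return errno;
}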
5917
5918 static void
5919 linux_look_up_symbols (void)
5920 {
5921 #ifdef USE_THREAD_DB
5922 struct process_info *proc = current_process ();
5923
5924 if (proc->priv->thread_db != NULL)
5925 return;
5926
5927 thread_db_init ();
5928 #endif
5929 }
5930
5931 static void
5932 linux_request_interrupt (void)
5933 {
5934 /* Send a SIGINT to the process group. This acts just like the user
5935 typed a ^C on the controlling terminal. */
5936 kill (-signal_pid, SIGINT);
5937 }
5938
5939 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5940 to debugger memory starting at MYADDR. */
5941
5942 static int
5943 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5944 {
5945 char filename[PATH_MAX];
5946 int fd, n;
5947 int pid = lwpid_of (current_thread);
5948
5949 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5950
5951 fd = open (filename, O_RDONLY);
5952 if (fd < 0)
5953 return -1;
5954
5955 if (offset != (CORE_ADDR) 0
5956 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5957 n = -1;
5958 else
5959 n = read (fd, myaddr, len);
5960
5961 close (fd);
5962
5963 return n;
5964 }
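
/* Editor's sketch (not part of the original source): consuming the
   bytes that linux_read_auxv returns, here for a 64-bit inferior.
   The vector is an array of (a_type, a_val) pairs terminated by
   AT_NULL; Elf64_auxv_t is the same type get_phdr_phnum_from_proc_auxv
   uses below.  */

static int
example_auxv_find (const unsigned char *auxv, size_t len,
		   unsigned long type, unsigned long *valp)
{
  const Elf64_auxv_t *av = (const Elf64_auxv_t *) auxv;
  const Elf64_auxv_t *end = (const Elf64_auxv_t *) (auxv + len);

  for (; av < end && av->a_type != AT_NULL; av++)
    if (av->a_type == type)
      {
	*valp = av->a_un.a_val;
	return 1;
      }
  return 0;
}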
5965
5966 /* These breakpoint and watchpoint related wrapper functions simply
5967 pass on the function call if the target has registered a
5968 corresponding function. */
5969
5970 static int
5971 linux_supports_z_point_type (char z_type)
5972 {
5973 return (the_low_target.supports_z_point_type != NULL
5974 && the_low_target.supports_z_point_type (z_type));
5975 }
5976
5977 static int
5978 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5979 int size, struct raw_breakpoint *bp)
5980 {
5981 if (type == raw_bkpt_type_sw)
5982 return insert_memory_breakpoint (bp);
5983 else if (the_low_target.insert_point != NULL)
5984 return the_low_target.insert_point (type, addr, size, bp);
5985 else
5986 /* Unsupported (see target.h). */
5987 return 1;
5988 }
5989
5990 static int
5991 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5992 int size, struct raw_breakpoint *bp)
5993 {
5994 if (type == raw_bkpt_type_sw)
5995 return remove_memory_breakpoint (bp);
5996 else if (the_low_target.remove_point != NULL)
5997 return the_low_target.remove_point (type, addr, size, bp);
5998 else
5999 /* Unsupported (see target.h). */
6000 return 1;
6001 }
6002
6003 /* Implement the to_stopped_by_sw_breakpoint target_ops
6004 method. */
6005
6006 static int
6007 linux_stopped_by_sw_breakpoint (void)
6008 {
6009 struct lwp_info *lwp = get_thread_lwp (current_thread);
6010
6011 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6012 }
6013
6014 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6015 method. */
6016
6017 static int
6018 linux_supports_stopped_by_sw_breakpoint (void)
6019 {
6020 return USE_SIGTRAP_SIGINFO;
6021 }
6022
6023 /* Implement the to_stopped_by_hw_breakpoint target_ops
6024 method. */
6025
6026 static int
6027 linux_stopped_by_hw_breakpoint (void)
6028 {
6029 struct lwp_info *lwp = get_thread_lwp (current_thread);
6030
6031 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6032 }
6033
6034 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6035 method. */
6036
6037 static int
6038 linux_supports_stopped_by_hw_breakpoint (void)
6039 {
6040 return USE_SIGTRAP_SIGINFO;
6041 }
6042
6043 /* Implement the supports_hardware_single_step target_ops method. */
6044
6045 static int
6046 linux_supports_hardware_single_step (void)
6047 {
6048 return can_hardware_single_step ();
6049 }
6050
6051 static int
6052 linux_supports_software_single_step (void)
6053 {
6054 return can_software_single_step ();
6055 }
6056
6057 static int
6058 linux_stopped_by_watchpoint (void)
6059 {
6060 struct lwp_info *lwp = get_thread_lwp (current_thread);
6061
6062 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6063 }
6064
6065 static CORE_ADDR
6066 linux_stopped_data_address (void)
6067 {
6068 struct lwp_info *lwp = get_thread_lwp (current_thread);
6069
6070 return lwp->stopped_data_address;
6071 }
6072
6073 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6074 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6075 && defined(PT_TEXT_END_ADDR)
6076
6077 /* This is only used for targets that define PT_TEXT_ADDR,
6078 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
6079 the target has different ways of acquiring this information, like
6080 loadmaps. */
6081
6082 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6083 to tell gdb about. */
6084
6085 static int
6086 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6087 {
6088 unsigned long text, text_end, data;
6089 int pid = lwpid_of (current_thread);
6090
6091 errno = 0;
6092
6093 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6094 (PTRACE_TYPE_ARG4) 0);
6095 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6096 (PTRACE_TYPE_ARG4) 0);
6097 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6098 (PTRACE_TYPE_ARG4) 0);
6099
6100 if (errno == 0)
6101 {
6102 /* Both text and data offsets produced at compile-time (and so
6103 used by gdb) are relative to the beginning of the program,
6104 with the data segment immediately following the text segment.
6105 However, the actual runtime layout in memory may put the data
6106 somewhere else, so when we send gdb a data base-address, we
6107 use the real data base address and subtract the compile-time
6108 data base-address from it (which is just the length of the
6109 text segment). BSS immediately follows data in both
6110 cases. */
6111 *text_p = text;
6112 *data_p = data - (text_end - text);
6113
6114 return 1;
6115 }
6116 return 0;
6117 }
6118 #endif
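
/* Editor's worked example for the offset math above (hypothetical
   numbers): if PT_TEXT_ADDR reads back 0x10000, PT_TEXT_END_ADDR
   0x18000 and PT_DATA_ADDR 0x20000, the text segment is 0x8000 bytes
   long, so *text_p = 0x10000 and *data_p = 0x20000 - 0x8000 =
   0x18000 - exactly the value that makes gdb's compile-time data
   offsets line up with the runtime layout.  */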
6119
6120 static int
6121 linux_qxfer_osdata (const char *annex,
6122 unsigned char *readbuf, unsigned const char *writebuf,
6123 CORE_ADDR offset, int len)
6124 {
6125 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6126 }
6127
6128 /* Convert a native/host siginfo object into/from the siginfo in the
6129 layout of the inferior's architecture. */
6130
6131 static void
6132 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6133 {
6134 int done = 0;
6135
6136 if (the_low_target.siginfo_fixup != NULL)
6137 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6138
6139 /* If there was no callback, or the callback didn't do anything,
6140 then just do a straight memcpy. */
6141 if (!done)
6142 {
6143 if (direction == 1)
6144 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6145 else
6146 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6147 }
6148 }
6149
6150 static int
6151 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6152 unsigned const char *writebuf, CORE_ADDR offset, int len)
6153 {
6154 int pid;
6155 siginfo_t siginfo;
6156 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6157
6158 if (current_thread == NULL)
6159 return -1;
6160
6161 pid = lwpid_of (current_thread);
6162
6163 if (debug_threads)
6164 debug_printf ("%s siginfo for lwp %d.\n",
6165 readbuf != NULL ? "Reading" : "Writing",
6166 pid);
6167
6168 if (offset >= sizeof (siginfo))
6169 return -1;
6170
6171 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6172 return -1;
6173
6174 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6175 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6176 inferior with a 64-bit GDBSERVER should look the same as debugging it
6177 with a 32-bit GDBSERVER, we need to convert it. */
6178 siginfo_fixup (&siginfo, inf_siginfo, 0);
6179
6180 if (offset + len > sizeof (siginfo))
6181 len = sizeof (siginfo) - offset;
6182
6183 if (readbuf != NULL)
6184 memcpy (readbuf, inf_siginfo + offset, len);
6185 else
6186 {
6187 memcpy (inf_siginfo + offset, writebuf, len);
6188
6189 /* Convert back to ptrace layout before flushing it out. */
6190 siginfo_fixup (&siginfo, inf_siginfo, 1);
6191
6192 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6193 return -1;
6194 }
6195
6196 return len;
6197 }
6198
6199 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6200 it lets us notice when children change state; and it acts as the
6201 handler for the sigsuspend in my_waitpid. */
6202
6203 static void
6204 sigchld_handler (int signo)
6205 {
6206 int old_errno = errno;
6207
6208 if (debug_threads)
6209 {
6210 do
6211 {
6212 /* fprintf is not async-signal-safe, so call write
6213 directly. */
6214 if (write (2, "sigchld_handler\n",
6215 sizeof ("sigchld_handler\n") - 1) < 0)
6216 break; /* just ignore */
6217 } while (0);
6218 }
6219
6220 if (target_is_async_p ())
6221 async_file_mark (); /* trigger a linux_wait */
6222
6223 errno = old_errno;
6224 }
6225
6226 static int
6227 linux_supports_non_stop (void)
6228 {
6229 return 1;
6230 }
6231
6232 static int
6233 linux_async (int enable)
6234 {
6235 int previous = target_is_async_p ();
6236
6237 if (debug_threads)
6238 debug_printf ("linux_async (%d), previous=%d\n",
6239 enable, previous);
6240
6241 if (previous != enable)
6242 {
6243 sigset_t mask;
6244 sigemptyset (&mask);
6245 sigaddset (&mask, SIGCHLD);
6246
6247 sigprocmask (SIG_BLOCK, &mask, NULL);
6248
6249 if (enable)
6250 {
6251 if (pipe (linux_event_pipe) == -1)
6252 {
6253 linux_event_pipe[0] = -1;
6254 linux_event_pipe[1] = -1;
6255 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6256
6257 warning ("creating event pipe failed.");
6258 return previous;
6259 }
6260
6261 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6262 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6263
6264 /* Register the event loop handler. */
6265 add_file_handler (linux_event_pipe[0],
6266 handle_target_event, NULL);
6267
6268 /* Always trigger a linux_wait. */
6269 async_file_mark ();
6270 }
6271 else
6272 {
6273 delete_file_handler (linux_event_pipe[0]);
6274
6275 close (linux_event_pipe[0]);
6276 close (linux_event_pipe[1]);
6277 linux_event_pipe[0] = -1;
6278 linux_event_pipe[1] = -1;
6279 }
6280
6281 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6282 }
6283
6284 return previous;
6285 }
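
/* Editor's sketch (not part of the original source): the self-pipe
   trick implemented above.  A signal handler may only call
   async-signal-safe functions, so it writes one byte into a
   non-blocking pipe, and the event loop wakes up when the read end
   becomes readable.  All names here are hypothetical.  */

static int example_event_pipe[2] = { -1, -1 };

static void
example_async_mark (void)
{
  /* If the pipe is already full, a wakeup is pending anyway, so a
     failed write is fine to ignore.  */
  if (write (example_event_pipe[1], "+", 1) < 0)
    ;				/* Ignore.  */
}

static int
example_async_init (void)
{
  if (pipe (example_event_pipe) != 0)
    return -1;
  fcntl (example_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_event_pipe[1], F_SETFL, O_NONBLOCK);
  return example_event_pipe[0];	/* Poll this fd in the event loop.  */
}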
6286
6287 static int
6288 linux_start_non_stop (int nonstop)
6289 {
6290 /* Register or unregister from event-loop accordingly. */
6291 linux_async (nonstop);
6292
6293 if (target_is_async_p () != (nonstop != 0))
6294 return -1;
6295
6296 return 0;
6297 }
6298
6299 static int
6300 linux_supports_multi_process (void)
6301 {
6302 return 1;
6303 }
6304
6305 /* Check if fork events are supported. */
6306
6307 static int
6308 linux_supports_fork_events (void)
6309 {
6310 return linux_supports_tracefork ();
6311 }
6312
6313 /* Check if vfork events are supported. */
6314
6315 static int
6316 linux_supports_vfork_events (void)
6317 {
6318 return linux_supports_tracefork ();
6319 }
6320
6321 /* Check if exec events are supported. */
6322
6323 static int
6324 linux_supports_exec_events (void)
6325 {
6326 return linux_supports_traceexec ();
6327 }
6328
6329 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6330 ptrace flags for all inferiors. This is in case the new GDB connection
6331 doesn't support the same set of events that the previous one did. */
6332
6333 static void
6334 linux_handle_new_gdb_connection (void)
6335 {
6336 /* Request that all the lwps reset their ptrace options. */
6337 for_each_thread ([] (thread_info *thread)
6338 {
6339 struct lwp_info *lwp = get_thread_lwp (thread);
6340
6341 if (!lwp->stopped)
6342 {
6343 /* Stop the lwp so we can modify its ptrace options. */
6344 lwp->must_set_ptrace_flags = 1;
6345 linux_stop_lwp (lwp);
6346 }
6347 else
6348 {
6349 /* Already stopped; go ahead and set the ptrace options. */
6350 struct process_info *proc = find_process_pid (pid_of (thread));
6351 int options = linux_low_ptrace_options (proc->attached);
6352
6353 linux_enable_event_reporting (lwpid_of (thread), options);
6354 lwp->must_set_ptrace_flags = 0;
6355 }
6356 });
6357 }
6358
6359 static int
6360 linux_supports_disable_randomization (void)
6361 {
6362 #ifdef HAVE_PERSONALITY
6363 return 1;
6364 #else
6365 return 0;
6366 #endif
6367 }
6368
6369 static int
6370 linux_supports_agent (void)
6371 {
6372 return 1;
6373 }
6374
6375 static int
6376 linux_supports_range_stepping (void)
6377 {
6378 if (can_software_single_step ())
6379 return 1;
6380 if (*the_low_target.supports_range_stepping == NULL)
6381 return 0;
6382
6383 return (*the_low_target.supports_range_stepping) ();
6384 }
6385
6386 /* Enumerate spufs IDs for process PID. */
6387 static int
6388 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6389 {
6390 int pos = 0;
6391 int written = 0;
6392 char path[128];
6393 DIR *dir;
6394 struct dirent *entry;
6395
6396 sprintf (path, "/proc/%ld/fd", pid);
6397 dir = opendir (path);
6398 if (!dir)
6399 return -1;
6400
6401 rewinddir (dir);
6402 while ((entry = readdir (dir)) != NULL)
6403 {
6404 struct stat st;
6405 struct statfs stfs;
6406 int fd;
6407
6408 fd = atoi (entry->d_name);
6409 if (!fd)
6410 continue;
6411
6412 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6413 if (stat (path, &st) != 0)
6414 continue;
6415 if (!S_ISDIR (st.st_mode))
6416 continue;
6417
6418 if (statfs (path, &stfs) != 0)
6419 continue;
6420 if (stfs.f_type != SPUFS_MAGIC)
6421 continue;
6422
6423 if (pos >= offset && pos + 4 <= offset + len)
6424 {
6425 *(unsigned int *)(buf + pos - offset) = fd;
6426 written += 4;
6427 }
6428 pos += 4;
6429 }
6430
6431 closedir (dir);
6432 return written;
6433 }
6434
6435 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6436 object type, using the /proc file system. */
6437 static int
6438 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6439 unsigned const char *writebuf,
6440 CORE_ADDR offset, int len)
6441 {
6442 long pid = lwpid_of (current_thread);
6443 char buf[128];
6444 int fd = 0;
6445 int ret = 0;
6446
6447 if (!writebuf && !readbuf)
6448 return -1;
6449
6450 if (!*annex)
6451 {
6452 if (!readbuf)
6453 return -1;
6454 else
6455 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6456 }
6457
6458 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6459 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6460 if (fd <= 0)
6461 return -1;
6462
6463 if (offset != 0
6464 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6465 {
6466 close (fd);
6467 return 0;
6468 }
6469
6470 if (writebuf)
6471 ret = write (fd, writebuf, (size_t) len);
6472 else
6473 ret = read (fd, readbuf, (size_t) len);
6474
6475 close (fd);
6476 return ret;
6477 }
6478
6479 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6480 struct target_loadseg
6481 {
6482 /* Core address to which the segment is mapped. */
6483 Elf32_Addr addr;
6484 /* VMA recorded in the program header. */
6485 Elf32_Addr p_vaddr;
6486 /* Size of this segment in memory. */
6487 Elf32_Word p_memsz;
6488 };
6489
6490 # if defined PT_GETDSBT
6491 struct target_loadmap
6492 {
6493 /* Protocol version number, must be zero. */
6494 Elf32_Word version;
6495 /* Pointer to the DSBT table, its size, and the DSBT index. */
6496 unsigned *dsbt_table;
6497 unsigned dsbt_size, dsbt_index;
6498 /* Number of segments in this map. */
6499 Elf32_Word nsegs;
6500 /* The actual memory map. */
6501 struct target_loadseg segs[/*nsegs*/];
6502 };
6503 # define LINUX_LOADMAP PT_GETDSBT
6504 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6505 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6506 # else
6507 struct target_loadmap
6508 {
6509 /* Protocol version number, must be zero. */
6510 Elf32_Half version;
6511 /* Number of segments in this map. */
6512 Elf32_Half nsegs;
6513 /* The actual memory map. */
6514 struct target_loadseg segs[/*nsegs*/];
6515 };
6516 # define LINUX_LOADMAP PTRACE_GETFDPIC
6517 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6518 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6519 # endif
6520
6521 static int
6522 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6523 unsigned char *myaddr, unsigned int len)
6524 {
6525 int pid = lwpid_of (current_thread);
6526 int addr = -1;
6527 struct target_loadmap *data = NULL;
6528 unsigned int actual_length, copy_length;
6529
6530 if (strcmp (annex, "exec") == 0)
6531 addr = (int) LINUX_LOADMAP_EXEC;
6532 else if (strcmp (annex, "interp") == 0)
6533 addr = (int) LINUX_LOADMAP_INTERP;
6534 else
6535 return -1;
6536
6537 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6538 return -1;
6539
6540 if (data == NULL)
6541 return -1;
6542
6543 actual_length = sizeof (struct target_loadmap)
6544 + sizeof (struct target_loadseg) * data->nsegs;
6545
6546 if (offset < 0 || offset > actual_length)
6547 return -1;
6548
6549 copy_length = actual_length - offset < len ? actual_length - offset : len;
6550 memcpy (myaddr, (char *) data + offset, copy_length);
6551 return copy_length;
6552 }
6553 #else
6554 # define linux_read_loadmap NULL
6555 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6556
6557 static void
6558 linux_process_qsupported (char **features, int count)
6559 {
6560 if (the_low_target.process_qsupported != NULL)
6561 the_low_target.process_qsupported (features, count);
6562 }
6563
6564 static int
6565 linux_supports_catch_syscall (void)
6566 {
6567 return (the_low_target.get_syscall_trapinfo != NULL
6568 && linux_supports_tracesysgood ());
6569 }
6570
6571 static int
6572 linux_get_ipa_tdesc_idx (void)
6573 {
6574 if (the_low_target.get_ipa_tdesc_idx == NULL)
6575 return 0;
6576
6577 return (*the_low_target.get_ipa_tdesc_idx) ();
6578 }
6579
6580 static int
6581 linux_supports_tracepoints (void)
6582 {
6583 if (*the_low_target.supports_tracepoints == NULL)
6584 return 0;
6585
6586 return (*the_low_target.supports_tracepoints) ();
6587 }
6588
6589 static CORE_ADDR
6590 linux_read_pc (struct regcache *regcache)
6591 {
6592 if (the_low_target.get_pc == NULL)
6593 return 0;
6594
6595 return (*the_low_target.get_pc) (regcache);
6596 }
6597
6598 static void
6599 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6600 {
6601 gdb_assert (the_low_target.set_pc != NULL);
6602
6603 (*the_low_target.set_pc) (regcache, pc);
6604 }
6605
6606 static int
6607 linux_thread_stopped (struct thread_info *thread)
6608 {
6609 return get_thread_lwp (thread)->stopped;
6610 }
6611
6612 /* This exposes stop-all-threads functionality to other modules. */
6613
6614 static void
6615 linux_pause_all (int freeze)
6616 {
6617 stop_all_lwps (freeze, NULL);
6618 }
6619
6620 /* This exposes unstop-all-threads functionality to other gdbserver
6621 modules. */
6622
6623 static void
6624 linux_unpause_all (int unfreeze)
6625 {
6626 unstop_all_lwps (unfreeze, NULL);
6627 }
6628
6629 static int
6630 linux_prepare_to_access_memory (void)
6631 {
6632 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6633 running LWP. */
6634 if (non_stop)
6635 linux_pause_all (1);
6636 return 0;
6637 }
6638
6639 static void
6640 linux_done_accessing_memory (void)
6641 {
6642 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6643 running LWP. */
6644 if (non_stop)
6645 linux_unpause_all (1);
6646 }
6647
6648 static int
6649 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6650 CORE_ADDR collector,
6651 CORE_ADDR lockaddr,
6652 ULONGEST orig_size,
6653 CORE_ADDR *jump_entry,
6654 CORE_ADDR *trampoline,
6655 ULONGEST *trampoline_size,
6656 unsigned char *jjump_pad_insn,
6657 ULONGEST *jjump_pad_insn_size,
6658 CORE_ADDR *adjusted_insn_addr,
6659 CORE_ADDR *adjusted_insn_addr_end,
6660 char *err)
6661 {
6662 return (*the_low_target.install_fast_tracepoint_jump_pad)
6663 (tpoint, tpaddr, collector, lockaddr, orig_size,
6664 jump_entry, trampoline, trampoline_size,
6665 jjump_pad_insn, jjump_pad_insn_size,
6666 adjusted_insn_addr, adjusted_insn_addr_end,
6667 err);
6668 }
6669
6670 static struct emit_ops *
6671 linux_emit_ops (void)
6672 {
6673 if (the_low_target.emit_ops != NULL)
6674 return (*the_low_target.emit_ops) ();
6675 else
6676 return NULL;
6677 }
6678
6679 static int
6680 linux_get_min_fast_tracepoint_insn_len (void)
6681 {
6682 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6683 }
6684
6685 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6686
6687 static int
6688 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6689 CORE_ADDR *phdr_memaddr, int *num_phdr)
6690 {
6691 char filename[PATH_MAX];
6692 int fd;
6693 const int auxv_size = is_elf64
6694 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6695 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6696
6697 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6698
6699 fd = open (filename, O_RDONLY);
6700 if (fd < 0)
6701 return 1;
6702
6703 *phdr_memaddr = 0;
6704 *num_phdr = 0;
6705 while (read (fd, buf, auxv_size) == auxv_size
6706 && (*phdr_memaddr == 0 || *num_phdr == 0))
6707 {
6708 if (is_elf64)
6709 {
6710 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6711
6712 switch (aux->a_type)
6713 {
6714 case AT_PHDR:
6715 *phdr_memaddr = aux->a_un.a_val;
6716 break;
6717 case AT_PHNUM:
6718 *num_phdr = aux->a_un.a_val;
6719 break;
6720 }
6721 }
6722 else
6723 {
6724 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6725
6726 switch (aux->a_type)
6727 {
6728 case AT_PHDR:
6729 *phdr_memaddr = aux->a_un.a_val;
6730 break;
6731 case AT_PHNUM:
6732 *num_phdr = aux->a_un.a_val;
6733 break;
6734 }
6735 }
6736 }
6737
6738 close (fd);
6739
6740 if (*phdr_memaddr == 0 || *num_phdr == 0)
6741 {
6742 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6743 "phdr_memaddr = %ld, phdr_num = %d",
6744 (long) *phdr_memaddr, *num_phdr);
6745 return 2;
6746 }
6747
6748 return 0;
6749 }
6750
6751 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6752
6753 static CORE_ADDR
6754 get_dynamic (const int pid, const int is_elf64)
6755 {
6756 CORE_ADDR phdr_memaddr, relocation;
6757 int num_phdr, i;
6758 unsigned char *phdr_buf;
6759 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6760
6761 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6762 return 0;
6763
6764 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6765 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6766
6767 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6768 return 0;
6769
6770 /* Compute relocation: it is expected to be 0 for "regular" executables,
6771 non-zero for PIE ones. */
6772 relocation = -1;
6773 for (i = 0; relocation == -1 && i < num_phdr; i++)
6774 if (is_elf64)
6775 {
6776 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6777
6778 if (p->p_type == PT_PHDR)
6779 relocation = phdr_memaddr - p->p_vaddr;
6780 }
6781 else
6782 {
6783 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6784
6785 if (p->p_type == PT_PHDR)
6786 relocation = phdr_memaddr - p->p_vaddr;
6787 }
6788
6789 if (relocation == -1)
6790 {
6791 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6792 all real-world executables, including PIE executables, always have
6793 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6794 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
6795 provides DT_DEBUG anyway (fpc binaries are statically linked).
6796
6797 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6798
6799 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6800
6801 return 0;
6802 }
6803
6804 for (i = 0; i < num_phdr; i++)
6805 {
6806 if (is_elf64)
6807 {
6808 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6809
6810 if (p->p_type == PT_DYNAMIC)
6811 return p->p_vaddr + relocation;
6812 }
6813 else
6814 {
6815 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6816
6817 if (p->p_type == PT_DYNAMIC)
6818 return p->p_vaddr + relocation;
6819 }
6820 }
6821
6822 return 0;
6823 }
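
/* Editor's worked example for the relocation above (hypothetical
   numbers): a PIE whose program headers record p_vaddr = 0x40 for
   PT_PHDR, but whose AT_PHDR landed at 0x7f0000000040, yields
   relocation = 0x7f0000000040 - 0x40 = 0x7f0000000000, which is then
   added to PT_DYNAMIC's recorded p_vaddr to obtain its runtime
   address.  For a non-PIE executable the two agree and the
   relocation is 0.  */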
6824
6825 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6826 can be 0 if the inferior does not yet have the library list initialized.
6827 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6828 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6829
6830 static CORE_ADDR
6831 get_r_debug (const int pid, const int is_elf64)
6832 {
6833 CORE_ADDR dynamic_memaddr;
6834 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6835 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6836 CORE_ADDR map = -1;
6837
6838 dynamic_memaddr = get_dynamic (pid, is_elf64);
6839 if (dynamic_memaddr == 0)
6840 return map;
6841
6842 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6843 {
6844 if (is_elf64)
6845 {
6846 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6847 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6848 union
6849 {
6850 Elf64_Xword map;
6851 unsigned char buf[sizeof (Elf64_Xword)];
6852 }
6853 rld_map;
6854 #endif
6855 #ifdef DT_MIPS_RLD_MAP
6856 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6857 {
6858 if (linux_read_memory (dyn->d_un.d_val,
6859 rld_map.buf, sizeof (rld_map.buf)) == 0)
6860 return rld_map.map;
6861 else
6862 break;
6863 }
6864 #endif /* DT_MIPS_RLD_MAP */
6865 #ifdef DT_MIPS_RLD_MAP_REL
6866 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6867 {
6868 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6869 rld_map.buf, sizeof (rld_map.buf)) == 0)
6870 return rld_map.map;
6871 else
6872 break;
6873 }
6874 #endif /* DT_MIPS_RLD_MAP_REL */
6875
6876 if (dyn->d_tag == DT_DEBUG && map == -1)
6877 map = dyn->d_un.d_val;
6878
6879 if (dyn->d_tag == DT_NULL)
6880 break;
6881 }
6882 else
6883 {
6884 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6885 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6886 union
6887 {
6888 Elf32_Word map;
6889 unsigned char buf[sizeof (Elf32_Word)];
6890 }
6891 rld_map;
6892 #endif
6893 #ifdef DT_MIPS_RLD_MAP
6894 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6895 {
6896 if (linux_read_memory (dyn->d_un.d_val,
6897 rld_map.buf, sizeof (rld_map.buf)) == 0)
6898 return rld_map.map;
6899 else
6900 break;
6901 }
6902 #endif /* DT_MIPS_RLD_MAP */
6903 #ifdef DT_MIPS_RLD_MAP_REL
6904 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6905 {
6906 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6907 rld_map.buf, sizeof (rld_map.buf)) == 0)
6908 return rld_map.map;
6909 else
6910 break;
6911 }
6912 #endif /* DT_MIPS_RLD_MAP_REL */
6913
6914 if (dyn->d_tag == DT_DEBUG && map == -1)
6915 map = dyn->d_un.d_val;
6916
6917 if (dyn->d_tag == DT_NULL)
6918 break;
6919 }
6920
6921 dynamic_memaddr += dyn_size;
6922 }
6923
6924 return map;
6925 }
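
/* Editor's sketch (not part of the original source): the heart of the
   scan above for a 64-bit inferior, with the MIPS special cases and
   the remote-memory reads stripped out.  DYN points at a local copy
   of the inferior's _DYNAMIC array; Elf64_Dyn comes from <elf.h>.  */

static unsigned long
example_find_dt_debug (const Elf64_Dyn *dyn)
{
  for (; dyn->d_tag != DT_NULL; dyn++)
    if (dyn->d_tag == DT_DEBUG)
      return dyn->d_un.d_val;	/* &_r_debug in the inferior.  */
  return 0;
}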
6926
6927 /* Read one pointer from MEMADDR in the inferior. */
6928
6929 static int
6930 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6931 {
6932 int ret;
6933
6934 /* Go through a union so this works on either big or little endian
6935 hosts, when the inferior's pointer size is smaller than the size
6936 of CORE_ADDR. It is assumed that the inferior's endianness is the
6937 same as the superior's. */
6938 union
6939 {
6940 CORE_ADDR core_addr;
6941 unsigned int ui;
6942 unsigned char uc;
6943 } addr;
6944
6945 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6946 if (ret == 0)
6947 {
6948 if (ptr_size == sizeof (CORE_ADDR))
6949 *ptr = addr.core_addr;
6950 else if (ptr_size == sizeof (unsigned int))
6951 *ptr = addr.ui;
6952 else
6953 gdb_assert_not_reached ("unhandled pointer size");
6954 }
6955 return ret;
6956 }
6957
6958 struct link_map_offsets
6959 {
6960 /* Offset and size of r_debug.r_version. */
6961 int r_version_offset;
6962
6963 /* Offset and size of r_debug.r_map. */
6964 int r_map_offset;
6965
6966 /* Offset to l_addr field in struct link_map. */
6967 int l_addr_offset;
6968
6969 /* Offset to l_name field in struct link_map. */
6970 int l_name_offset;
6971
6972 /* Offset to l_ld field in struct link_map. */
6973 int l_ld_offset;
6974
6975 /* Offset to l_next field in struct link_map. */
6976 int l_next_offset;
6977
6978 /* Offset to l_prev field in struct link_map. */
6979 int l_prev_offset;
6980 };
6981
6982 /* Construct qXfer:libraries-svr4:read reply. */
6983
6984 static int
6985 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6986 unsigned const char *writebuf,
6987 CORE_ADDR offset, int len)
6988 {
6989 struct process_info_private *const priv = current_process ()->priv;
6990 char filename[PATH_MAX];
6991 int pid, is_elf64;
6992
6993 static const struct link_map_offsets lmo_32bit_offsets =
6994 {
6995 0, /* r_version offset. */
6996 4, /* r_debug.r_map offset. */
6997 0, /* l_addr offset in link_map. */
6998 4, /* l_name offset in link_map. */
6999 8, /* l_ld offset in link_map. */
7000 12, /* l_next offset in link_map. */
7001 16 /* l_prev offset in link_map. */
7002 };
7003
7004 static const struct link_map_offsets lmo_64bit_offsets =
7005 {
7006 0, /* r_version offset. */
7007 8, /* r_debug.r_map offset. */
7008 0, /* l_addr offset in link_map. */
7009 8, /* l_name offset in link_map. */
7010 16, /* l_ld offset in link_map. */
7011 24, /* l_next offset in link_map. */
7012 32 /* l_prev offset in link_map. */
7013 };
7014 const struct link_map_offsets *lmo;
7015 unsigned int machine;
7016 int ptr_size;
7017 CORE_ADDR lm_addr = 0, lm_prev = 0;
7018 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7019 int header_done = 0;
7020
7021 if (writebuf != NULL)
7022 return -2;
7023 if (readbuf == NULL)
7024 return -1;
7025
7026 pid = lwpid_of (current_thread);
7027 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7028 is_elf64 = elf_64_file_p (filename, &machine);
7029 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7030 ptr_size = is_elf64 ? 8 : 4;
7031
7032 while (annex[0] != '\0')
7033 {
7034 const char *sep;
7035 CORE_ADDR *addrp;
7036 int name_len;
7037
7038 sep = strchr (annex, '=');
7039 if (sep == NULL)
7040 break;
7041
7042 name_len = sep - annex;
7043 if (name_len == 5 && startswith (annex, "start"))
7044 addrp = &lm_addr;
7045 else if (name_len == 4 && startswith (annex, "prev"))
7046 addrp = &lm_prev;
7047 else
7048 {
7049 annex = strchr (sep, ';');
7050 if (annex == NULL)
7051 break;
7052 annex++;
7053 continue;
7054 }
7055
7056 annex = decode_address_to_semicolon (addrp, sep + 1);
7057 }
7058
7059 if (lm_addr == 0)
7060 {
7061 int r_version = 0;
7062
7063 if (priv->r_debug == 0)
7064 priv->r_debug = get_r_debug (pid, is_elf64);
7065
7066 /* We failed to find DT_DEBUG. That situation will not change
7067 for this inferior - do not retry it. Report it to GDB as
7068 E01; see GDB's solib-svr4.c for the reasons. */
7069 if (priv->r_debug == (CORE_ADDR) -1)
7070 return -1;
7071
7072 if (priv->r_debug != 0)
7073 {
7074 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7075 (unsigned char *) &r_version,
7076 sizeof (r_version)) != 0
7077 || r_version != 1)
7078 {
7079 warning ("unexpected r_debug version %d", r_version);
7080 }
7081 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7082 &lm_addr, ptr_size) != 0)
7083 {
7084 warning ("unable to read r_map from 0x%lx",
7085 (long) priv->r_debug + lmo->r_map_offset);
7086 }
7087 }
7088 }
7089
7090 std::string document = "<library-list-svr4 version=\"1.0\"";
7091
7092 while (lm_addr
7093 && read_one_ptr (lm_addr + lmo->l_name_offset,
7094 &l_name, ptr_size) == 0
7095 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7096 &l_addr, ptr_size) == 0
7097 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7098 &l_ld, ptr_size) == 0
7099 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7100 &l_prev, ptr_size) == 0
7101 && read_one_ptr (lm_addr + lmo->l_next_offset,
7102 &l_next, ptr_size) == 0)
7103 {
7104 unsigned char libname[PATH_MAX];
7105
7106 if (lm_prev != l_prev)
7107 {
7108 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7109 (long) lm_prev, (long) l_prev);
7110 break;
7111 }
7112
7113 /* Ignore the first entry even if it has a valid name, as the first
7114 entry corresponds to the main executable. The first entry should not
7115 be skipped if the dynamic loader was loaded late by a static executable
7116 (see solib-svr4.c parameter ignore_first). But in that case the main
7117 executable does not have PT_DYNAMIC present, and this function has
7118 already exited above due to a failed get_r_debug. */
7119 if (lm_prev == 0)
7120 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7121 else
7122 {
7123 /* Not checking for error because reading may stop before
7124 we've got PATH_MAX worth of characters. */
7125 libname[0] = '\0';
7126 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7127 libname[sizeof (libname) - 1] = '\0';
7128 if (libname[0] != '\0')
7129 {
7130 if (!header_done)
7131 {
7132 /* Terminate `<library-list-svr4'. */
7133 document += '>';
7134 header_done = 1;
7135 }
7136
7137 string_appendf (document, "<library name=\"");
7138 xml_escape_text_append (&document, (char *) libname);
7139 string_appendf (document, "\" lm=\"0x%lx\" "
7140 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7141 (unsigned long) lm_addr, (unsigned long) l_addr,
7142 (unsigned long) l_ld);
7143 }
7144 }
7145
7146 lm_prev = lm_addr;
7147 lm_addr = l_next;
7148 }
7149
7150 if (!header_done)
7151 {
7152 /* Empty list; terminate `<library-list-svr4'. */
7153 document += "/>";
7154 }
7155 else
7156 document += "</library-list-svr4>";
7157
7158 int document_len = document.length ();
7159 if (offset < document_len)
7160 document_len -= offset;
7161 else
7162 document_len = 0;
7163 if (len > document_len)
7164 len = document_len;
7165
7166 memcpy (readbuf, document.data () + offset, len);
7167
7168 return len;
7169 }
7170
7171 #ifdef HAVE_LINUX_BTRACE
7172
7173 /* See to_disable_btrace target method. */
7174
7175 static int
7176 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7177 {
7178 enum btrace_error err;
7179
7180 err = linux_disable_btrace (tinfo);
7181 return (err == BTRACE_ERR_NONE ? 0 : -1);
7182 }
7183
7184 /* Encode an Intel Processor Trace configuration. */
7185
7186 static void
7187 linux_low_encode_pt_config (struct buffer *buffer,
7188 const struct btrace_data_pt_config *config)
7189 {
7190 buffer_grow_str (buffer, "<pt-config>\n");
7191
7192 switch (config->cpu.vendor)
7193 {
7194 case CV_INTEL:
7195 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7196 "model=\"%u\" stepping=\"%u\"/>\n",
7197 config->cpu.family, config->cpu.model,
7198 config->cpu.stepping);
7199 break;
7200
7201 default:
7202 break;
7203 }
7204
7205 buffer_grow_str (buffer, "</pt-config>\n");
7206 }
7207
7208 /* Encode a raw buffer. */
7209
7210 static void
7211 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7212 unsigned int size)
7213 {
7214 if (size == 0)
7215 return;
7216
7217 /* We use hex encoding - see common/rsp-low.h. */
7218 buffer_grow_str (buffer, "<raw>\n");
7219
7220 while (size-- > 0)
7221 {
7222 char elem[2];
7223
7224 elem[0] = tohex ((*data >> 4) & 0xf);
7225 elem[1] = tohex (*data++ & 0xf);
7226
7227 buffer_grow (buffer, elem, 2);
7228 }
7229
7230 buffer_grow_str (buffer, "</raw>\n");
7231 }
7232
7233 /* See to_read_btrace target method. */
7234
7235 static int
7236 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7237 enum btrace_read_type type)
7238 {
7239 struct btrace_data btrace;
7240 struct btrace_block *block;
7241 enum btrace_error err;
7242 int i;
7243
7244 err = linux_read_btrace (&btrace, tinfo, type);
7245 if (err != BTRACE_ERR_NONE)
7246 {
7247 if (err == BTRACE_ERR_OVERFLOW)
7248 buffer_grow_str0 (buffer, "E.Overflow.");
7249 else
7250 buffer_grow_str0 (buffer, "E.Generic Error.");
7251
7252 return -1;
7253 }
7254
7255 switch (btrace.format)
7256 {
7257 case BTRACE_FORMAT_NONE:
7258 buffer_grow_str0 (buffer, "E.No Trace.");
7259 return -1;
7260
7261 case BTRACE_FORMAT_BTS:
7262 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7263 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7264
7265 for (i = 0;
7266 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7267 i++)
7268 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7269 paddress (block->begin), paddress (block->end));
7270
7271 buffer_grow_str0 (buffer, "</btrace>\n");
7272 break;
7273
7274 case BTRACE_FORMAT_PT:
7275 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7276 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7277 buffer_grow_str (buffer, "<pt>\n");
7278
7279 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7280
7281 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7282 btrace.variant.pt.size);
7283
7284 buffer_grow_str (buffer, "</pt>\n");
7285 buffer_grow_str0 (buffer, "</btrace>\n");
7286 break;
7287
7288 default:
7289 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7290 return -1;
7291 }
7292
7293 return 0;
7294 }
7295
7296 /* See to_btrace_conf target method. */
7297
7298 static int
7299 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7300 struct buffer *buffer)
7301 {
7302 const struct btrace_config *conf;
7303
7304 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7305 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7306
7307 conf = linux_btrace_conf (tinfo);
7308 if (conf != NULL)
7309 {
7310 switch (conf->format)
7311 {
7312 case BTRACE_FORMAT_NONE:
7313 break;
7314
7315 case BTRACE_FORMAT_BTS:
7316 buffer_xml_printf (buffer, "<bts");
7317 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7318 buffer_xml_printf (buffer, " />\n");
7319 break;
7320
7321 case BTRACE_FORMAT_PT:
7322 buffer_xml_printf (buffer, "<pt");
7323 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7324 buffer_xml_printf (buffer, "/>\n");
7325 break;
7326 }
7327 }
7328
7329 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7330 return 0;
7331 }
7332 #endif /* HAVE_LINUX_BTRACE */
7333
7334 /* See nat/linux-nat.h. */
7335
7336 ptid_t
7337 current_lwp_ptid (void)
7338 {
7339 return ptid_of (current_thread);
7340 }
7341
7342 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7343
7344 static int
7345 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7346 {
7347 if (the_low_target.breakpoint_kind_from_pc != NULL)
7348 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7349 else
7350 return default_breakpoint_kind_from_pc (pcptr);
7351 }
7352
7353 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7354
7355 static const gdb_byte *
7356 linux_sw_breakpoint_from_kind (int kind, int *size)
7357 {
7358 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7359
7360 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7361 }
7362
7363 /* Implementation of the target_ops method
7364 "breakpoint_kind_from_current_state". */
7365
7366 static int
7367 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7368 {
7369 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7370 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7371 else
7372 return linux_breakpoint_kind_from_pc (pcptr);
7373 }
7374
7375 /* Default implementation of linux_target_ops method "set_pc" for
7376 32-bit pc register which is literally named "pc". */
7377
7378 void
7379 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7380 {
7381 uint32_t newpc = pc;
7382
7383 supply_register_by_name (regcache, "pc", &newpc);
7384 }
7385
7386 /* Default implementation of linux_target_ops method "get_pc" for
7387 32-bit pc register which is literally named "pc". */
7388
7389 CORE_ADDR
7390 linux_get_pc_32bit (struct regcache *regcache)
7391 {
7392 uint32_t pc;
7393
7394 collect_register_by_name (regcache, "pc", &pc);
7395 if (debug_threads)
7396 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7397 return pc;
7398 }
7399
7400 /* Default implementation of linux_target_ops method "set_pc" for
7401 64-bit pc register which is literally named "pc". */
7402
7403 void
7404 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7405 {
7406 uint64_t newpc = pc;
7407
7408 supply_register_by_name (regcache, "pc", &newpc);
7409 }
7410
7411 /* Default implementation of linux_target_ops method "get_pc" for
7412 64-bit pc register which is literally named "pc". */
7413
7414 CORE_ADDR
7415 linux_get_pc_64bit (struct regcache *regcache)
7416 {
7417 uint64_t pc;
7418
7419 collect_register_by_name (regcache, "pc", &pc);
7420 if (debug_threads)
7421 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7422 return pc;
7423 }
7424
7425
7426 static struct target_ops linux_target_ops = {
7427 linux_create_inferior,
7428 linux_post_create_inferior,
7429 linux_attach,
7430 linux_kill,
7431 linux_detach,
7432 linux_mourn,
7433 linux_join,
7434 linux_thread_alive,
7435 linux_resume,
7436 linux_wait,
7437 linux_fetch_registers,
7438 linux_store_registers,
7439 linux_prepare_to_access_memory,
7440 linux_done_accessing_memory,
7441 linux_read_memory,
7442 linux_write_memory,
7443 linux_look_up_symbols,
7444 linux_request_interrupt,
7445 linux_read_auxv,
7446 linux_supports_z_point_type,
7447 linux_insert_point,
7448 linux_remove_point,
7449 linux_stopped_by_sw_breakpoint,
7450 linux_supports_stopped_by_sw_breakpoint,
7451 linux_stopped_by_hw_breakpoint,
7452 linux_supports_stopped_by_hw_breakpoint,
7453 linux_supports_hardware_single_step,
7454 linux_stopped_by_watchpoint,
7455 linux_stopped_data_address,
7456 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7457 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7458 && defined(PT_TEXT_END_ADDR)
7459 linux_read_offsets,
7460 #else
7461 NULL,
7462 #endif
7463 #ifdef USE_THREAD_DB
7464 thread_db_get_tls_address,
7465 #else
7466 NULL,
7467 #endif
7468 linux_qxfer_spu,
7469 hostio_last_error_from_errno,
7470 linux_qxfer_osdata,
7471 linux_xfer_siginfo,
7472 linux_supports_non_stop,
7473 linux_async,
7474 linux_start_non_stop,
7475 linux_supports_multi_process,
7476 linux_supports_fork_events,
7477 linux_supports_vfork_events,
7478 linux_supports_exec_events,
7479 linux_handle_new_gdb_connection,
7480 #ifdef USE_THREAD_DB
7481 thread_db_handle_monitor_command,
7482 #else
7483 NULL,
7484 #endif
7485 linux_common_core_of_thread,
7486 linux_read_loadmap,
7487 linux_process_qsupported,
7488 linux_supports_tracepoints,
7489 linux_read_pc,
7490 linux_write_pc,
7491 linux_thread_stopped,
7492 NULL,
7493 linux_pause_all,
7494 linux_unpause_all,
7495 linux_stabilize_threads,
7496 linux_install_fast_tracepoint_jump_pad,
7497 linux_emit_ops,
7498 linux_supports_disable_randomization,
7499 linux_get_min_fast_tracepoint_insn_len,
7500 linux_qxfer_libraries_svr4,
7501 linux_supports_agent,
7502 #ifdef HAVE_LINUX_BTRACE
7503 linux_enable_btrace,
7504 linux_low_disable_btrace,
7505 linux_low_read_btrace,
7506 linux_low_btrace_conf,
7507 #else
7508 NULL,
7509 NULL,
7510 NULL,
7511 NULL,
7512 #endif
7513 linux_supports_range_stepping,
7514 linux_proc_pid_to_exec_file,
7515 linux_mntns_open_cloexec,
7516 linux_mntns_unlink,
7517 linux_mntns_readlink,
7518 linux_breakpoint_kind_from_pc,
7519 linux_sw_breakpoint_from_kind,
7520 linux_proc_tid_get_name,
7521 linux_breakpoint_kind_from_current_state,
7522 linux_supports_software_single_step,
7523 linux_supports_catch_syscall,
7524 linux_get_ipa_tdesc_idx,
7525 #if USE_THREAD_DB
7526 thread_db_thread_handle,
7527 #else
7528 NULL,
7529 #endif
7530 };
7531
7532 #ifdef HAVE_LINUX_REGSETS
7533 void
7534 initialize_regsets_info (struct regsets_info *info)
7535 {
7536 for (info->num_regsets = 0;
7537 info->regsets[info->num_regsets].size >= 0;
7538 info->num_regsets++)
7539 ;
7540 }
7541 #endif
7542
7543 void
7544 initialize_low (void)
7545 {
7546 struct sigaction sigchld_action;
7547
7548 memset (&sigchld_action, 0, sizeof (sigchld_action));
7549 set_target_ops (&linux_target_ops);
7550
7551 linux_ptrace_init_warnings ();
7552 linux_proc_init_warnings ();
7553
7554 sigchld_action.sa_handler = sigchld_handler;
7555 sigemptyset (&sigchld_action.sa_mask);
7556 sigchld_action.sa_flags = SA_RESTART;
7557 sigaction (SIGCHLD, &sigchld_action, NULL);
7558
7559 initialize_low_arch ();
7560
7561 linux_check_ptrace_features ();
7562 }