gdbserver/linux-low: turn the 'decr_pc_after_break' field into a method
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif
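/* Note (editor's gloss, implementation not shown in this excerpt): on
   no-MMU uClinux systems the inferior's text and data segments are
   loaded at run-time-chosen addresses, and SUPPORTS_READ_OFFSETS
   indicates gdbserver can read those load addresses from the ptrace
   PT_TEXT_ADDR/PT_DATA_ADDR slots above and report them to GDB (the
   read-offsets / qOffsets mechanism) so symbols can be relocated to
   match.  */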

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do
         that, though, since it does not work when using 32-bit
         definitions on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do
         that, though, since it does not work when using 32-bit
         definitions on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
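/* Note: this pair implements the handoff for the fork/clone race.
   When a new child's initial SIGSTOP is reported before the parent's
   PTRACE_EVENT arrives, the status is parked in STOPPED_PIDS with
   add_to_pid_list (by the event filter, which is outside this
   excerpt), and handle_extended_wait below later claims it with
   pull_pid_from_list instead of blocking in waitpid again.  */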

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static struct lwp_info *add_lwp (ptid_t ptid);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
                                    siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
                          "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}
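/* This default (zero) suits architectures where a breakpoint trap
   leaves the PC at the breakpoint address itself.  Per the change
   named in the commit title, ports now override this method rather
   than filling in a 'decr_pc_after_break' field.  As a minimal sketch
   (the class name is illustrative; the real override lives in the
   port's own linux-*-low.cc), an x86-style port, whose one-byte
   breakpoint instruction leaves the PC one byte past the breakpoint
   address, would write:

     int
     x86_target::low_decr_pc_after_break ()
     {
       return 1;
     }
*/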

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  Set *MACHINE to the
   ELF e_machine field, or to EM_NONE if not an ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}
/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  low_arch_setup ();

  current_thread = saved_thread;
}

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
                                            int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_t (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_of (event_thr).lwp (),
                            ptid.pid ());
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already.  Leave the
             child suspended too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && supports_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave single-step breakpoints there, child will
                 hit it, so uninsert single-step breakpoints from parent
                 (and child).  Once vfork child is done, reinsert
                 them back to parent.  */
              uninsert_single_step_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          tdesc = allocate_target_description ();
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent event is passed on to
             higher layers.  */
          event_lwp->fork_relative = child_lwp;
          child_lwp->fork_relative = event_lwp;

          /* If the parent thread is doing step-over with single-step
             breakpoints, the list of single-step breakpoints are cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once vforked
             child is done.  */
          if (event_lwp->bp_reinsert != 0
              && supports_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
              delete_single_step_breakpoints (child_thr);

              gdb_assert (has_single_step_breakpoints (event_thr));
              gdb_assert (!has_single_step_breakpoints (child_thr));
            }

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (cs.report_thread_events)
        {
          new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
        = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
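/* As the returns above spell out, handle_extended_wait's result is a
   filter decision: 0 means "report this event to the client" (fork,
   vfork, vfork-done, exec), while 1 means "swallow it here", as for a
   plain clone that merely adds an LWP to our tables.  */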

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
         system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          low_set_pc (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by trace\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return true;
}
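/* Worked example of the PC rewind above: on a target whose
   low_decr_pc_after_break returns 1 (x86-style), a breakpoint planted
   at 0x1000 traps with the PC reported as 0x1001.  sw_breakpoint_pc
   is then 0x1000; since pc != sw_breakpoint_pc, we write 0x1000 back
   into the regcache so the client sees the thread stopped at the
   breakpoint address itself.  */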

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
        trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
        trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
        trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
                 sizeof ("stdin/stdout redirected\n") - 1) < 0)
        {
          /* Errors ignored.  */;
        }
    }
}
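/* The descriptor shuffle above relies on POSIX allocating the lowest
   free descriptor: close (0) frees fd 0, so the subsequent
   open ("/dev/null", O_RDONLY) lands on fd 0 (the inferior's stdin),
   and dup2 (2, 1) then points the inferior's stdout at gdbserver's
   stderr, keeping fd 1 (the RSP connection to GDB) clean.  */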

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
                                       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
                         str_program_args.c_str (),
                         get_environ ()->envp (), linux_ptrace_fun,
                         NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, safe_strerror (err));
            }
        }
      else if (err != 0)
        {
          std::string reason
            = linux_ptrace_attach_fail_reason_string (ptid, err);

          warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = linux_add_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
        {
          /* This is the first thread of this process we see.  */
          seen_one = true;
          return false;
        }
      else
        {
          /* This is the second thread of this process we see.  */
          return true;
        }
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in kill_one_lwp_callback.  We did not kill the
     first thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
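/* For instance, if the user has run "handle SIGINT nopass" in GDB,
   cs.program_signals[GDB_SIGNAL_INT] is false, so a thread that last
   stopped for a SIGINT detaches without that SIGINT being delivered;
   with the default "pass" setting, the same detach would deliver
   it.  */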

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
        the_low_target.prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, safe_strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)),
                 safe_strerror (save_errno));
        }
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
                    target_pid_to_str (ptid_of (thread)),
                    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for for_each_thread.  Detaches from non-leader threads of a
   given process.  */

static void
linux_detach_lwp_callback (thread_info *thread)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == thread->id.lwp ())
    return;

  lwp_info *lwp = get_thread_lwp (thread);
  linux_detach_one_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over in progress, let it finish first;
     otherwise, nesting a stabilize_threads operation on top gets
     really messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}
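/* Note that join spins on waitpid rather than waiting once: it keeps
   collecting statuses until either the process reports an exit
   (WIFEXITED/WIFSIGNALED) or waitpid fails with ECHILD, meaning there
   is no child left to wait for.  */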

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
                                                 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
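/* Note the fallback above: callers may pass a ptid with only the pid
   field set, e.g. find_lwp_pid (ptid_t (pid)), in which case the pid
   doubles as the lwpid and the lookup finds the thread group
   leader.  */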

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
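/* A minimal usage sketch (hypothetical caller): find the first
   stopped LWP of process PID, or NULL if there is none:

     lwp_info *lp = iterate_over_lwps (ptid_t (pid),
                                       [] (lwp_info *lwp)
                                       {
                                         return lwp->stopped != 0;
                                       });

   The callback returning non-zero stops the iteration, and that LWP
   is the one returned.  */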

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  for_each_process ([] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                    "num_lwps=%d, zombie=%d\n",
                    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
        /* Check if there are other threads in the group, as we may
           have raced with the inferior simply exiting.  */
        && !last_thread_of_process_p (leader_pid)
        && linux_proc_pid_is_zombie (leader_pid))
      {
        /* A leader zombie can mean one of two things:

           - It exited, and there's an exit status pending
             available, or only the leader exited (not the whole
             program).  In the latter case, we can't waitpid the
             leader's exit status until all other threads are gone.

           - There are 3 or more threads in the group, and a thread
             other than the leader exec'd.  On an exec, the Linux
             kernel destroys all other threads (except the execing
             one) in the thread group, and resets the execing thread's
             tid to the tgid.  No exit notification is sent for the
             execing thread -- from the ptracer's perspective, it
             appears as though the execing thread just vanishes.
             Until we reap all other threads except the leader and the
             execing thread, the leader will be zombie, and the
             execing thread will be in `D (disc sleep)'.  As soon as
             all other threads are reaped, the execing thread changes
             its tid to the tgid, and the previous (zombie) leader
             vanishes, giving place to the "new" leader.  We could try
             distinguishing the exit and exec cases, by waiting once
             more, and seeing if something comes out, but it doesn't
             sound useful.  The previous leader _does_ go away, and
             we'll re-add the new one once we see the exec event
             (which is just the same as what would happen if the
             previous leader did exit voluntarily before some other
             thread execs).  */
1888
1889 if (debug_threads)
1890 debug_printf ("CZL: Thread group leader %d zombie "
1891 "(it exited, or another thread execd).\n",
1892 leader_pid);
1893
1894 delete_lwp (leader_lp);
1895 }
1896 });
1897 }
1898
1899 /* Callback for `find_thread'. Returns the first LWP that is not
1900 stopped. */
1901
1902 static bool
1903 not_stopped_callback (thread_info *thread, ptid_t filter)
1904 {
1905 if (!thread->id.matches (filter))
1906 return false;
1907
1908 lwp_info *lwp = get_thread_lwp (thread);
1909
1910 return !lwp->stopped;
1911 }
1912
1913 /* Increment LWP's suspend count. */
1914
1915 static void
1916 lwp_suspended_inc (struct lwp_info *lwp)
1917 {
1918 lwp->suspended++;
1919
1920 if (debug_threads && lwp->suspended > 4)
1921 {
1922 struct thread_info *thread = get_lwp_thread (lwp);
1923
1924 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1925 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1926 }
1927 }
1928
1929 /* Decrement LWP's suspend count. */
1930
1931 static void
1932 lwp_suspended_decr (struct lwp_info *lwp)
1933 {
1934 lwp->suspended--;
1935
1936 if (lwp->suspended < 0)
1937 {
1938 struct thread_info *thread = get_lwp_thread (lwp);
1939
1940 internal_error (__FILE__, __LINE__,
1941 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1942 lwp->suspended);
1943 }
1944 }
1945
1946 /* This function should only be called if the LWP got a SIGTRAP.
1947
1948 Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1949 event was handled, 0 otherwise.  */
1950
1951 static int
1952 handle_tracepoints (struct lwp_info *lwp)
1953 {
1954 struct thread_info *tinfo = get_lwp_thread (lwp);
1955 int tpoint_related_event = 0;
1956
1957 gdb_assert (lwp->suspended == 0);
1958
1959 /* If this tracepoint hit causes a tracing stop, we'll immediately
1960 uninsert tracepoints. To do this, we temporarily pause all
1961 threads, unpatch away, and then unpause threads. We need to make
1962 sure the unpausing doesn't resume LWP too. */
1963 lwp_suspended_inc (lwp);
1964
1965 /* And we need to be sure that any all-threads-stopping doesn't try
1966 to move threads out of the jump pads, as it could deadlock the
1967 inferior (LWP could be in the jump pad, maybe even holding the
1968 lock.) */
1969
1970 /* Do any necessary step collect actions. */
1971 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1972
1973 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1974
1975 /* See if we just hit a tracepoint and do its main collect
1976 actions. */
1977 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1978
1979 lwp_suspended_decr (lwp);
1980
1981 gdb_assert (lwp->suspended == 0);
1982 gdb_assert (!stabilizing_threads
1983 || (lwp->collecting_fast_tracepoint
1984 != fast_tpoint_collect_result::not_collecting));
1985
1986 if (tpoint_related_event)
1987 {
1988 if (debug_threads)
1989 debug_printf ("got a tracepoint event\n");
1990 return 1;
1991 }
1992
1993 return 0;
1994 }
1995
1996 /* Convenience wrapper. Returns information about LWP's fast tracepoint
1997 collection status. */
1998
1999 static fast_tpoint_collect_result
2000 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2001 struct fast_tpoint_collect_status *status)
2002 {
2003 CORE_ADDR thread_area;
2004 struct thread_info *thread = get_lwp_thread (lwp);
2005
2006 if (the_low_target.get_thread_area == NULL)
2007 return fast_tpoint_collect_result::not_collecting;
2008
2009 /* Get the thread area address. This is used to recognize which
2010 thread is which when tracing with the in-process agent library.
2011 We don't read anything from the address, and treat it as opaque;
2012 it's the address itself that we assume is unique per-thread. */
2013 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2014 return fast_tpoint_collect_result::not_collecting;
2015
2016 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2017 }
2018
2019 bool
2020 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
2021 {
2022 struct thread_info *saved_thread;
2023
2024 saved_thread = current_thread;
2025 current_thread = get_lwp_thread (lwp);
2026
2027 if ((wstat == NULL
2028 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2029 && supports_fast_tracepoints ()
2030 && agent_loaded_p ())
2031 {
2032 struct fast_tpoint_collect_status status;
2033
2034 if (debug_threads)
2035 debug_printf ("Checking whether LWP %ld needs to move out of the "
2036 "jump pad.\n",
2037 lwpid_of (current_thread));
2038
2039 fast_tpoint_collect_result r
2040 = linux_fast_tracepoint_collecting (lwp, &status);
2041
2042 if (wstat == NULL
2043 || (WSTOPSIG (*wstat) != SIGILL
2044 && WSTOPSIG (*wstat) != SIGFPE
2045 && WSTOPSIG (*wstat) != SIGSEGV
2046 && WSTOPSIG (*wstat) != SIGBUS))
2047 {
2048 lwp->collecting_fast_tracepoint = r;
2049
2050 if (r != fast_tpoint_collect_result::not_collecting)
2051 {
2052 if (r == fast_tpoint_collect_result::before_insn
2053 && lwp->exit_jump_pad_bkpt == NULL)
2054 {
2055 /* Haven't executed the original instruction yet.
2056 Set breakpoint there, and wait till it's hit,
2057 then single-step until exiting the jump pad. */
2058 lwp->exit_jump_pad_bkpt
2059 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2060 }
2061
2062 if (debug_threads)
2063 debug_printf ("Checking whether LWP %ld needs to move out of "
2064 "the jump pad...it does\n",
2065 lwpid_of (current_thread));
2066 current_thread = saved_thread;
2067
2068 return true;
2069 }
2070 }
2071 else
2072 {
2073 /* If we get a synchronous signal while collecting, *and*
2074 while executing the (relocated) original instruction,
2075 reset the PC to point at the tpoint address, before
2076 reporting to GDB. Otherwise, it's an IPA lib bug: just
2077 report the signal to GDB, and pray for the best. */
2078
2079 lwp->collecting_fast_tracepoint
2080 = fast_tpoint_collect_result::not_collecting;
2081
2082 if (r != fast_tpoint_collect_result::not_collecting
2083 && (status.adjusted_insn_addr <= lwp->stop_pc
2084 && lwp->stop_pc < status.adjusted_insn_addr_end))
2085 {
2086 siginfo_t info;
2087 struct regcache *regcache;
2088
2089 /* The si_addr on a few signals references the address
2090 of the faulting instruction. Adjust that as
2091 well. */
2092 if ((WSTOPSIG (*wstat) == SIGILL
2093 || WSTOPSIG (*wstat) == SIGFPE
2094 || WSTOPSIG (*wstat) == SIGBUS
2095 || WSTOPSIG (*wstat) == SIGSEGV)
2096 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2097 (PTRACE_TYPE_ARG3) 0, &info) == 0
2098 /* Final check just to make sure we don't clobber
2099 the siginfo of non-kernel-sent signals. */
2100 && (uintptr_t) info.si_addr == lwp->stop_pc)
2101 {
2102 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2103 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2104 (PTRACE_TYPE_ARG3) 0, &info);
2105 }
2106
2107 regcache = get_thread_regcache (current_thread, 1);
2108 low_set_pc (regcache, status.tpoint_addr);
2109 lwp->stop_pc = status.tpoint_addr;
2110
2111 /* Cancel any fast tracepoint lock this thread was
2112 holding. */
2113 force_unlock_trace_buffer ();
2114 }
2115
2116 if (lwp->exit_jump_pad_bkpt != NULL)
2117 {
2118 if (debug_threads)
2119 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2120 "stopping all threads momentarily.\n");
2121
2122 stop_all_lwps (1, lwp);
2123
2124 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2125 lwp->exit_jump_pad_bkpt = NULL;
2126
2127 unstop_all_lwps (1, lwp);
2128
2129 gdb_assert (lwp->suspended >= 0);
2130 }
2131 }
2132 }
2133
2134 if (debug_threads)
2135 debug_printf ("Checking whether LWP %ld needs to move out of the "
2136 "jump pad...no\n",
2137 lwpid_of (current_thread));
2138
2139 current_thread = saved_thread;
2140 return false;
2141 }
2142
2143 /* Enqueue one signal in the "signals to report later when out of the
2144 jump pad" list. */
2145
2146 static void
2147 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2148 {
2149 struct pending_signals *p_sig;
2150 struct thread_info *thread = get_lwp_thread (lwp);
2151
2152 if (debug_threads)
2153 debug_printf ("Deferring signal %d for LWP %ld.\n",
2154 WSTOPSIG (*wstat), lwpid_of (thread));
2155
2156 if (debug_threads)
2157 {
2158 struct pending_signals *sig;
2159
2160 for (sig = lwp->pending_signals_to_report;
2161 sig != NULL;
2162 sig = sig->prev)
2163 debug_printf (" Already queued %d\n",
2164 sig->signal);
2165
2166 debug_printf (" (no more currently queued signals)\n");
2167 }
2168
2169 /* Don't enqueue non-RT signals if they are already in the deferred
2170 queue.  (SIGSTOP is the easiest signal to see ending up here
2171 twice.)  */
2172 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2173 {
2174 struct pending_signals *sig;
2175
2176 for (sig = lwp->pending_signals_to_report;
2177 sig != NULL;
2178 sig = sig->prev)
2179 {
2180 if (sig->signal == WSTOPSIG (*wstat))
2181 {
2182 if (debug_threads)
2183 debug_printf ("Not requeuing already queued non-RT signal %d"
2184 " for LWP %ld\n",
2185 sig->signal,
2186 lwpid_of (thread));
2187 return;
2188 }
2189 }
2190 }
2191
2192 p_sig = XCNEW (struct pending_signals);
2193 p_sig->prev = lwp->pending_signals_to_report;
2194 p_sig->signal = WSTOPSIG (*wstat);
2195
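/* Save the signal's siginfo now, so it can be reinjected with
   PTRACE_SETSIGINFO when the deferred signal is eventually reported
   (see dequeue_one_deferred_signal below).  */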
2196 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2197 &p_sig->info);
2198
2199 lwp->pending_signals_to_report = p_sig;
2200 }
2201
2202 /* Dequeue one signal from the "signals to report later when out of
2203 the jump pad" list. */
2204
2205 static int
2206 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2207 {
2208 struct thread_info *thread = get_lwp_thread (lwp);
2209
2210 if (lwp->pending_signals_to_report != NULL)
2211 {
2212 struct pending_signals **p_sig;
2213
2214 p_sig = &lwp->pending_signals_to_report;
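/* Signals are pushed at the head of the list, so walk to the tail
   to pop the oldest pending signal first (FIFO order).  */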
2215 while ((*p_sig)->prev != NULL)
2216 p_sig = &(*p_sig)->prev;
2217
2218 *wstat = W_STOPCODE ((*p_sig)->signal);
2219 if ((*p_sig)->info.si_signo != 0)
2220 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2221 &(*p_sig)->info);
2222 free (*p_sig);
2223 *p_sig = NULL;
2224
2225 if (debug_threads)
2226 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2227 WSTOPSIG (*wstat), lwpid_of (thread));
2228
2229 if (debug_threads)
2230 {
2231 struct pending_signals *sig;
2232
2233 for (sig = lwp->pending_signals_to_report;
2234 sig != NULL;
2235 sig = sig->prev)
2236 debug_printf (" Still queued %d\n",
2237 sig->signal);
2238
2239 debug_printf (" (no more queued signals)\n");
2240 }
2241
2242 return 1;
2243 }
2244
2245 return 0;
2246 }
2247
2248 /* Fetch the possibly triggered data watchpoint info and store it in
2249 CHILD.
2250
2251 On some archs, like x86, that use debug registers to set
2252 watchpoints, the way to know which watched address trapped is
2253 to check the register that is used to select which address to
2254 watch.  The problem is, between setting the watchpoint
2255 and reading back which data address trapped, the user may change
2256 the set of watchpoints, and, as a consequence, GDB changes the
2257 debug registers in the inferior. To avoid reading back a stale
2258 stopped-data-address when that happens, we cache in CHILD the fact
2259 that a watchpoint trapped, and the corresponding data address, as
2260 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2261 registers meanwhile, we have the cached data we can rely on. */
2262
2263 static int
2264 check_stopped_by_watchpoint (struct lwp_info *child)
2265 {
2266 if (the_low_target.stopped_by_watchpoint != NULL)
2267 {
2268 struct thread_info *saved_thread;
2269
2270 saved_thread = current_thread;
2271 current_thread = get_lwp_thread (child);
2272
2273 if (the_low_target.stopped_by_watchpoint ())
2274 {
2275 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2276
2277 if (the_low_target.stopped_data_address != NULL)
2278 child->stopped_data_address
2279 = the_low_target.stopped_data_address ();
2280 else
2281 child->stopped_data_address = 0;
2282 }
2283
2284 current_thread = saved_thread;
2285 }
2286
2287 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2288 }
2289
2290 /* Return the ptrace options that we want to try to enable. */
2291
2292 static int
2293 linux_low_ptrace_options (int attached)
2294 {
2295 client_state &cs = get_client_state ();
2296 int options = 0;
2297
2298 if (!attached)
2299 options |= PTRACE_O_EXITKILL;
2300
2301 if (cs.report_fork_events)
2302 options |= PTRACE_O_TRACEFORK;
2303
2304 if (cs.report_vfork_events)
2305 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2306
2307 if (cs.report_exec_events)
2308 options |= PTRACE_O_TRACEEXEC;
2309
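/* PTRACE_O_TRACESYSGOOD makes syscall stops report SIGTRAP | 0x80
   (SYSCALL_SIGTRAP), so they can be told apart from ordinary
   SIGTRAPs.  */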
2310 options |= PTRACE_O_TRACESYSGOOD;
2311
2312 return options;
2313 }
2314
2315 lwp_info *
2316 linux_process_target::filter_event (int lwpid, int wstat)
2317 {
2318 client_state &cs = get_client_state ();
2319 struct lwp_info *child;
2320 struct thread_info *thread;
2321 int have_stop_pc = 0;
2322
2323 child = find_lwp_pid (ptid_t (lwpid));
2324
2325 /* Check for stop events reported by a process we didn't already
2326 know about - anything not already in our LWP list.
2327
2328 If we're expecting to receive stopped processes after
2329 fork, vfork, and clone events, then we'll just add the
2330 new one to our list and go back to waiting for the event
2331 to be reported - the stopped process might be returned
2332 from waitpid before or after the event is.
2333
2334 But note the case of a non-leader thread exec'ing after the
2335 leader having exited, and gone from our lists (because
2336 check_zombie_leaders deleted it). The non-leader thread
2337 changes its tid to the tgid. */
2338
2339 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2340 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2341 {
2342 ptid_t child_ptid;
2343
2344 /* A multi-thread exec after we had seen the leader exiting. */
2345 if (debug_threads)
2346 {
2347 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2348 "after exec.\n", lwpid);
2349 }
2350
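/* On exec, the kernel resets the execing thread's tid to the tgid,
   so the new leader's pid and lwp fields are the same.  */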
2351 child_ptid = ptid_t (lwpid, lwpid, 0);
2352 child = add_lwp (child_ptid);
2353 child->stopped = 1;
2354 current_thread = child->thread;
2355 }
2356
2357 /* If we didn't find a process, one of two things presumably happened:
2358 - A process we started and then detached from has exited. Ignore it.
2359 - A process we are controlling has forked and the new child's stop
2360 was reported to us by the kernel. Save its PID. */
2361 if (child == NULL && WIFSTOPPED (wstat))
2362 {
2363 add_to_pid_list (&stopped_pids, lwpid, wstat);
2364 return NULL;
2365 }
2366 else if (child == NULL)
2367 return NULL;
2368
2369 thread = get_lwp_thread (child);
2370
2371 child->stopped = 1;
2372
2373 child->last_status = wstat;
2374
2375 /* Check if the thread has exited. */
2376 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2377 {
2378 if (debug_threads)
2379 debug_printf ("LLFE: %d exited.\n", lwpid);
2380
2381 if (finish_step_over (child))
2382 {
2383 /* Unsuspend all other LWPs, and set them back running again. */
2384 unsuspend_all_lwps (child);
2385 }
2386
2387 /* If there is at least one more LWP, then the exit signal was
2388 not the end of the debugged application and should be
2389 ignored, unless GDB wants to hear about thread exits. */
2390 if (cs.report_thread_events
2391 || last_thread_of_process_p (pid_of (thread)))
2392 {
2393 /* Events are serialized to the GDB core, and we can't report
2394 this one right now; leave the status pending for the next
2395 time we're able to report it.  */
2396 mark_lwp_dead (child, wstat);
2397 return child;
2398 }
2399 else
2400 {
2401 delete_lwp (child);
2402 return NULL;
2403 }
2404 }
2405
2406 gdb_assert (WIFSTOPPED (wstat));
2407
2408 if (WIFSTOPPED (wstat))
2409 {
2410 struct process_info *proc;
2411
2412 /* Architecture-specific setup after inferior is running. */
2413 proc = find_process_pid (pid_of (thread));
2414 if (proc->tdesc == NULL)
2415 {
2416 if (proc->attached)
2417 {
2418 /* This needs to happen after we have attached to the
2419 inferior and it is stopped for the first time, but
2420 before we access any inferior registers. */
2421 arch_setup_thread (thread);
2422 }
2423 else
2424 {
2425 /* The process is started, but GDBserver will do
2426 architecture-specific setup after the program stops at
2427 the first instruction. */
2428 child->status_pending_p = 1;
2429 child->status_pending = wstat;
2430 return child;
2431 }
2432 }
2433 }
2434
2435 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2436 {
2437 struct process_info *proc = find_process_pid (pid_of (thread));
2438 int options = linux_low_ptrace_options (proc->attached);
2439
2440 linux_enable_event_reporting (lwpid, options);
2441 child->must_set_ptrace_flags = 0;
2442 }
2443
2444 /* Always update syscall_state, even if it will be filtered later. */
2445 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2446 {
2447 child->syscall_state
2448 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2449 ? TARGET_WAITKIND_SYSCALL_RETURN
2450 : TARGET_WAITKIND_SYSCALL_ENTRY);
2451 }
2452 else
2453 {
2454 /* Almost all other ptrace-stops are known to be outside of system
2455 calls, with further exceptions in handle_extended_wait. */
2456 child->syscall_state = TARGET_WAITKIND_IGNORE;
2457 }
2458
2459 /* Be careful to not overwrite stop_pc until save_stop_reason is
2460 called. */
2461 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2462 && linux_is_extended_waitstatus (wstat))
2463 {
2464 child->stop_pc = get_pc (child);
2465 if (handle_extended_wait (&child, wstat))
2466 {
2467 /* The event has been handled, so just return without
2468 reporting it. */
2469 return NULL;
2470 }
2471 }
2472
2473 if (linux_wstatus_maybe_breakpoint (wstat))
2474 {
2475 if (save_stop_reason (child))
2476 have_stop_pc = 1;
2477 }
2478
2479 if (!have_stop_pc)
2480 child->stop_pc = get_pc (child);
2481
2482 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2483 && child->stop_expected)
2484 {
2485 if (debug_threads)
2486 debug_printf ("Expected stop.\n");
2487 child->stop_expected = 0;
2488
2489 if (thread->last_resume_kind == resume_stop)
2490 {
2491 /* We want to report the stop to the core. Treat the
2492 SIGSTOP as a normal event. */
2493 if (debug_threads)
2494 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2495 target_pid_to_str (ptid_of (thread)));
2496 }
2497 else if (stopping_threads != NOT_STOPPING_THREADS)
2498 {
2499 /* Stopping threads. We don't want this SIGSTOP to end up
2500 pending. */
2501 if (debug_threads)
2502 debug_printf ("LLW: SIGSTOP caught for %s "
2503 "while stopping threads.\n",
2504 target_pid_to_str (ptid_of (thread)));
2505 return NULL;
2506 }
2507 else
2508 {
2509 /* This is a delayed SIGSTOP. Filter out the event. */
2510 if (debug_threads)
2511 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2512 child->stepping ? "step" : "continue",
2513 target_pid_to_str (ptid_of (thread)));
2514
2515 resume_one_lwp (child, child->stepping, 0, NULL);
2516 return NULL;
2517 }
2518 }
2519
2520 child->status_pending_p = 1;
2521 child->status_pending = wstat;
2522 return child;
2523 }
2524
2525 /* Return 1 if THREAD is doing hardware single step, 0 if software.  */
2526
2527 static int
2528 maybe_hw_step (struct thread_info *thread)
2529 {
2530 if (can_hardware_single_step ())
2531 return 1;
2532 else
2533 {
2534 /* GDBserver must insert a single-step breakpoint for software
2535 single step.  */
2536 gdb_assert (has_single_step_breakpoints (thread));
2537 return 0;
2538 }
2539 }
2540
2541 void
2542 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2543 {
2544 struct lwp_info *lp = get_thread_lwp (thread);
2545
2546 if (lp->stopped
2547 && !lp->suspended
2548 && !lp->status_pending_p
2549 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2550 {
2551 int step = 0;
2552
2553 if (thread->last_resume_kind == resume_step)
2554 step = maybe_hw_step (thread);
2555
2556 if (debug_threads)
2557 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2558 target_pid_to_str (ptid_of (thread)),
2559 paddress (lp->stop_pc),
2560 step);
2561
2562 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2563 }
2564 }
2565
2566 int
2567 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2568 ptid_t filter_ptid,
2569 int *wstatp, int options)
2570 {
2571 struct thread_info *event_thread;
2572 struct lwp_info *event_child, *requested_child;
2573 sigset_t block_mask, prev_mask;
2574
2575 retry:
2576 /* N.B. event_thread points to the thread_info struct that contains
2577 event_child. Keep them in sync. */
2578 event_thread = NULL;
2579 event_child = NULL;
2580 requested_child = NULL;
2581
2582 /* Check for a lwp with a pending status. */
2583
2584 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2585 {
2586 event_thread = find_thread_in_random ([&] (thread_info *thread)
2587 {
2588 return status_pending_p_callback (thread, filter_ptid);
2589 });
2590
2591 if (event_thread != NULL)
2592 event_child = get_thread_lwp (event_thread);
2593 if (debug_threads && event_thread)
2594 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2595 }
2596 else if (filter_ptid != null_ptid)
2597 {
2598 requested_child = find_lwp_pid (filter_ptid);
2599
2600 if (stopping_threads == NOT_STOPPING_THREADS
2601 && requested_child->status_pending_p
2602 && (requested_child->collecting_fast_tracepoint
2603 != fast_tpoint_collect_result::not_collecting))
2604 {
2605 enqueue_one_deferred_signal (requested_child,
2606 &requested_child->status_pending);
2607 requested_child->status_pending_p = 0;
2608 requested_child->status_pending = 0;
2609 resume_one_lwp (requested_child, 0, 0, NULL);
2610 }
2611
2612 if (requested_child->suspended
2613 && requested_child->status_pending_p)
2614 {
2615 internal_error (__FILE__, __LINE__,
2616 "requesting an event out of a"
2617 " suspended child?");
2618 }
2619
2620 if (requested_child->status_pending_p)
2621 {
2622 event_child = requested_child;
2623 event_thread = get_lwp_thread (event_child);
2624 }
2625 }
2626
2627 if (event_child != NULL)
2628 {
2629 if (debug_threads)
2630 debug_printf ("Got an event from pending child %ld (%04x)\n",
2631 lwpid_of (event_thread), event_child->status_pending);
2632 *wstatp = event_child->status_pending;
2633 event_child->status_pending_p = 0;
2634 event_child->status_pending = 0;
2635 current_thread = event_thread;
2636 return lwpid_of (event_thread);
2637 }
2638
2639 /* But if we don't find a pending event, we'll have to wait.
2640
2641 We only enter this loop if no process has a pending wait status.
2642 Thus any action taken in response to a wait status inside this
2643 loop is responding as soon as we detect the status, not after any
2644 pending events. */
2645
2646 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2647 all signals while here. */
2648 sigfillset (&block_mask);
2649 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2650
2651 /* Always pull all events out of the kernel. We'll randomly select
2652 an event LWP out of all that have events, to prevent
2653 starvation. */
2654 while (event_child == NULL)
2655 {
2656 pid_t ret = 0;
2657
2658 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2659 quirks:
2660
2661 - If the thread group leader exits while other threads in the
2662 thread group still exist, waitpid(TGID, ...) hangs. That
2663 waitpid won't return an exit status until the other threads
2664 in the group are reaped.
2665
2666 - When a non-leader thread execs, that thread just vanishes
2667 without reporting an exit (so we'd hang if we waited for it
2668 explicitly in that case). The exec event is reported to
2669 the TGID pid. */
2670 errno = 0;
2671 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2672
2673 if (debug_threads)
2674 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2675 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2676
2677 if (ret > 0)
2678 {
2679 if (debug_threads)
2680 {
2681 debug_printf ("LLW: waitpid %ld received %s\n",
2682 (long) ret, status_to_str (*wstatp));
2683 }
2684
2685 /* Filter all events. IOW, leave all events pending. We'll
2686 randomly select an event LWP out of all that have events
2687 below. */
2688 filter_event (ret, *wstatp);
2689 /* Retry until nothing comes out of waitpid. A single
2690 SIGCHLD can indicate more than one child stopped. */
2691 continue;
2692 }
2693
2694 /* Now that we've pulled all events out of the kernel, resume
2695 LWPs that don't have an interesting event to report. */
2696 if (stopping_threads == NOT_STOPPING_THREADS)
2697 for_each_thread ([this] (thread_info *thread)
2698 {
2699 resume_stopped_resumed_lwps (thread);
2700 });
2701
2702 /* ... and find an LWP with a status to report to the core, if
2703 any. */
2704 event_thread = find_thread_in_random ([&] (thread_info *thread)
2705 {
2706 return status_pending_p_callback (thread, filter_ptid);
2707 });
2708
2709 if (event_thread != NULL)
2710 {
2711 event_child = get_thread_lwp (event_thread);
2712 *wstatp = event_child->status_pending;
2713 event_child->status_pending_p = 0;
2714 event_child->status_pending = 0;
2715 break;
2716 }
2717
2718 /* Check for zombie thread group leaders. Those can't be reaped
2719 until all other threads in the thread group are. */
2720 check_zombie_leaders ();
2721
2722 auto not_stopped = [&] (thread_info *thread)
2723 {
2724 return not_stopped_callback (thread, wait_ptid);
2725 };
2726
2727 /* If there are no resumed children left in the set of LWPs we
2728 want to wait for, bail. We can't just block in
2729 waitpid/sigsuspend, because lwps might have been left stopped
2730 in trace-stop state, and we'd be stuck forever waiting for
2731 their status to change (which would only happen if we resumed
2732 them). Even if WNOHANG is set, this return code is preferred
2733 over 0 (below), as it is more detailed. */
2734 if (find_thread (not_stopped) == NULL)
2735 {
2736 if (debug_threads)
2737 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2738 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2739 return -1;
2740 }
2741
2742 /* No interesting event to report to the caller. */
2743 if ((options & WNOHANG))
2744 {
2745 if (debug_threads)
2746 debug_printf ("WNOHANG set, no event found\n");
2747
2748 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2749 return 0;
2750 }
2751
2752 /* Block until we get an event reported with SIGCHLD. */
2753 if (debug_threads)
2754 debug_printf ("sigsuspend'ing\n");
2755
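/* SIGCHLD is still blocked here, so a SIGCHLD that arrived after
   the last waitpid is left pending; sigsuspend atomically swaps in
   the previous mask and waits, so a pending SIGCHLD wakes it
   immediately and no event is lost.  */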
2756 sigsuspend (&prev_mask);
2757 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2758 goto retry;
2759 }
2760
2761 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2762
2763 current_thread = event_thread;
2764
2765 return lwpid_of (event_thread);
2766 }
2767
2768 int
2769 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2770 {
2771 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2772 }
2773
2774 /* Select one LWP out of those that have events pending. */
2775
2776 static void
2777 select_event_lwp (struct lwp_info **orig_lp)
2778 {
2779 struct thread_info *event_thread = NULL;
2780
2781 /* In all-stop, give preference to the LWP that is being
2782 single-stepped. There will be at most one, and it's the LWP that
2783 the core is most interested in. If we didn't do this, then we'd
2784 have to handle pending step SIGTRAPs somehow in case the core
2785 later continues the previously-stepped thread, otherwise we'd
2786 report the pending SIGTRAP, and the core, not having stepped the
2787 thread, wouldn't understand what the trap was for, and therefore
2788 would report it to the user as a random signal. */
2789 if (!non_stop)
2790 {
2791 event_thread = find_thread ([] (thread_info *thread)
2792 {
2793 lwp_info *lp = get_thread_lwp (thread);
2794
2795 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2796 && thread->last_resume_kind == resume_step
2797 && lp->status_pending_p);
2798 });
2799
2800 if (event_thread != NULL)
2801 {
2802 if (debug_threads)
2803 debug_printf ("SEL: Select single-step %s\n",
2804 target_pid_to_str (ptid_of (event_thread)));
2805 }
2806 }
2807 if (event_thread == NULL)
2808 {
2809 /* No single-stepping LWP. Select one at random, out of those
2810 which have had events. */
2811
2812 event_thread = find_thread_in_random ([&] (thread_info *thread)
2813 {
2814 lwp_info *lp = get_thread_lwp (thread);
2815
2816 /* Only resumed LWPs that have an event pending. */
2817 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2818 && lp->status_pending_p);
2819 });
2820 }
2821
2822 if (event_thread != NULL)
2823 {
2824 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2825
2826 /* Switch the event LWP. */
2827 *orig_lp = event_lp;
2828 }
2829 }
2830
2831 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2832 NULL. */
2833
2834 static void
2835 unsuspend_all_lwps (struct lwp_info *except)
2836 {
2837 for_each_thread ([&] (thread_info *thread)
2838 {
2839 lwp_info *lwp = get_thread_lwp (thread);
2840
2841 if (lwp != except)
2842 lwp_suspended_decr (lwp);
2843 });
2844 }
2845
2846 static bool stuck_in_jump_pad_callback (thread_info *thread);
2847 static bool lwp_running (thread_info *thread);
2848
2849 /* Stabilize threads (move out of jump pads).
2850
2851 If a thread is midway collecting a fast tracepoint, we need to
2852 finish the collection and move it out of the jump pad before
2853 reporting the signal.
2854
2855 This avoids recursion while collecting (when a signal arrives
2856 midway, and the signal handler itself collects), which would trash
2857 the trace buffer. In case the user set a breakpoint in a signal
2858 handler, this avoids the backtrace showing the jump pad, etc..
2859 Most importantly, there are certain things we can't do safely if
2860 threads are stopped in a jump pad (or in its callee's). For
2861 example:
2862
2863 - starting a new trace run.  A thread still collecting the
2864 previous run could trash the trace buffer when resumed.  The trace
2865 buffer control structures would have been reset but the thread
2866 would have no way to tell.  The thread could even be midway through
2867 memcpy'ing to the buffer, which would mean that when resumed, it
2868 would clobber the trace buffer that had been set up for a new run.
2869
2870 - we can't rewrite/reuse the jump pads for new tracepoints
2871 safely.  Say you do tstart while a thread is stopped midway through
2872 collecting.  When the thread is later resumed, it finishes the
2873 collection, and returns to the jump pad, to execute the original
2874 instruction that was under the tracepoint jump at the time the
2875 older run had been started. If the jump pad had been rewritten
2876 since for something else in the new run, the thread would now
2877 execute the wrong / random instructions. */
2878
2879 void
2880 linux_process_target::stabilize_threads ()
2881 {
2882 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2883
2884 if (thread_stuck != NULL)
2885 {
2886 if (debug_threads)
2887 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2888 lwpid_of (thread_stuck));
2889 return;
2890 }
2891
2892 thread_info *saved_thread = current_thread;
2893
2894 stabilizing_threads = 1;
2895
2896 /* Kick 'em all. */
2897 for_each_thread ([this] (thread_info *thread)
2898 {
2899 move_out_of_jump_pad (thread);
2900 });
2901
2902 /* Loop until all are stopped out of the jump pads. */
2903 while (find_thread (lwp_running) != NULL)
2904 {
2905 struct target_waitstatus ourstatus;
2906 struct lwp_info *lwp;
2907 int wstat;
2908
2909 /* Note that we go through the full wait event loop.  While
2910 moving threads out of jump pad, we need to be able to step
2911 over internal breakpoints and such. */
2912 wait_1 (minus_one_ptid, &ourstatus, 0);
2913
2914 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2915 {
2916 lwp = get_thread_lwp (current_thread);
2917
2918 /* Lock it. */
2919 lwp_suspended_inc (lwp);
2920
2921 if (ourstatus.value.sig != GDB_SIGNAL_0
2922 || current_thread->last_resume_kind == resume_stop)
2923 {
2924 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2925 enqueue_one_deferred_signal (lwp, &wstat);
2926 }
2927 }
2928 }
2929
2930 unsuspend_all_lwps (NULL);
2931
2932 stabilizing_threads = 0;
2933
2934 current_thread = saved_thread;
2935
2936 if (debug_threads)
2937 {
2938 thread_stuck = find_thread (stuck_in_jump_pad_callback);
2939
2940 if (thread_stuck != NULL)
2941 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2942 lwpid_of (thread_stuck));
2943 }
2944 }
2945
2946 /* Convenience function that is called when the kernel reports an
2947 event that is not passed out to GDB. */
2948
2949 static ptid_t
2950 ignore_event (struct target_waitstatus *ourstatus)
2951 {
2952 /* If we got an event, there may still be others, as a single
2953 SIGCHLD can indicate more than one child stopped. This forces
2954 another target_wait call. */
2955 async_file_mark ();
2956
2957 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2958 return null_ptid;
2959 }
2960
2961 /* Convenience function that is called when the kernel reports an
2962 exit event.  This decides whether to report the event to GDB as
2963 a process exit event or a thread exit event, or to suppress
2964 it.  */
2965
2966 static ptid_t
2967 filter_exit_event (struct lwp_info *event_child,
2968 struct target_waitstatus *ourstatus)
2969 {
2970 client_state &cs = get_client_state ();
2971 struct thread_info *thread = get_lwp_thread (event_child);
2972 ptid_t ptid = ptid_of (thread);
2973
2974 if (!last_thread_of_process_p (pid_of (thread)))
2975 {
2976 if (cs.report_thread_events)
2977 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2978 else
2979 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2980
2981 delete_lwp (event_child);
2982 }
2983 return ptid;
2984 }
2985
2986 /* Returns 1 if GDB is interested in any event_child syscalls. */
2987
2988 static int
2989 gdb_catching_syscalls_p (struct lwp_info *event_child)
2990 {
2991 struct thread_info *thread = get_lwp_thread (event_child);
2992 struct process_info *proc = get_thread_process (thread);
2993
2994 return !proc->syscalls_to_catch.empty ();
2995 }
2996
2997 /* Returns 1 if GDB is interested in the event_child syscall.
2998 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
2999
3000 static int
3001 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3002 {
3003 int sysno;
3004 struct thread_info *thread = get_lwp_thread (event_child);
3005 struct process_info *proc = get_thread_process (thread);
3006
3007 if (proc->syscalls_to_catch.empty ())
3008 return 0;
3009
3010 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3011 return 1;
3012
3013 get_syscall_trapinfo (event_child, &sysno);
3014
3015 for (int iter : proc->syscalls_to_catch)
3016 if (iter == sysno)
3017 return 1;
3018
3019 return 0;
3020 }
3021
3022 ptid_t
3023 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
3024 int target_options)
3025 {
3026 client_state &cs = get_client_state ();
3027 int w;
3028 struct lwp_info *event_child;
3029 int options;
3030 int pid;
3031 int step_over_finished;
3032 int bp_explains_trap;
3033 int maybe_internal_trap;
3034 int report_to_gdb;
3035 int trace_event;
3036 int in_step_range;
3037 int any_resumed;
3038
3039 if (debug_threads)
3040 {
3041 debug_enter ();
3042 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
3043 }
3044
3045 /* Translate generic target options into linux options. */
3046 options = __WALL;
3047 if (target_options & TARGET_WNOHANG)
3048 options |= WNOHANG;
3049
3050 bp_explains_trap = 0;
3051 trace_event = 0;
3052 in_step_range = 0;
3053 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3054
3055 auto status_pending_p_any = [&] (thread_info *thread)
3056 {
3057 return status_pending_p_callback (thread, minus_one_ptid);
3058 };
3059
3060 auto not_stopped = [&] (thread_info *thread)
3061 {
3062 return not_stopped_callback (thread, minus_one_ptid);
3063 };
3064
3065 /* Find a resumed LWP, if any. */
3066 if (find_thread (status_pending_p_any) != NULL)
3067 any_resumed = 1;
3068 else if (find_thread (not_stopped) != NULL)
3069 any_resumed = 1;
3070 else
3071 any_resumed = 0;
3072
3073 if (step_over_bkpt == null_ptid)
3074 pid = wait_for_event (ptid, &w, options);
3075 else
3076 {
3077 if (debug_threads)
3078 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3079 target_pid_to_str (step_over_bkpt));
3080 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3081 }
3082
3083 if (pid == 0 || (pid == -1 && !any_resumed))
3084 {
3085 gdb_assert (target_options & TARGET_WNOHANG);
3086
3087 if (debug_threads)
3088 {
3089 debug_printf ("wait_1 ret = null_ptid, "
3090 "TARGET_WAITKIND_IGNORE\n");
3091 debug_exit ();
3092 }
3093
3094 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3095 return null_ptid;
3096 }
3097 else if (pid == -1)
3098 {
3099 if (debug_threads)
3100 {
3101 debug_printf ("wait_1 ret = null_ptid, "
3102 "TARGET_WAITKIND_NO_RESUMED\n");
3103 debug_exit ();
3104 }
3105
3106 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3107 return null_ptid;
3108 }
3109
3110 event_child = get_thread_lwp (current_thread);
3111
3112 /* wait_for_event only returns an exit status for the last
3113 child of a process. Report it. */
3114 if (WIFEXITED (w) || WIFSIGNALED (w))
3115 {
3116 if (WIFEXITED (w))
3117 {
3118 ourstatus->kind = TARGET_WAITKIND_EXITED;
3119 ourstatus->value.integer = WEXITSTATUS (w);
3120
3121 if (debug_threads)
3122 {
3123 debug_printf ("wait_1 ret = %s, exited with "
3124 "retcode %d\n",
3125 target_pid_to_str (ptid_of (current_thread)),
3126 WEXITSTATUS (w));
3127 debug_exit ();
3128 }
3129 }
3130 else
3131 {
3132 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3133 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3134
3135 if (debug_threads)
3136 {
3137 debug_printf ("wait_1 ret = %s, terminated with "
3138 "signal %d\n",
3139 target_pid_to_str (ptid_of (current_thread)),
3140 WTERMSIG (w));
3141 debug_exit ();
3142 }
3143 }
3144
3145 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3146 return filter_exit_event (event_child, ourstatus);
3147
3148 return ptid_of (current_thread);
3149 }
3150
3151 /* If a step-over executes a breakpoint instruction, then in the
3152 case of a hardware single step it means a gdb/gdbserver breakpoint
3153 had been planted on top of a permanent breakpoint, while in the
3154 case of a software single step it may just mean that gdbserver
3155 hit the reinsert breakpoint.  The PC has been adjusted by
3156 save_stop_reason to point at the breakpoint address.
3157 So, in the case of a hardware single step, advance the PC manually
3158 past the breakpoint; in the case of a software single step, advance
3159 it only if it's not the single_step_breakpoint we are hitting.
3160 This prevents the program from trapping on a permanent breakpoint
3161 forever.  */
3162 if (step_over_bkpt != null_ptid
3163 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3164 && (event_child->stepping
3165 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3166 {
3167 int increment_pc = 0;
3168 int breakpoint_kind = 0;
3169 CORE_ADDR stop_pc = event_child->stop_pc;
3170
3171 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
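/* sw_breakpoint_from_kind also returns the breakpoint's length
   through its second argument, which is how far the PC must
   advance to step past the permanent breakpoint.  */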
3172 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3173
3174 if (debug_threads)
3175 {
3176 debug_printf ("step-over for %s executed software breakpoint\n",
3177 target_pid_to_str (ptid_of (current_thread)));
3178 }
3179
3180 if (increment_pc != 0)
3181 {
3182 struct regcache *regcache
3183 = get_thread_regcache (current_thread, 1);
3184
3185 event_child->stop_pc += increment_pc;
3186 low_set_pc (regcache, event_child->stop_pc);
3187
3188 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3189 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3190 }
3191 }
3192
3193 /* If this event was not handled before, and is not a SIGTRAP, we
3194 report it. SIGILL and SIGSEGV are also treated as traps in case
3195 a breakpoint is inserted at the current PC. If this target does
3196 not support internal breakpoints at all, we also report the
3197 SIGTRAP without further processing; it's of no concern to us. */
3198 maybe_internal_trap
3199 = (low_supports_breakpoints ()
3200 && (WSTOPSIG (w) == SIGTRAP
3201 || ((WSTOPSIG (w) == SIGILL
3202 || WSTOPSIG (w) == SIGSEGV)
3203 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3204
3205 if (maybe_internal_trap)
3206 {
3207 /* Handle anything that requires bookkeeping before deciding to
3208 report the event or continue waiting. */
3209
3210 /* First check if we can explain the SIGTRAP with an internal
3211 breakpoint, or if we should possibly report the event to GDB.
3212 Do this before anything that may remove or insert a
3213 breakpoint. */
3214 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3215
3216 /* We have a SIGTRAP, possibly a step-over dance has just
3217 finished. If so, tweak the state machine accordingly,
3218 reinsert breakpoints and delete any single-step
3219 breakpoints. */
3220 step_over_finished = finish_step_over (event_child);
3221
3222 /* Now invoke the callbacks of any internal breakpoints there. */
3223 check_breakpoints (event_child->stop_pc);
3224
3225 /* Handle tracepoint data collecting. This may overflow the
3226 trace buffer, and cause a tracing stop, removing
3227 breakpoints. */
3228 trace_event = handle_tracepoints (event_child);
3229
3230 if (bp_explains_trap)
3231 {
3232 if (debug_threads)
3233 debug_printf ("Hit a gdbserver breakpoint.\n");
3234 }
3235 }
3236 else
3237 {
3238 /* We have some other signal, possibly a step-over dance was in
3239 progress, and it should be cancelled too. */
3240 step_over_finished = finish_step_over (event_child);
3241 }
3242
3243 /* We have all the data we need. Either report the event to GDB, or
3244 resume threads and keep waiting for more. */
3245
3246 /* If we're collecting a fast tracepoint, finish the collection and
3247 move out of the jump pad before delivering a signal. See
3248 linux_stabilize_threads. */
3249
3250 if (WIFSTOPPED (w)
3251 && WSTOPSIG (w) != SIGTRAP
3252 && supports_fast_tracepoints ()
3253 && agent_loaded_p ())
3254 {
3255 if (debug_threads)
3256 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3257 "to defer or adjust it.\n",
3258 WSTOPSIG (w), lwpid_of (current_thread));
3259
3260 /* Allow debugging the jump pad itself. */
3261 if (current_thread->last_resume_kind != resume_step
3262 && maybe_move_out_of_jump_pad (event_child, &w))
3263 {
3264 enqueue_one_deferred_signal (event_child, &w);
3265
3266 if (debug_threads)
3267 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3268 WSTOPSIG (w), lwpid_of (current_thread));
3269
3270 resume_one_lwp (event_child, 0, 0, NULL);
3271
3272 if (debug_threads)
3273 debug_exit ();
3274 return ignore_event (ourstatus);
3275 }
3276 }
3277
3278 if (event_child->collecting_fast_tracepoint
3279 != fast_tpoint_collect_result::not_collecting)
3280 {
3281 if (debug_threads)
3282 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3283 "Check if we're already there.\n",
3284 lwpid_of (current_thread),
3285 (int) event_child->collecting_fast_tracepoint);
3286
3287 trace_event = 1;
3288
3289 event_child->collecting_fast_tracepoint
3290 = linux_fast_tracepoint_collecting (event_child, NULL);
3291
3292 if (event_child->collecting_fast_tracepoint
3293 != fast_tpoint_collect_result::before_insn)
3294 {
3295 /* No longer need this breakpoint. */
3296 if (event_child->exit_jump_pad_bkpt != NULL)
3297 {
3298 if (debug_threads)
3299 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3300 "stopping all threads momentarily.\n");
3301
3302 /* Other running threads could hit this breakpoint.
3303 We don't handle moribund locations like GDB does,
3304 instead we always pause all threads when removing
3305 breakpoints, so that any step-over or
3306 decr_pc_after_break adjustment is always taken
3307 care of while the breakpoint is still
3308 inserted. */
3309 stop_all_lwps (1, event_child);
3310
3311 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3312 event_child->exit_jump_pad_bkpt = NULL;
3313
3314 unstop_all_lwps (1, event_child);
3315
3316 gdb_assert (event_child->suspended >= 0);
3317 }
3318 }
3319
3320 if (event_child->collecting_fast_tracepoint
3321 == fast_tpoint_collect_result::not_collecting)
3322 {
3323 if (debug_threads)
3324 debug_printf ("fast tracepoint finished "
3325 "collecting successfully.\n");
3326
3327 /* We may have a deferred signal to report. */
3328 if (dequeue_one_deferred_signal (event_child, &w))
3329 {
3330 if (debug_threads)
3331 debug_printf ("dequeued one signal.\n");
3332 }
3333 else
3334 {
3335 if (debug_threads)
3336 debug_printf ("no deferred signals.\n");
3337
3338 if (stabilizing_threads)
3339 {
3340 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3341 ourstatus->value.sig = GDB_SIGNAL_0;
3342
3343 if (debug_threads)
3344 {
3345 debug_printf ("wait_1 ret = %s, stopped "
3346 "while stabilizing threads\n",
3347 target_pid_to_str (ptid_of (current_thread)));
3348 debug_exit ();
3349 }
3350
3351 return ptid_of (current_thread);
3352 }
3353 }
3354 }
3355 }
3356
3357 /* Check whether GDB would be interested in this event. */
3358
3359 /* Check if GDB is interested in this syscall. */
3360 if (WIFSTOPPED (w)
3361 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3362 && !gdb_catch_this_syscall_p (event_child))
3363 {
3364 if (debug_threads)
3365 {
3366 debug_printf ("Ignored syscall for LWP %ld.\n",
3367 lwpid_of (current_thread));
3368 }
3369
3370 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3371
3372 if (debug_threads)
3373 debug_exit ();
3374 return ignore_event (ourstatus);
3375 }
3376
3377 /* If GDB is not interested in this signal, don't stop other
3378 threads, and don't report it to GDB. Just resume the inferior
3379 right away. We do this for threading-related signals as well as
3380 any that GDB specifically requested we ignore. But never ignore
3381 SIGSTOP if we sent it ourselves, and do not ignore signals when
3382 stepping - they may require special handling to skip the signal
3383 handler. Also never ignore signals that could be caused by a
3384 breakpoint. */
3385 if (WIFSTOPPED (w)
3386 && current_thread->last_resume_kind != resume_step
3387 && (
3388 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3389 (current_process ()->priv->thread_db != NULL
3390 && (WSTOPSIG (w) == __SIGRTMIN
3391 || WSTOPSIG (w) == __SIGRTMIN + 1))
3392 ||
3393 #endif
3394 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3395 && !(WSTOPSIG (w) == SIGSTOP
3396 && current_thread->last_resume_kind == resume_stop)
3397 && !linux_wstatus_maybe_breakpoint (w))))
3398 {
3399 siginfo_t info, *info_p;
3400
3401 if (debug_threads)
3402 debug_printf ("Ignored signal %d for LWP %ld.\n",
3403 WSTOPSIG (w), lwpid_of (current_thread));
3404
3405 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3406 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3407 info_p = &info;
3408 else
3409 info_p = NULL;
3410
3411 if (step_over_finished)
3412 {
3413 /* We cancelled this thread's step-over above. We still
3414 need to unsuspend all other LWPs, and set them back
3415 running again while the signal handler runs. */
3416 unsuspend_all_lwps (event_child);
3417
3418 /* Enqueue the pending signal info so that proceed_all_lwps
3419 doesn't lose it. */
3420 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3421
3422 proceed_all_lwps ();
3423 }
3424 else
3425 {
3426 resume_one_lwp (event_child, event_child->stepping,
3427 WSTOPSIG (w), info_p);
3428 }
3429
3430 if (debug_threads)
3431 debug_exit ();
3432
3433 return ignore_event (ourstatus);
3434 }
3435
3436 /* Note that all addresses are always "out of the step range" when
3437 there's no range to begin with. */
3438 in_step_range = lwp_in_step_range (event_child);
3439
3440 /* If GDB wanted this thread to single step, and the thread is out
3441 of the step range, we always want to report the SIGTRAP, and let
3442 GDB handle it. Watchpoints should always be reported. So should
3443 signals we can't explain. A SIGTRAP we can't explain could be a
3444 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3445 do, we're be able to handle GDB breakpoints on top of internal
3446 breakpoints, by handling the internal breakpoint and still
3447 reporting the event to GDB. If we don't, we're out of luck, GDB
3448 won't see the breakpoint hit. If we see a single-step event but
3449 the thread should be continuing, don't pass the trap to gdb.
3450 That indicates that we had previously finished a single-step but
3451 left the single-step pending -- see
3452 complete_ongoing_step_over. */
3453 report_to_gdb = (!maybe_internal_trap
3454 || (current_thread->last_resume_kind == resume_step
3455 && !in_step_range)
3456 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3457 || (!in_step_range
3458 && !bp_explains_trap
3459 && !trace_event
3460 && !step_over_finished
3461 && !(current_thread->last_resume_kind == resume_continue
3462 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3463 || (gdb_breakpoint_here (event_child->stop_pc)
3464 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3465 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3466 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3467
3468 run_breakpoint_commands (event_child->stop_pc);
3469
3470 /* We found no reason GDB would want us to stop. We either hit one
3471 of our own breakpoints, or finished an internal step GDB
3472 shouldn't know about. */
3473 if (!report_to_gdb)
3474 {
3475 if (debug_threads)
3476 {
3477 if (bp_explains_trap)
3478 debug_printf ("Hit a gdbserver breakpoint.\n");
3479 if (step_over_finished)
3480 debug_printf ("Step-over finished.\n");
3481 if (trace_event)
3482 debug_printf ("Tracepoint event.\n");
3483 if (lwp_in_step_range (event_child))
3484 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3485 paddress (event_child->stop_pc),
3486 paddress (event_child->step_range_start),
3487 paddress (event_child->step_range_end));
3488 }
3489
3490 /* We're not reporting this breakpoint to GDB, so apply the
3491 decr_pc_after_break adjustment to the inferior's regcache
3492 ourselves. */
3493
3494 if (low_supports_breakpoints ())
3495 {
3496 struct regcache *regcache
3497 = get_thread_regcache (current_thread, 1);
3498 low_set_pc (regcache, event_child->stop_pc);
3499 }
3500
3501 if (step_over_finished)
3502 {
3503 /* If we have finished stepping over a breakpoint, we've
3504 stopped and suspended all LWPs momentarily except the
3505 stepping one. This is where we resume them all again.
3506 We're going to keep waiting, so use proceed, which
3507 handles stepping over the next breakpoint. */
3508 unsuspend_all_lwps (event_child);
3509 }
3510 else
3511 {
3512 /* Remove the single-step breakpoints, if any.  Note that there
3513 is no single-step breakpoint left if we just finished stepping
3514 over.  */
3515 if (supports_software_single_step ()
3516 && has_single_step_breakpoints (current_thread))
3517 {
3518 stop_all_lwps (0, event_child);
3519 delete_single_step_breakpoints (current_thread);
3520 unstop_all_lwps (0, event_child);
3521 }
3522 }
3523
3524 if (debug_threads)
3525 debug_printf ("proceeding all threads.\n");
3526 proceed_all_lwps ();
3527
3528 if (debug_threads)
3529 debug_exit ();
3530
3531 return ignore_event (ourstatus);
3532 }
3533
3534 if (debug_threads)
3535 {
3536 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3537 {
3538 std::string str
3539 = target_waitstatus_to_string (&event_child->waitstatus);
3540
3541 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3542 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3543 }
3544 if (current_thread->last_resume_kind == resume_step)
3545 {
3546 if (event_child->step_range_start == event_child->step_range_end)
3547 debug_printf ("GDB wanted to single-step, reporting event.\n");
3548 else if (!lwp_in_step_range (event_child))
3549 debug_printf ("Out of step range, reporting event.\n");
3550 }
3551 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3552 debug_printf ("Stopped by watchpoint.\n");
3553 else if (gdb_breakpoint_here (event_child->stop_pc))
3554 debug_printf ("Stopped by GDB breakpoint.\n");
3555 debug_printf ("Hit a non-gdbserver trap event.\n");
3557 }
3558
3559 /* Alright, we're going to report a stop. */
3560
3561 /* Remove single-step breakpoints. */
3562 if (supports_software_single_step ())
3563 {
3564 /* Whether to remove the single-step breakpoints.  If true, stop
3565 all lwps first, so that other threads won't hit a breakpoint
3566 left in stale memory.  */
3567 int remove_single_step_breakpoints_p = 0;
3568
3569 if (non_stop)
3570 {
3571 remove_single_step_breakpoints_p
3572 = has_single_step_breakpoints (current_thread);
3573 }
3574 else
3575 {
3576 /* In all-stop, a stop reply cancels all previous resume
3577 requests. Delete all single-step breakpoints. */
3578
3579 find_thread ([&] (thread_info *thread) {
3580 if (has_single_step_breakpoints (thread))
3581 {
3582 remove_single_step_breakpoints_p = 1;
3583 return true;
3584 }
3585
3586 return false;
3587 });
3588 }
3589
3590 if (remove_single_step_breakpoints_p)
3591 {
3592 /* If we remove single-step breakpoints from memory, stop all lwps,
3593 so that other threads won't hit a breakpoint left in stale
3594 memory.  */
3595 stop_all_lwps (0, event_child);
3596
3597 if (non_stop)
3598 {
3599 gdb_assert (has_single_step_breakpoints (current_thread));
3600 delete_single_step_breakpoints (current_thread);
3601 }
3602 else
3603 {
3604 for_each_thread ([] (thread_info *thread){
3605 if (has_single_step_breakpoints (thread))
3606 delete_single_step_breakpoints (thread);
3607 });
3608 }
3609
3610 unstop_all_lwps (0, event_child);
3611 }
3612 }
3613
3614 if (!stabilizing_threads)
3615 {
3616 /* In all-stop, stop all threads. */
3617 if (!non_stop)
3618 stop_all_lwps (0, NULL);
3619
3620 if (step_over_finished)
3621 {
3622 if (!non_stop)
3623 {
3624 /* If we were doing a step-over, all other threads but
3625 the stepping one had been paused in start_step_over,
3626 with their suspend counts incremented. We don't want
3627 to do a full unstop/unpause, because we're in
3628 all-stop mode (so we want threads stopped), but we
3629 still need to unsuspend the other threads, to
3630 decrement their `suspended' count back. */
3631 unsuspend_all_lwps (event_child);
3632 }
3633 else
3634 {
3635 /* If we just finished a step-over, then all threads had
3636 been momentarily paused. In all-stop, that's fine,
3637 we want threads stopped by now anyway. In non-stop,
3638 we need to re-resume threads that GDB wanted to be
3639 running. */
3640 unstop_all_lwps (1, event_child);
3641 }
3642 }
3643
3644 /* If we're not waiting for a specific LWP, choose an event LWP
3645 from among those that have had events. Giving equal priority
3646 to all LWPs that have had events helps prevent
3647 starvation. */
3648 if (ptid == minus_one_ptid)
3649 {
3650 event_child->status_pending_p = 1;
3651 event_child->status_pending = w;
3652
3653 select_event_lwp (&event_child);
3654
3655 /* current_thread and event_child must stay in sync. */
3656 current_thread = get_lwp_thread (event_child);
3657
3658 event_child->status_pending_p = 0;
3659 w = event_child->status_pending;
3660 }
3661
3662
3663 /* Stabilize threads (move out of jump pads). */
3664 if (!non_stop)
3665 target_stabilize_threads ();
3666 }
3667 else
3668 {
3669 /* If we just finished a step-over, then all threads had been
3670 momentarily paused. In all-stop, that's fine, we want
3671 threads stopped by now anyway. In non-stop, we need to
3672 re-resume threads that GDB wanted to be running. */
3673 if (step_over_finished)
3674 unstop_all_lwps (1, event_child);
3675 }
3676
3677 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3678 {
3679 /* If the reported event is an exit, fork, vfork or exec, let
3680 GDB know. */
3681
3682 /* Break the unreported fork relationship chain. */
3683 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3684 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3685 {
3686 event_child->fork_relative->fork_relative = NULL;
3687 event_child->fork_relative = NULL;
3688 }
3689
3690 *ourstatus = event_child->waitstatus;
3691 /* Clear the event lwp's waitstatus since we handled it already. */
3692 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3693 }
3694 else
3695 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3696
3697 /* Now that we've selected our final event LWP, un-adjust its PC if
3698 it was a software breakpoint, and the client doesn't know we can
3699 adjust the breakpoint ourselves. */
3700 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3701 && !cs.swbreak_feature)
3702 {
3703 int decr_pc = low_decr_pc_after_break ();
3704
3705 if (decr_pc != 0)
3706 {
3707 struct regcache *regcache
3708 = get_thread_regcache (current_thread, 1);
3709 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3710 }
3711 }
3712
3713 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3714 {
3715 get_syscall_trapinfo (event_child,
3716 &ourstatus->value.syscall_number);
3717 ourstatus->kind = event_child->syscall_state;
3718 }
3719 else if (current_thread->last_resume_kind == resume_stop
3720 && WSTOPSIG (w) == SIGSTOP)
3721 {
/* A thread that has been requested to stop by GDB with vCont;t
   stopped cleanly, so report it as SIG0.  The use of SIGSTOP is
   an implementation detail.  */
3725 ourstatus->value.sig = GDB_SIGNAL_0;
3726 }
3727 else if (current_thread->last_resume_kind == resume_stop
3728 && WSTOPSIG (w) != SIGSTOP)
3729 {
/* A thread that has been requested to stop by GDB with vCont;t,
   but it stopped for other reasons.  */
3732 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3733 }
3734 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3735 {
3736 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3737 }
3738
3739 gdb_assert (step_over_bkpt == null_ptid);
3740
3741 if (debug_threads)
3742 {
3743 debug_printf ("wait_1 ret = %s, %d, %d\n",
3744 target_pid_to_str (ptid_of (current_thread)),
3745 ourstatus->kind, ourstatus->value.sig);
3746 debug_exit ();
3747 }
3748
3749 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3750 return filter_exit_event (event_child, ourstatus);
3751
3752 return ptid_of (current_thread);
3753 }
3754
3755 /* Get rid of any pending event in the pipe. */
3756 static void
3757 async_file_flush (void)
3758 {
3759 int ret;
3760 char buf;
3761
3762 do
3763 ret = read (linux_event_pipe[0], &buf, 1);
3764 while (ret >= 0 || (ret == -1 && errno == EINTR));
3765 }
3766
3767 /* Put something in the pipe, so the event loop wakes up. */
3768 static void
3769 async_file_mark (void)
3770 {
3771 int ret;
3772
3773 async_file_flush ();
3774
3775 do
3776 ret = write (linux_event_pipe[1], "+", 1);
3777 while (ret == 0 || (ret == -1 && errno == EINTR));
3778
3779 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3780 be awakened anyway. */
3781 }
3782
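/* Implementation of the wait target op.  Flush the async event pipe,
   call wait_1 (repeatedly, unless TARGET_WNOHANG was requested), and,
   in async mode, re-mark the pipe if an event was reported, since a
   single SIGCHLD can indicate more than one child stop.  */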
3783 ptid_t
3784 linux_process_target::wait (ptid_t ptid,
3785 target_waitstatus *ourstatus,
3786 int target_options)
3787 {
3788 ptid_t event_ptid;
3789
3790 /* Flush the async file first. */
3791 if (target_is_async_p ())
3792 async_file_flush ();
3793
3794 do
3795 {
3796 event_ptid = wait_1 (ptid, ourstatus, target_options);
3797 }
3798 while ((target_options & TARGET_WNOHANG) == 0
3799 && event_ptid == null_ptid
3800 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3801
3802 /* If at least one stop was reported, there may be more. A single
3803 SIGCHLD can signal more than one child stop. */
3804 if (target_is_async_p ()
3805 && (target_options & TARGET_WNOHANG) != 0
3806 && event_ptid != null_ptid)
3807 async_file_mark ();
3808
3809 return event_ptid;
3810 }
3811
3812 /* Send a signal to an LWP. */
3813
3814 static int
3815 kill_lwp (unsigned long lwpid, int signo)
3816 {
3817 int ret;
3818
3819 errno = 0;
3820 ret = syscall (__NR_tkill, lwpid, signo);
3821 if (errno == ENOSYS)
3822 {
3823 /* If tkill fails, then we are not using nptl threads, a
3824 configuration we no longer support. */
3825 perror_with_name (("tkill"));
3826 }
3827 return ret;
3828 }
3829
3830 void
3831 linux_stop_lwp (struct lwp_info *lwp)
3832 {
3833 send_sigstop (lwp);
3834 }
3835
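/* Send a SIGSTOP to LWP, unless it already has one pending.  */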
3836 static void
3837 send_sigstop (struct lwp_info *lwp)
3838 {
3839 int pid;
3840
3841 pid = lwpid_of (get_lwp_thread (lwp));
3842
/* If we already have a pending stop signal for this LWP, don't
   send another.  */
3845 if (lwp->stop_expected)
3846 {
3847 if (debug_threads)
3848 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3849
3850 return;
3851 }
3852
3853 if (debug_threads)
3854 debug_printf ("Sending sigstop to lwp %d\n", pid);
3855
3856 lwp->stop_expected = 1;
3857 kill_lwp (pid, SIGSTOP);
3858 }
3859
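/* for_each_thread callback: send a SIGSTOP to THREAD's LWP, unless
   it is EXCEPT or is already stopped.  */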
3860 static void
3861 send_sigstop (thread_info *thread, lwp_info *except)
3862 {
3863 struct lwp_info *lwp = get_thread_lwp (thread);
3864
3865 /* Ignore EXCEPT. */
3866 if (lwp == except)
3867 return;
3868
3869 if (lwp->stopped)
3870 return;
3871
3872 send_sigstop (lwp);
3873 }
3874
3875 /* Increment the suspend count of an LWP, and stop it, if not stopped
3876 yet. */
3877 static void
3878 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3879 {
3880 struct lwp_info *lwp = get_thread_lwp (thread);
3881
3882 /* Ignore EXCEPT. */
3883 if (lwp == except)
3884 return;
3885
3886 lwp_suspended_inc (lwp);
3887
3888 send_sigstop (thread, except);
3889 }
3890
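/* Record that LWP is dead: store the exit status WSTAT for later
   reporting, and make sure we don't try to stop or wait for it
   again.  */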
3891 static void
3892 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3893 {
3894 /* Store the exit status for later. */
3895 lwp->status_pending_p = 1;
3896 lwp->status_pending = wstat;
3897
3898 /* Store in waitstatus as well, as there's nothing else to process
3899 for this event. */
3900 if (WIFEXITED (wstat))
3901 {
3902 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3903 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3904 }
3905 else if (WIFSIGNALED (wstat))
3906 {
3907 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3908 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3909 }
3910
3911 /* Prevent trying to stop it. */
3912 lwp->stopped = 1;
3913
3914 /* No further stops are expected from a dead lwp. */
3915 lwp->stop_expected = 0;
3916 }
3917
3918 /* Return true if LWP has exited already, and has a pending exit event
3919 to report to GDB. */
3920
3921 static int
3922 lwp_is_marked_dead (struct lwp_info *lwp)
3923 {
3924 return (lwp->status_pending_p
3925 && (WIFEXITED (lwp->status_pending)
3926 || WIFSIGNALED (lwp->status_pending)));
3927 }
3928
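/* Wait until every LWP we've sent a SIGSTOP to has actually stopped,
   leaving all other events pending.  If the previously current
   thread dies in the meantime, clear current_thread instead of
   silently switching to another thread.  */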
3929 void
3930 linux_process_target::wait_for_sigstop ()
3931 {
3932 struct thread_info *saved_thread;
3933 ptid_t saved_tid;
3934 int wstat;
3935 int ret;
3936
3937 saved_thread = current_thread;
3938 if (saved_thread != NULL)
3939 saved_tid = saved_thread->id;
3940 else
3941 saved_tid = null_ptid; /* avoid bogus unused warning */
3942
3943 if (debug_threads)
3944 debug_printf ("wait_for_sigstop: pulling events\n");
3945
3946 /* Passing NULL_PTID as filter indicates we want all events to be
3947 left pending. Eventually this returns when there are no
3948 unwaited-for children left. */
3949 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3950 gdb_assert (ret == -1);
3951
3952 if (saved_thread == NULL || mythread_alive (saved_tid))
3953 current_thread = saved_thread;
3954 else
3955 {
3956 if (debug_threads)
3957 debug_printf ("Previously current thread died.\n");
3958
3959 /* We can't change the current inferior behind GDB's back,
3960 otherwise, a subsequent command may apply to the wrong
3961 process. */
3962 current_thread = NULL;
3963 }
3964 }
3965
3966 /* Returns true if THREAD is stopped in a jump pad, and we can't
3967 move it out, because we need to report the stop event to GDB. For
3968 example, if the user puts a breakpoint in the jump pad, it's
3969 because she wants to debug it. */
3970
3971 static bool
3972 stuck_in_jump_pad_callback (thread_info *thread)
3973 {
3974 struct lwp_info *lwp = get_thread_lwp (thread);
3975
3976 if (lwp->suspended != 0)
3977 {
3978 internal_error (__FILE__, __LINE__,
3979 "LWP %ld is suspended, suspended=%d\n",
3980 lwpid_of (thread), lwp->suspended);
3981 }
3982 gdb_assert (lwp->stopped);
3983
/* Allow debugging the jump pad, gdb_collect, etc.  */
3985 return (supports_fast_tracepoints ()
3986 && agent_loaded_p ()
3987 && (gdb_breakpoint_here (lwp->stop_pc)
3988 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3989 || thread->last_resume_kind == resume_step)
3990 && (linux_fast_tracepoint_collecting (lwp, NULL)
3991 != fast_tpoint_collect_result::not_collecting));
3992 }
3993
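/* If THREAD is stopped inside a fast tracepoint jump pad, and
   nothing forces us to report the stop to GDB, defer any pending
   signal and resume it so it can move out of the pad; otherwise,
   keep it suspended for stabilization.  */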
3994 void
3995 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3996 {
3997 struct thread_info *saved_thread;
3998 struct lwp_info *lwp = get_thread_lwp (thread);
3999 int *wstat;
4000
4001 if (lwp->suspended != 0)
4002 {
4003 internal_error (__FILE__, __LINE__,
4004 "LWP %ld is suspended, suspended=%d\n",
4005 lwpid_of (thread), lwp->suspended);
4006 }
4007 gdb_assert (lwp->stopped);
4008
4009 /* For gdb_breakpoint_here. */
4010 saved_thread = current_thread;
4011 current_thread = thread;
4012
4013 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4014
4015 /* Allow debugging the jump pad, gdb_collect, etc. */
4016 if (!gdb_breakpoint_here (lwp->stop_pc)
4017 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4018 && thread->last_resume_kind != resume_step
4019 && maybe_move_out_of_jump_pad (lwp, wstat))
4020 {
4021 if (debug_threads)
4022 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4023 lwpid_of (thread));
4024
4025 if (wstat)
4026 {
4027 lwp->status_pending_p = 0;
4028 enqueue_one_deferred_signal (lwp, wstat);
4029
4030 if (debug_threads)
4031 debug_printf ("Signal %d for LWP %ld deferred "
4032 "(in jump pad)\n",
4033 WSTOPSIG (*wstat), lwpid_of (thread));
4034 }
4035
4036 resume_one_lwp (lwp, 0, 0, NULL);
4037 }
4038 else
4039 lwp_suspended_inc (lwp);
4040
4041 current_thread = saved_thread;
4042 }
4043
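/* Return true if THREAD's LWP is alive and running, i.e., neither
   marked dead nor stopped.  */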
4044 static bool
4045 lwp_running (thread_info *thread)
4046 {
4047 struct lwp_info *lwp = get_thread_lwp (thread);
4048
4049 if (lwp_is_marked_dead (lwp))
4050 return false;
4051
4052 return !lwp->stopped;
4053 }
4054
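/* Stop all LWPs, except EXCEPT if non-NULL, and wait until all of
   them have reported a stop.  If SUSPEND is set, also increment
   each LWP's suspend count.  */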
4055 void
4056 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
4057 {
4058 /* Should not be called recursively. */
4059 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4060
4061 if (debug_threads)
4062 {
4063 debug_enter ();
4064 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4065 suspend ? "stop-and-suspend" : "stop",
4066 except != NULL
4067 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4068 : "none");
4069 }
4070
4071 stopping_threads = (suspend
4072 ? STOPPING_AND_SUSPENDING_THREADS
4073 : STOPPING_THREADS);
4074
4075 if (suspend)
4076 for_each_thread ([&] (thread_info *thread)
4077 {
4078 suspend_and_send_sigstop (thread, except);
4079 });
4080 else
4081 for_each_thread ([&] (thread_info *thread)
4082 {
4083 send_sigstop (thread, except);
4084 });
4085
4086 wait_for_sigstop ();
4087 stopping_threads = NOT_STOPPING_THREADS;
4088
4089 if (debug_threads)
4090 {
4091 debug_printf ("stop_all_lwps done, setting stopping_threads "
4092 "back to !stopping\n");
4093 debug_exit ();
4094 }
4095 }
4096
4097 /* Enqueue one signal in the chain of signals which need to be
4098 delivered to this process on next resume. */
4099
4100 static void
4101 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4102 {
4103 struct pending_signals *p_sig = XNEW (struct pending_signals);
4104
4105 p_sig->prev = lwp->pending_signals;
4106 p_sig->signal = signal;
4107 if (info == NULL)
4108 memset (&p_sig->info, 0, sizeof (siginfo_t));
4109 else
4110 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4111 lwp->pending_signals = p_sig;
4112 }
4113
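/* Compute the addresses LWP may execute next, and plant a
   single-step breakpoint on each of them, for software
   single-step.  */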
4114 void
4115 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
4116 {
4117 struct thread_info *thread = get_lwp_thread (lwp);
4118 struct regcache *regcache = get_thread_regcache (thread, 1);
4119
4120 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4121
4122 current_thread = thread;
4123 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
4124
4125 for (CORE_ADDR pc : next_pcs)
4126 set_single_step_breakpoint (pc, current_ptid);
4127 }
4128
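/* Prepare LWP to be single-stepped.  Returns 1 if the step will be
   done in hardware (PTRACE_SINGLESTEP), or 0 if single-step
   breakpoints were installed instead (or stepping is not supported
   at all).  */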
4129 int
4130 linux_process_target::single_step (lwp_info* lwp)
4131 {
4132 int step = 0;
4133
4134 if (can_hardware_single_step ())
4135 {
4136 step = 1;
4137 }
4138 else if (supports_software_single_step ())
4139 {
4140 install_software_single_step_breakpoints (lwp);
4141 step = 0;
4142 }
4143 else
4144 {
if (debug_threads)
debug_printf ("stepping is not implemented on this target\n");
4147 }
4148
4149 return step;
4150 }
4151
/* The signal can be delivered to the inferior if we are not trying
   to finish a fast tracepoint collect.  Since the signal can be
   delivered during the step-over, the program may go to the signal
   handler and trap again after returning from it.  We can live with
   the spurious double traps.  */
4157
4158 static int
4159 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4160 {
4161 return (lwp->collecting_fast_tracepoint
4162 == fast_tpoint_collect_result::not_collecting);
4163 }
4164
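/* Resume execution of LWP, stepping it if STEP, delivering SIGNAL
   (with siginfo INFO) if it can be delivered right now, and
   enqueuing it otherwise.  Throws on ptrace errors.  */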
4165 void
4166 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4167 int signal, siginfo_t *info)
4168 {
4169 struct thread_info *thread = get_lwp_thread (lwp);
4170 struct thread_info *saved_thread;
4171 int ptrace_request;
4172 struct process_info *proc = get_thread_process (thread);
4173
/* Note that the target description may not be initialised yet
   (proc->tdesc == NULL) at this point, because the program hasn't
   stopped at its first instruction yet.  That happens while
   GDBserver is skipping the extra traps from the wrapper program
   (see option --wrapper).  Code in this function that requires
   register access should therefore be guarded by a check on
   proc->tdesc, or something equivalent.  */
4180
4181 if (lwp->stopped == 0)
4182 return;
4183
4184 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4185
4186 fast_tpoint_collect_result fast_tp_collecting
4187 = lwp->collecting_fast_tracepoint;
4188
4189 gdb_assert (!stabilizing_threads
4190 || (fast_tp_collecting
4191 != fast_tpoint_collect_result::not_collecting));
4192
4193 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4194 user used the "jump" command, or "set $pc = foo"). */
4195 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4196 {
4197 /* Collecting 'while-stepping' actions doesn't make sense
4198 anymore. */
4199 release_while_stepping_state_list (thread);
4200 }
4201
4202 /* If we have pending signals or status, and a new signal, enqueue the
4203 signal. Also enqueue the signal if it can't be delivered to the
4204 inferior right now. */
4205 if (signal != 0
4206 && (lwp->status_pending_p
4207 || lwp->pending_signals != NULL
4208 || !lwp_signal_can_be_delivered (lwp)))
4209 {
4210 enqueue_pending_signal (lwp, signal, info);
4211
4212 /* Postpone any pending signal. It was enqueued above. */
4213 signal = 0;
4214 }
4215
4216 if (lwp->status_pending_p)
4217 {
4218 if (debug_threads)
4219 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4220 " has pending status\n",
4221 lwpid_of (thread), step ? "step" : "continue",
4222 lwp->stop_expected ? "expected" : "not expected");
4223 return;
4224 }
4225
4226 saved_thread = current_thread;
4227 current_thread = thread;
4228
4229 /* This bit needs some thinking about. If we get a signal that
4230 we must report while a single-step reinsert is still pending,
4231 we often end up resuming the thread. It might be better to
4232 (ew) allow a stack of pending events; then we could be sure that
4233 the reinsert happened right away and not lose any signals.
4234
4235 Making this stack would also shrink the window in which breakpoints are
4236 uninserted (see comment in linux_wait_for_lwp) but not enough for
4237 complete correctness, so it won't solve that problem. It may be
4238 worthwhile just to solve this one, however. */
4239 if (lwp->bp_reinsert != 0)
4240 {
4241 if (debug_threads)
4242 debug_printf (" pending reinsert at 0x%s\n",
4243 paddress (lwp->bp_reinsert));
4244
4245 if (can_hardware_single_step ())
4246 {
4247 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4248 {
4249 if (step == 0)
4250 warning ("BAD - reinserting but not stepping.");
4251 if (lwp->suspended)
4252 warning ("BAD - reinserting and suspended(%d).",
4253 lwp->suspended);
4254 }
4255 }
4256
4257 step = maybe_hw_step (thread);
4258 }
4259
4260 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4261 {
4262 if (debug_threads)
4263 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4264 " (exit-jump-pad-bkpt)\n",
4265 lwpid_of (thread));
4266 }
4267 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4268 {
4269 if (debug_threads)
4270 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4271 " single-stepping\n",
4272 lwpid_of (thread));
4273
4274 if (can_hardware_single_step ())
4275 step = 1;
4276 else
4277 {
4278 internal_error (__FILE__, __LINE__,
4279 "moving out of jump pad single-stepping"
4280 " not implemented on this target");
4281 }
4282 }
4283
/* If we have while-stepping actions in this thread, set it stepping.
   If we have a signal to deliver, it may or may not be set to
   SIG_IGN; we don't know.  Assume so, and allow collecting
   while-stepping into a signal handler.  A possible smart thing to
   do would be to set an internal breakpoint at the signal return
   address, continue, and carry on catching this while-stepping
   action only when that breakpoint is hit.  A future
   enhancement.  */
4292 if (thread->while_stepping != NULL)
4293 {
4294 if (debug_threads)
4295 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4296 lwpid_of (thread));
4297
4298 step = single_step (lwp);
4299 }
4300
4301 if (proc->tdesc != NULL && low_supports_breakpoints ())
4302 {
4303 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4304
4305 lwp->stop_pc = low_get_pc (regcache);
4306
4307 if (debug_threads)
4308 {
4309 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4310 (long) lwp->stop_pc);
4311 }
4312 }
4313
4314 /* If we have pending signals, consume one if it can be delivered to
4315 the inferior. */
4316 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4317 {
4318 struct pending_signals **p_sig;
4319
4320 p_sig = &lwp->pending_signals;
4321 while ((*p_sig)->prev != NULL)
4322 p_sig = &(*p_sig)->prev;
4323
4324 signal = (*p_sig)->signal;
4325 if ((*p_sig)->info.si_signo != 0)
4326 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4327 &(*p_sig)->info);
4328
4329 free (*p_sig);
4330 *p_sig = NULL;
4331 }
4332
4333 if (debug_threads)
4334 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4335 lwpid_of (thread), step ? "step" : "continue", signal,
4336 lwp->stop_expected ? "expected" : "not expected");
4337
4338 if (the_low_target.prepare_to_resume != NULL)
4339 the_low_target.prepare_to_resume (lwp);
4340
4341 regcache_invalidate_thread (thread);
4342 errno = 0;
4343 lwp->stepping = step;
4344 if (step)
4345 ptrace_request = PTRACE_SINGLESTEP;
4346 else if (gdb_catching_syscalls_p (lwp))
4347 ptrace_request = PTRACE_SYSCALL;
4348 else
4349 ptrace_request = PTRACE_CONT;
4350 ptrace (ptrace_request,
4351 lwpid_of (thread),
4352 (PTRACE_TYPE_ARG3) 0,
4353 /* Coerce to a uintptr_t first to avoid potential gcc warning
4354 of coercing an 8 byte integer to a 4 byte pointer. */
4355 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4356
4357 current_thread = saved_thread;
4358 if (errno)
4359 perror_with_name ("resuming thread");
4360
4361 /* Successfully resumed. Clear state that no longer makes sense,
4362 and mark the LWP as running. Must not do this before resuming
4363 otherwise if that fails other code will be confused. E.g., we'd
4364 later try to stop the LWP and hang forever waiting for a stop
4365 status. Note that we must not throw after this is cleared,
4366 otherwise handle_zombie_lwp_error would get confused. */
4367 lwp->stopped = 0;
4368 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4369 }
4370
4371 /* Called when we try to resume a stopped LWP and that errors out. If
4372 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4373 or about to become), discard the error, clear any pending status
4374 the LWP may have, and return true (we'll collect the exit status
4375 soon enough). Otherwise, return false. */
4376
4377 static int
4378 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4379 {
4380 struct thread_info *thread = get_lwp_thread (lp);
4381
4382 /* If we get an error after resuming the LWP successfully, we'd
4383 confuse !T state for the LWP being gone. */
4384 gdb_assert (lp->stopped);
4385
4386 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4387 because even if ptrace failed with ESRCH, the tracee may be "not
4388 yet fully dead", but already refusing ptrace requests. In that
4389 case the tracee has 'R (Running)' state for a little bit
4390 (observed in Linux 3.18). See also the note on ESRCH in the
4391 ptrace(2) man page. Instead, check whether the LWP has any state
4392 other than ptrace-stopped. */
4393
4394 /* Don't assume anything if /proc/PID/status can't be read. */
4395 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4396 {
4397 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4398 lp->status_pending_p = 0;
4399 return 1;
4400 }
4401 return 0;
4402 }
4403
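/* Like resume_one_lwp_throw, but swallow the error if the LWP is no
   longer ptrace-stopped, i.e., it is a zombie or about to become
   one.  */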
4404 void
4405 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4406 siginfo_t *info)
4407 {
4408 try
4409 {
4410 resume_one_lwp_throw (lwp, step, signal, info);
4411 }
4412 catch (const gdb_exception_error &ex)
4413 {
4414 if (!check_ptrace_stopped_lwp_gone (lwp))
4415 throw;
4416 }
4417 }
4418
4419 /* This function is called once per thread via for_each_thread.
4420 We look up which resume request applies to THREAD and mark it with a
4421 pointer to the appropriate resume request.
4422
4423 This algorithm is O(threads * resume elements), but resume elements
4424 is small (and will remain small at least until GDB supports thread
4425 suspension). */
4426
4427 static void
4428 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4429 {
4430 struct lwp_info *lwp = get_thread_lwp (thread);
4431
for (size_t ndx = 0; ndx < n; ndx++)
4433 {
4434 ptid_t ptid = resume[ndx].thread;
4435 if (ptid == minus_one_ptid
4436 || ptid == thread->id
4437 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4438 of PID'. */
4439 || (ptid.pid () == pid_of (thread)
4440 && (ptid.is_pid ()
4441 || ptid.lwp () == -1)))
4442 {
4443 if (resume[ndx].kind == resume_stop
4444 && thread->last_resume_kind == resume_stop)
4445 {
4446 if (debug_threads)
4447 debug_printf ("already %s LWP %ld at GDB's request\n",
4448 (thread->last_status.kind
4449 == TARGET_WAITKIND_STOPPED)
4450 ? "stopped"
4451 : "stopping",
4452 lwpid_of (thread));
4453
4454 continue;
4455 }
4456
4457 /* Ignore (wildcard) resume requests for already-resumed
4458 threads. */
4459 if (resume[ndx].kind != resume_stop
4460 && thread->last_resume_kind != resume_stop)
4461 {
4462 if (debug_threads)
4463 debug_printf ("already %s LWP %ld at GDB's request\n",
4464 (thread->last_resume_kind
4465 == resume_step)
4466 ? "stepping"
4467 : "continuing",
4468 lwpid_of (thread));
4469 continue;
4470 }
4471
4472 /* Don't let wildcard resumes resume fork children that GDB
4473 does not yet know are new fork children. */
4474 if (lwp->fork_relative != NULL)
4475 {
4476 struct lwp_info *rel = lwp->fork_relative;
4477
4478 if (rel->status_pending_p
4479 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4480 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4481 {
4482 if (debug_threads)
4483 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4484 lwpid_of (thread));
4485 continue;
4486 }
4487 }
4488
4489 /* If the thread has a pending event that has already been
4490 reported to GDBserver core, but GDB has not pulled the
4491 event out of the vStopped queue yet, likewise, ignore the
4492 (wildcard) resume request. */
4493 if (in_queued_stop_replies (thread->id))
4494 {
4495 if (debug_threads)
4496 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4497 lwpid_of (thread));
4498 continue;
4499 }
4500
4501 lwp->resume = &resume[ndx];
4502 thread->last_resume_kind = lwp->resume->kind;
4503
4504 lwp->step_range_start = lwp->resume->step_range_start;
4505 lwp->step_range_end = lwp->resume->step_range_end;
4506
4507 /* If we had a deferred signal to report, dequeue one now.
4508 This can happen if LWP gets more than one signal while
4509 trying to get out of a jump pad. */
4510 if (lwp->stopped
4511 && !lwp->status_pending_p
4512 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4513 {
4514 lwp->status_pending_p = 1;
4515
4516 if (debug_threads)
4517 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4518 "leaving status pending.\n",
4519 WSTOPSIG (lwp->status_pending),
4520 lwpid_of (thread));
4521 }
4522
4523 return;
4524 }
4525 }
4526
4527 /* No resume action for this thread. */
4528 lwp->resume = NULL;
4529 }
4530
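/* find_thread callback: return true if THREAD is going to be resumed
   and still has a pending status to report to GDB.  */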
4531 bool
4532 linux_process_target::resume_status_pending (thread_info *thread)
4533 {
4534 struct lwp_info *lwp = get_thread_lwp (thread);
4535
4536 /* LWPs which will not be resumed are not interesting, because
4537 we might not wait for them next time through linux_wait. */
4538 if (lwp->resume == NULL)
4539 return false;
4540
4541 return thread_still_has_status_pending (thread);
4542 }
4543
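/* Return true if THREAD is stopped at a breakpoint (or fast
   tracepoint jump) that gdbserver itself needs to step over before
   the thread can be resumed.  */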
4544 bool
4545 linux_process_target::thread_needs_step_over (thread_info *thread)
4546 {
4547 struct lwp_info *lwp = get_thread_lwp (thread);
4548 struct thread_info *saved_thread;
4549 CORE_ADDR pc;
4550 struct process_info *proc = get_thread_process (thread);
4551
/* GDBserver is skipping the extra traps from the wrapper program,
   so there is no need to step over anything.  */
4554 if (proc->tdesc == NULL)
4555 return false;
4556
4557 /* LWPs which will not be resumed are not interesting, because we
4558 might not wait for them next time through linux_wait. */
4559
4560 if (!lwp->stopped)
4561 {
4562 if (debug_threads)
4563 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4564 lwpid_of (thread));
4565 return false;
4566 }
4567
4568 if (thread->last_resume_kind == resume_stop)
4569 {
4570 if (debug_threads)
4571 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4572 " stopped\n",
4573 lwpid_of (thread));
4574 return false;
4575 }
4576
4577 gdb_assert (lwp->suspended >= 0);
4578
4579 if (lwp->suspended)
4580 {
4581 if (debug_threads)
4582 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4583 lwpid_of (thread));
4584 return false;
4585 }
4586
4587 if (lwp->status_pending_p)
4588 {
4589 if (debug_threads)
4590 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4591 " status.\n",
4592 lwpid_of (thread));
4593 return false;
4594 }
4595
4596 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4597 or we have. */
4598 pc = get_pc (lwp);
4599
/* If the PC has changed since we stopped, then don't do anything,
   and let the breakpoint/tracepoint be hit.  This happens if, for
   instance, GDB handled the decr_pc_after_break subtraction itself,
   GDB is OOL stepping this thread, or the user has issued a "jump"
   command, or poked the thread's registers herself.  */
4605 if (pc != lwp->stop_pc)
4606 {
4607 if (debug_threads)
4608 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4609 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4610 lwpid_of (thread),
4611 paddress (lwp->stop_pc), paddress (pc));
4612 return false;
4613 }
4614
4615 /* On software single step target, resume the inferior with signal
4616 rather than stepping over. */
4617 if (supports_software_single_step ()
4618 && lwp->pending_signals != NULL
4619 && lwp_signal_can_be_delivered (lwp))
4620 {
4621 if (debug_threads)
4622 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4623 " signals.\n",
4624 lwpid_of (thread));
4625
4626 return false;
4627 }
4628
4629 saved_thread = current_thread;
4630 current_thread = thread;
4631
4632 /* We can only step over breakpoints we know about. */
4633 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4634 {
/* Don't step over a breakpoint that GDB expects to hit
   though.  If the condition is being evaluated on the target's
   side and it evaluates to false, step over this breakpoint as
   well.  */
4638 if (gdb_breakpoint_here (pc)
4639 && gdb_condition_true_at_breakpoint (pc)
4640 && gdb_no_commands_at_breakpoint (pc))
4641 {
4642 if (debug_threads)
4643 debug_printf ("Need step over [LWP %ld]? yes, but found"
4644 " GDB breakpoint at 0x%s; skipping step over\n",
4645 lwpid_of (thread), paddress (pc));
4646
4647 current_thread = saved_thread;
4648 return false;
4649 }
4650 else
4651 {
4652 if (debug_threads)
4653 debug_printf ("Need step over [LWP %ld]? yes, "
4654 "found breakpoint at 0x%s\n",
4655 lwpid_of (thread), paddress (pc));
4656
/* We've found an lwp that needs stepping over --- return true so
   that find_thread stops looking.  */
4659 current_thread = saved_thread;
4660
4661 return true;
4662 }
4663 }
4664
4665 current_thread = saved_thread;
4666
4667 if (debug_threads)
4668 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4669 " at 0x%s\n",
4670 lwpid_of (thread), paddress (pc));
4671
4672 return false;
4673 }
4674
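/* Step over the breakpoint LWP is stopped at: stop all other LWPs,
   uninsert the breakpoint (and any fast tracepoint jump) at LWP's
   PC, resume LWP alone with a single step, and arrange for the next
   event to come from it (see step_over_bkpt).  */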
4675 void
4676 linux_process_target::start_step_over (lwp_info *lwp)
4677 {
4678 struct thread_info *thread = get_lwp_thread (lwp);
4679 struct thread_info *saved_thread;
4680 CORE_ADDR pc;
4681 int step;
4682
4683 if (debug_threads)
4684 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4685 lwpid_of (thread));
4686
4687 stop_all_lwps (1, lwp);
4688
4689 if (lwp->suspended != 0)
4690 {
4691 internal_error (__FILE__, __LINE__,
4692 "LWP %ld suspended=%d\n", lwpid_of (thread),
4693 lwp->suspended);
4694 }
4695
4696 if (debug_threads)
4697 debug_printf ("Done stopping all threads for step-over.\n");
4698
4699 /* Note, we should always reach here with an already adjusted PC,
4700 either by GDB (if we're resuming due to GDB's request), or by our
4701 caller, if we just finished handling an internal breakpoint GDB
4702 shouldn't care about. */
4703 pc = get_pc (lwp);
4704
4705 saved_thread = current_thread;
4706 current_thread = thread;
4707
4708 lwp->bp_reinsert = pc;
4709 uninsert_breakpoints_at (pc);
4710 uninsert_fast_tracepoint_jumps_at (pc);
4711
4712 step = single_step (lwp);
4713
4714 current_thread = saved_thread;
4715
4716 resume_one_lwp (lwp, step, 0, NULL);
4717
4718 /* Require next event from this LWP. */
4719 step_over_bkpt = thread->id;
4720 }
4721
/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any single-step
   breakpoints we've set, on non-hardware single-step targets.  */
4725
4726 static int
4727 finish_step_over (struct lwp_info *lwp)
4728 {
4729 if (lwp->bp_reinsert != 0)
4730 {
4731 struct thread_info *saved_thread = current_thread;
4732
4733 if (debug_threads)
4734 debug_printf ("Finished step over.\n");
4735
4736 current_thread = get_lwp_thread (lwp);
4737
4738 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4739 may be no breakpoint to reinsert there by now. */
4740 reinsert_breakpoints_at (lwp->bp_reinsert);
4741 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4742
4743 lwp->bp_reinsert = 0;
4744
4745 /* Delete any single-step breakpoints. No longer needed. We
4746 don't have to worry about other threads hitting this trap,
4747 and later not being able to explain it, because we were
4748 stepping over a breakpoint, and we hold all threads but
4749 LWP stopped while doing that. */
4750 if (!can_hardware_single_step ())
4751 {
4752 gdb_assert (has_single_step_breakpoints (current_thread));
4753 delete_single_step_breakpoints (current_thread);
4754 }
4755
4756 step_over_bkpt = null_ptid;
4757 current_thread = saved_thread;
4758 return 1;
4759 }
4760 else
4761 return 0;
4762 }
4763
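/* If a step-over is in progress, pull events until it finishes,
   clean up after it, and unsuspend the other LWPs.  */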
4764 void
4765 linux_process_target::complete_ongoing_step_over ()
4766 {
4767 if (step_over_bkpt != null_ptid)
4768 {
4769 struct lwp_info *lwp;
4770 int wstat;
4771 int ret;
4772
4773 if (debug_threads)
4774 debug_printf ("detach: step over in progress, finish it first\n");
4775
4776 /* Passing NULL_PTID as filter indicates we want all events to
4777 be left pending. Eventually this returns when there are no
4778 unwaited-for children left. */
4779 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4780 __WALL);
4781 gdb_assert (ret == -1);
4782
4783 lwp = find_lwp_pid (step_over_bkpt);
4784 if (lwp != NULL)
4785 finish_step_over (lwp);
4786 step_over_bkpt = null_ptid;
4787 unsuspend_all_lwps (lwp);
4788 }
4789 }
4790
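/* for_each_thread callback for resume: handle THREAD's resume
   request, if it has one.  Stop requests are acted on immediately;
   for other requests, the LWP is resumed unless LEAVE_ALL_STOPPED
   is set, it is suspended, or it has a status pending, in which
   case only its signals are queued.  */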
4791 void
4792 linux_process_target::resume_one_thread (thread_info *thread,
4793 bool leave_all_stopped)
4794 {
4795 struct lwp_info *lwp = get_thread_lwp (thread);
4796 int leave_pending;
4797
4798 if (lwp->resume == NULL)
4799 return;
4800
4801 if (lwp->resume->kind == resume_stop)
4802 {
4803 if (debug_threads)
4804 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4805
4806 if (!lwp->stopped)
4807 {
4808 if (debug_threads)
4809 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4810
4811 /* Stop the thread, and wait for the event asynchronously,
4812 through the event loop. */
4813 send_sigstop (lwp);
4814 }
4815 else
4816 {
4817 if (debug_threads)
4818 debug_printf ("already stopped LWP %ld\n",
4819 lwpid_of (thread));
4820
4821 /* The LWP may have been stopped in an internal event that
4822 was not meant to be notified back to GDB (e.g., gdbserver
4823 breakpoint), so we should be reporting a stop event in
4824 this case too. */
4825
4826 /* If the thread already has a pending SIGSTOP, this is a
4827 no-op. Otherwise, something later will presumably resume
4828 the thread and this will cause it to cancel any pending
4829 operation, due to last_resume_kind == resume_stop. If
4830 the thread already has a pending status to report, we
4831 will still report it the next time we wait - see
4832 status_pending_p_callback. */
4833
4834 /* If we already have a pending signal to report, then
4835 there's no need to queue a SIGSTOP, as this means we're
4836 midway through moving the LWP out of the jumppad, and we
4837 will report the pending signal as soon as that is
4838 finished. */
4839 if (lwp->pending_signals_to_report == NULL)
4840 send_sigstop (lwp);
4841 }
4842
4843 /* For stop requests, we're done. */
4844 lwp->resume = NULL;
4845 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4846 return;
4847 }
4848
/* If this thread, which is about to be resumed, has a pending
   status, then don't resume it - we can just report the pending
   status.  Likewise if it is suspended, because e.g., another
   thread is stepping past a breakpoint.  Make sure to queue any
   signals that would otherwise be sent.  In all-stop mode, we make
   this decision based on whether *any* thread has a pending status.
   If there's a thread that needs the step-over-breakpoint dance,
   then don't resume any other thread but that particular one.  */
4857 leave_pending = (lwp->suspended
4858 || lwp->status_pending_p
4859 || leave_all_stopped);
4860
4861 /* If we have a new signal, enqueue the signal. */
4862 if (lwp->resume->sig != 0)
4863 {
4864 siginfo_t info, *info_p;
4865
4866 /* If this is the same signal we were previously stopped by,
4867 make sure to queue its siginfo. */
4868 if (WIFSTOPPED (lwp->last_status)
4869 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4870 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4871 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4872 info_p = &info;
4873 else
4874 info_p = NULL;
4875
4876 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4877 }
4878
4879 if (!leave_pending)
4880 {
4881 if (debug_threads)
4882 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4883
4884 proceed_one_lwp (thread, NULL);
4885 }
4886 else
4887 {
4888 if (debug_threads)
4889 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4890 }
4891
4892 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4893 lwp->resume = NULL;
4894 }
4895
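/* Implementation of the resume target op.  Record each thread's
   resume request, decide whether all threads must be left stopped
   (a pending status to report, or a step-over needed), act on each
   request, and finally start the step-over, if one is needed.  */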
4896 void
4897 linux_process_target::resume (thread_resume *resume_info, size_t n)
4898 {
4899 struct thread_info *need_step_over = NULL;
4900
4901 if (debug_threads)
4902 {
4903 debug_enter ();
4904 debug_printf ("linux_resume:\n");
4905 }
4906
4907 for_each_thread ([&] (thread_info *thread)
4908 {
4909 linux_set_resume_request (thread, resume_info, n);
4910 });
4911
/* If there is a thread which would otherwise be resumed, which has
   a pending status, then don't resume any threads - we can just
   report the pending status.  Make sure to queue any signals that
   would otherwise be sent.  In non-stop mode, we'll apply this
   logic to each thread individually.  We consume all pending events
   before even considering starting a step-over (in all-stop).  */
4918 bool any_pending = false;
4919 if (!non_stop)
4920 any_pending = find_thread ([this] (thread_info *thread)
4921 {
4922 return resume_status_pending (thread);
4923 }) != nullptr;
4924
4925 /* If there is a thread which would otherwise be resumed, which is
4926 stopped at a breakpoint that needs stepping over, then don't
4927 resume any threads - have it step over the breakpoint with all
4928 other threads stopped, then resume all threads again. Make sure
4929 to queue any signals that would otherwise be delivered or
4930 queued. */
4931 if (!any_pending && low_supports_breakpoints ())
4932 need_step_over = find_thread ([this] (thread_info *thread)
4933 {
4934 return thread_needs_step_over (thread);
4935 });
4936
4937 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4938
4939 if (debug_threads)
4940 {
4941 if (need_step_over != NULL)
4942 debug_printf ("Not resuming all, need step over\n");
4943 else if (any_pending)
4944 debug_printf ("Not resuming, all-stop and found "
4945 "an LWP with pending status\n");
4946 else
4947 debug_printf ("Resuming, no pending status or step over needed\n");
4948 }
4949
4950 /* Even if we're leaving threads stopped, queue all signals we'd
4951 otherwise deliver. */
4952 for_each_thread ([&] (thread_info *thread)
4953 {
4954 resume_one_thread (thread, leave_all_stopped);
4955 });
4956
4957 if (need_step_over)
4958 start_step_over (get_thread_lwp (need_step_over));
4959
4960 if (debug_threads)
4961 {
4962 debug_printf ("linux_resume done\n");
4963 debug_exit ();
4964 }
4965
4966 /* We may have events that were pending that can/should be sent to
4967 the client now. Trigger a linux_wait call. */
4968 if (target_is_async_p ())
4969 async_file_mark ();
4970 }
4971
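/* for_each_thread callback: re-resume THREAD's LWP according to its
   last resume request, unless it is EXCEPT, already running,
   suspended, meant to remain stopped, or holding a pending
   status.  */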
4972 void
4973 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4974 {
4975 struct lwp_info *lwp = get_thread_lwp (thread);
4976 int step;
4977
4978 if (lwp == except)
4979 return;
4980
4981 if (debug_threads)
4982 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4983
4984 if (!lwp->stopped)
4985 {
4986 if (debug_threads)
4987 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4988 return;
4989 }
4990
4991 if (thread->last_resume_kind == resume_stop
4992 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4993 {
4994 if (debug_threads)
debug_printf ("   client wants LWP %ld to remain stopped\n",
lwpid_of (thread));
4997 return;
4998 }
4999
5000 if (lwp->status_pending_p)
5001 {
5002 if (debug_threads)
5003 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5004 lwpid_of (thread));
5005 return;
5006 }
5007
5008 gdb_assert (lwp->suspended >= 0);
5009
5010 if (lwp->suspended)
5011 {
5012 if (debug_threads)
5013 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5014 return;
5015 }
5016
5017 if (thread->last_resume_kind == resume_stop
5018 && lwp->pending_signals_to_report == NULL
5019 && (lwp->collecting_fast_tracepoint
5020 == fast_tpoint_collect_result::not_collecting))
5021 {
/* We haven't reported this LWP as stopped yet (otherwise, the
   last_status.kind check above would catch it, and we wouldn't
   reach here).  This LWP may have been momentarily paused by a
   stop_all_lwps call while handling, for example, another LWP's
   step-over.  In that case, the pending expected SIGSTOP signal
   that was queued at vCont;t handling time will have already been
   consumed by wait_for_sigstop, and so we need to requeue another
   one here.  Note that if the LWP already has a SIGSTOP pending,
   this is a no-op.  */
5031
5032 if (debug_threads)
5033 debug_printf ("Client wants LWP %ld to stop. "
5034 "Making sure it has a SIGSTOP pending\n",
5035 lwpid_of (thread));
5036
5037 send_sigstop (lwp);
5038 }
5039
5040 if (thread->last_resume_kind == resume_step)
5041 {
5042 if (debug_threads)
5043 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5044 lwpid_of (thread));
5045
5046 /* If resume_step is requested by GDB, install single-step
5047 breakpoints when the thread is about to be actually resumed if
5048 the single-step breakpoints weren't removed. */
5049 if (supports_software_single_step ()
5050 && !has_single_step_breakpoints (thread))
5051 install_software_single_step_breakpoints (lwp);
5052
5053 step = maybe_hw_step (thread);
5054 }
5055 else if (lwp->bp_reinsert != 0)
5056 {
5057 if (debug_threads)
5058 debug_printf (" stepping LWP %ld, reinsert set\n",
5059 lwpid_of (thread));
5060
5061 step = maybe_hw_step (thread);
5062 }
5063 else
5064 step = 0;
5065
5066 resume_one_lwp (lwp, step, 0, NULL);
5067 }
5068
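/* for_each_thread callback: decrement THREAD's LWP suspend count,
   then proceed it as proceed_one_lwp does, unless it is EXCEPT.  */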
5069 void
5070 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5071 lwp_info *except)
5072 {
5073 struct lwp_info *lwp = get_thread_lwp (thread);
5074
5075 if (lwp == except)
5076 return;
5077
5078 lwp_suspended_decr (lwp);
5079
5080 proceed_one_lwp (thread, except);
5081 }
5082
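/* Re-resume all LWPs after, e.g., a stop_all_lwps call, unless some
   thread needs a step-over first, in which case start the step-over
   and leave everything else stopped.  */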
5083 void
5084 linux_process_target::proceed_all_lwps ()
5085 {
5086 struct thread_info *need_step_over;
5087
5088 /* If there is a thread which would otherwise be resumed, which is
5089 stopped at a breakpoint that needs stepping over, then don't
5090 resume any threads - have it step over the breakpoint with all
5091 other threads stopped, then resume all threads again. */
5092
5093 if (low_supports_breakpoints ())
5094 {
5095 need_step_over = find_thread ([this] (thread_info *thread)
5096 {
5097 return thread_needs_step_over (thread);
5098 });
5099
5100 if (need_step_over != NULL)
5101 {
5102 if (debug_threads)
5103 debug_printf ("proceed_all_lwps: found "
5104 "thread %ld needing a step-over\n",
5105 lwpid_of (need_step_over));
5106
5107 start_step_over (get_thread_lwp (need_step_over));
5108 return;
5109 }
5110 }
5111
5112 if (debug_threads)
5113 debug_printf ("Proceeding, no step-over needed\n");
5114
5115 for_each_thread ([this] (thread_info *thread)
5116 {
5117 proceed_one_lwp (thread, NULL);
5118 });
5119 }
5120
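/* Undo a previous stop_all_lwps call: re-resume all LWPs, except
   EXCEPT if non-NULL.  If UNSUSPEND is set, also decrement each
   LWP's suspend count first.  */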
5121 void
5122 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5123 {
5124 if (debug_threads)
5125 {
5126 debug_enter ();
5127 if (except)
5128 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5129 lwpid_of (get_lwp_thread (except)));
5130 else
5131 debug_printf ("unstopping all lwps\n");
5132 }
5133
5134 if (unsuspend)
5135 for_each_thread ([&] (thread_info *thread)
5136 {
5137 unsuspend_and_proceed_one_lwp (thread, except);
5138 });
5139 else
5140 for_each_thread ([&] (thread_info *thread)
5141 {
5142 proceed_one_lwp (thread, except);
5143 });
5144
5145 if (debug_threads)
5146 {
5147 debug_printf ("unstop_all_lwps done\n");
5148 debug_exit ();
5149 }
5150 }
5151
5152
5153 #ifdef HAVE_LINUX_REGSETS
5154
5155 #define use_linux_regsets 1
5156
5157 /* Returns true if REGSET has been disabled. */
5158
5159 static int
5160 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5161 {
5162 return (info->disabled_regsets != NULL
5163 && info->disabled_regsets[regset - info->regsets]);
5164 }
5165
5166 /* Disable REGSET. */
5167
5168 static void
5169 disable_regset (struct regsets_info *info, struct regset_info *regset)
5170 {
5171 int dr_offset;
5172
5173 dr_offset = regset - info->regsets;
5174 if (info->disabled_regsets == NULL)
5175 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5176 info->disabled_regsets[dr_offset] = 1;
5177 }
5178
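/* Fetch registers into REGCACHE using every enabled regset.
   Returns 0 if the general registers were among them, or 1
   otherwise, in which case the caller falls back to
   PTRACE_PEEKUSER transfers.  */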
5179 static int
5180 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5181 struct regcache *regcache)
5182 {
5183 struct regset_info *regset;
5184 int saw_general_regs = 0;
5185 int pid;
5186 struct iovec iov;
5187
5188 pid = lwpid_of (current_thread);
5189 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5190 {
5191 void *buf, *data;
5192 int nt_type, res;
5193
5194 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5195 continue;
5196
5197 buf = xmalloc (regset->size);
5198
5199 nt_type = regset->nt_type;
5200 if (nt_type)
5201 {
5202 iov.iov_base = buf;
5203 iov.iov_len = regset->size;
5204 data = (void *) &iov;
5205 }
5206 else
5207 data = buf;
5208
5209 #ifndef __sparc__
5210 res = ptrace (regset->get_request, pid,
5211 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5212 #else
5213 res = ptrace (regset->get_request, pid, data, nt_type);
5214 #endif
5215 if (res < 0)
5216 {
5217 if (errno == EIO
5218 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5219 {
5220 /* If we get EIO on a regset, or an EINVAL and the regset is
5221 optional, do not try it again for this process mode. */
5222 disable_regset (regsets_info, regset);
5223 }
5224 else if (errno == ENODATA)
5225 {
5226 /* ENODATA may be returned if the regset is currently
5227 not "active". This can happen in normal operation,
5228 so suppress the warning in this case. */
5229 }
5230 else if (errno == ESRCH)
5231 {
5232 /* At this point, ESRCH should mean the process is
5233 already gone, in which case we simply ignore attempts
5234 to read its registers. */
5235 }
5236 else
5237 {
5238 char s[256];
5239 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5240 pid);
5241 perror (s);
5242 }
5243 }
5244 else
5245 {
5246 if (regset->type == GENERAL_REGS)
5247 saw_general_regs = 1;
5248 regset->store_function (regcache, buf);
5249 }
5250 free (buf);
5251 }
5252 if (saw_general_regs)
5253 return 0;
5254 else
5255 return 1;
5256 }
5257
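/* Write registers from REGCACHE to the inferior using every enabled
   regset, first reading each regset back so that kernel-only fields
   are preserved.  Returns 0 if the general registers were written,
   or 1 otherwise.  */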
5258 static int
5259 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5260 struct regcache *regcache)
5261 {
5262 struct regset_info *regset;
5263 int saw_general_regs = 0;
5264 int pid;
5265 struct iovec iov;
5266
5267 pid = lwpid_of (current_thread);
5268 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5269 {
5270 void *buf, *data;
5271 int nt_type, res;
5272
5273 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5274 || regset->fill_function == NULL)
5275 continue;
5276
5277 buf = xmalloc (regset->size);
5278
5279 /* First fill the buffer with the current register set contents,
5280 in case there are any items in the kernel's regset that are
5281 not in gdbserver's regcache. */
5282
5283 nt_type = regset->nt_type;
5284 if (nt_type)
5285 {
5286 iov.iov_base = buf;
5287 iov.iov_len = regset->size;
5288 data = (void *) &iov;
5289 }
5290 else
5291 data = buf;
5292
5293 #ifndef __sparc__
5294 res = ptrace (regset->get_request, pid,
5295 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5296 #else
5297 res = ptrace (regset->get_request, pid, data, nt_type);
5298 #endif
5299
5300 if (res == 0)
5301 {
5302 /* Then overlay our cached registers on that. */
5303 regset->fill_function (regcache, buf);
5304
5305 /* Only now do we write the register set. */
5306 #ifndef __sparc__
5307 res = ptrace (regset->set_request, pid,
5308 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5309 #else
5310 res = ptrace (regset->set_request, pid, data, nt_type);
5311 #endif
5312 }
5313
5314 if (res < 0)
5315 {
5316 if (errno == EIO
5317 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5318 {
5319 /* If we get EIO on a regset, or an EINVAL and the regset is
5320 optional, do not try it again for this process mode. */
5321 disable_regset (regsets_info, regset);
5322 }
5323 else if (errno == ESRCH)
5324 {
5325 /* At this point, ESRCH should mean the process is
5326 already gone, in which case we simply ignore attempts
5327 to change its registers. See also the related
5328 comment in resume_one_lwp. */
5329 free (buf);
5330 return 0;
5331 }
5332 else
5333 {
5334 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5335 }
5336 }
5337 else if (regset->type == GENERAL_REGS)
5338 saw_general_regs = 1;
5339 free (buf);
5340 }
5341 if (saw_general_regs)
5342 return 0;
5343 else
5344 return 1;
5345 }
5346
5347 #else /* !HAVE_LINUX_REGSETS */
5348
5349 #define use_linux_regsets 0
5350 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5351 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5352
5353 #endif
5354
5355 /* Return 1 if register REGNO is supported by one of the regset ptrace
5356 calls or 0 if it has to be transferred individually. */
5357
5358 static int
5359 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5360 {
5361 unsigned char mask = 1 << (regno % 8);
5362 size_t index = regno / 8;
5363
5364 return (use_linux_regsets
5365 && (regs_info->regset_bitmap == NULL
5366 || (regs_info->regset_bitmap[index] & mask) != 0));
5367 }
5368
5369 #ifdef HAVE_LINUX_USRREGS
5370
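/* Return the offset of register REGNUM in the inferior's USER area,
   as recorded in USRREGS's regmap.  */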
5371 static int
5372 register_addr (const struct usrregs_info *usrregs, int regnum)
5373 {
5374 int addr;
5375
5376 if (regnum < 0 || regnum >= usrregs->num_regs)
5377 error ("Invalid register number %d.", regnum);
5378
5379 addr = usrregs->regmap[regnum];
5380
5381 return addr;
5382 }
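/* Fetch one register, REGNO, from the inferior's USER area via
   PTRACE_PEEKUSER, and supply it to REGCACHE.  On a ptrace error,
   the register is marked unavailable.  */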
5385 void
5386 linux_process_target::fetch_register (const usrregs_info *usrregs,
5387 regcache *regcache, int regno)
5388 {
5389 CORE_ADDR regaddr;
5390 int i, size;
5391 char *buf;
5392 int pid;
5393
5394 if (regno >= usrregs->num_regs)
5395 return;
5396 if (low_cannot_fetch_register (regno))
5397 return;
5398
5399 regaddr = register_addr (usrregs, regno);
5400 if (regaddr == -1)
5401 return;
5402
5403 size = ((register_size (regcache->tdesc, regno)
5404 + sizeof (PTRACE_XFER_TYPE) - 1)
5405 & -sizeof (PTRACE_XFER_TYPE));
5406 buf = (char *) alloca (size);
5407
5408 pid = lwpid_of (current_thread);
5409 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5410 {
5411 errno = 0;
5412 *(PTRACE_XFER_TYPE *) (buf + i) =
5413 ptrace (PTRACE_PEEKUSER, pid,
5414 /* Coerce to a uintptr_t first to avoid potential gcc warning
5415 of coercing an 8 byte integer to a 4 byte pointer. */
5416 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5417 regaddr += sizeof (PTRACE_XFER_TYPE);
5418 if (errno != 0)
5419 {
5420 /* Mark register REGNO unavailable. */
5421 supply_register (regcache, regno, NULL);
5422 return;
5423 }
5424 }
5425
5426 if (the_low_target.supply_ptrace_register)
5427 the_low_target.supply_ptrace_register (regcache, regno, buf);
5428 else
5429 supply_register (regcache, regno, buf);
5430 }
5431
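/* Collect register REGNO from REGCACHE and write it to the
   inferior's USER area via PTRACE_POKEUSER.  An ESRCH error is
   ignored, as it means the process is already gone.  */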
5432 void
5433 linux_process_target::store_register (const usrregs_info *usrregs,
5434 regcache *regcache, int regno)
5435 {
5436 CORE_ADDR regaddr;
5437 int i, size;
5438 char *buf;
5439 int pid;
5440
5441 if (regno >= usrregs->num_regs)
5442 return;
5443 if (low_cannot_store_register (regno))
5444 return;
5445
5446 regaddr = register_addr (usrregs, regno);
5447 if (regaddr == -1)
5448 return;
5449
5450 size = ((register_size (regcache->tdesc, regno)
5451 + sizeof (PTRACE_XFER_TYPE) - 1)
5452 & -sizeof (PTRACE_XFER_TYPE));
5453 buf = (char *) alloca (size);
5454 memset (buf, 0, size);
5455
5456 if (the_low_target.collect_ptrace_register)
5457 the_low_target.collect_ptrace_register (regcache, regno, buf);
5458 else
5459 collect_register (regcache, regno, buf);
5460
5461 pid = lwpid_of (current_thread);
5462 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5463 {
5464 errno = 0;
5465 ptrace (PTRACE_POKEUSER, pid,
5466 /* Coerce to a uintptr_t first to avoid potential gcc warning
5467 about coercing an 8 byte integer to a 4 byte pointer. */
5468 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5469 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5470 if (errno != 0)
5471 {
5472 /* At this point, ESRCH should mean the process is
5473 already gone, in which case we simply ignore attempts
5474 to change its registers. See also the related
5475 comment in resume_one_lwp. */
5476 if (errno == ESRCH)
5477 return;
5480 if (!low_cannot_store_register (regno))
5481 error ("writing register %d: %s", regno, safe_strerror (errno));
5482 }
5483 regaddr += sizeof (PTRACE_XFER_TYPE);
5484 }
5485 }
5486 #endif /* HAVE_LINUX_USRREGS */
5487
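/* Fetch register REGNO, or all registers if REGNO is -1, with
   PTRACE_PEEKUSER.  If ALL is zero, registers that the regsets
   already cover are skipped.  */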
5488 void
5489 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5490 regcache *regcache,
5491 int regno, int all)
5492 {
5493 #ifdef HAVE_LINUX_USRREGS
5494 struct usrregs_info *usr = regs_info->usrregs;
5495
5496 if (regno == -1)
5497 {
5498 for (regno = 0; regno < usr->num_regs; regno++)
5499 if (all || !linux_register_in_regsets (regs_info, regno))
5500 fetch_register (usr, regcache, regno);
5501 }
5502 else
5503 fetch_register (usr, regcache, regno);
5504 #endif
5505 }
5506
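/* Store register REGNO, or all registers if REGNO is -1, with
   PTRACE_POKEUSER.  If ALL is zero, registers that the regsets
   already cover are skipped.  */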
5507 void
5508 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5509 regcache *regcache,
5510 int regno, int all)
5511 {
5512 #ifdef HAVE_LINUX_USRREGS
5513 struct usrregs_info *usr = regs_info->usrregs;
5514
5515 if (regno == -1)
5516 {
5517 for (regno = 0; regno < usr->num_regs; regno++)
5518 if (all || !linux_register_in_regsets (regs_info, regno))
5519 store_register (usr, regcache, regno);
5520 }
5521 else
5522 store_register (usr, regcache, regno);
5523 #endif
5524 }
5525
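/* Implementation of the fetch_registers target op.  Fetch REGNO, or
   all registers if REGNO is -1, trying the low target's fast path
   first, then the regsets, then PTRACE_PEEKUSER.  */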
5526 void
5527 linux_process_target::fetch_registers (regcache *regcache, int regno)
5528 {
5529 int use_regsets;
5530 int all = 0;
5531 const regs_info *regs_info = get_regs_info ();
5532
5533 if (regno == -1)
5534 {
5535 if (regs_info->usrregs != NULL)
5536 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5537 low_fetch_register (regcache, regno);
5538
5539 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5540 if (regs_info->usrregs != NULL)
5541 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5542 }
5543 else
5544 {
5545 if (low_fetch_register (regcache, regno))
5546 return;
5547
5548 use_regsets = linux_register_in_regsets (regs_info, regno);
5549 if (use_regsets)
5550 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5551 regcache);
5552 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5553 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5554 }
5555 }
5556
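/* Implementation of the store_registers target op.  Store REGNO, or
   all registers if REGNO is -1, through the regsets where possible,
   falling back to PTRACE_POKEUSER.  */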
5557 void
5558 linux_process_target::store_registers (regcache *regcache, int regno)
5559 {
5560 int use_regsets;
5561 int all = 0;
5562 const regs_info *regs_info = get_regs_info ();
5563
5564 if (regno == -1)
5565 {
5566 all = regsets_store_inferior_registers (regs_info->regsets_info,
5567 regcache);
5568 if (regs_info->usrregs != NULL)
5569 usr_store_inferior_registers (regs_info, regcache, regno, all);
5570 }
5571 else
5572 {
5573 use_regsets = linux_register_in_regsets (regs_info, regno);
5574 if (use_regsets)
5575 all = regsets_store_inferior_registers (regs_info->regsets_info,
5576 regcache);
5577 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5578 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5579 }
5580 }
5581
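/* Default implementation of the low_fetch_register hook: no register
   needs special treatment, so tell the caller to use the normal
   fetch paths.  */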
5582 bool
5583 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5584 {
5585 return false;
5586 }
5587
5588 /* A wrapper for the read_memory target op. */
5589
5590 static int
5591 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5592 {
5593 return the_target->read_memory (memaddr, myaddr, len);
5594 }
5595
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success
   or the value of errno on failure.  */
5598
5599 int
5600 linux_process_target::read_memory (CORE_ADDR memaddr,
5601 unsigned char *myaddr, int len)
5602 {
5603 int pid = lwpid_of (current_thread);
5604 PTRACE_XFER_TYPE *buffer;
5605 CORE_ADDR addr;
5606 int count;
5607 char filename[64];
5608 int i;
5609 int ret;
5610 int fd;
5611
5612 /* Try using /proc. Don't bother for one word. */
5613 if (len >= 3 * sizeof (long))
5614 {
5615 int bytes;
5616
5617 /* We could keep this file open and cache it - possibly one per
5618 thread. That requires some juggling, but is even faster. */
5619 sprintf (filename, "/proc/%d/mem", pid);
5620 fd = open (filename, O_RDONLY | O_LARGEFILE);
5621 if (fd == -1)
5622 goto no_proc;
5623
5624 /* If pread64 is available, use it. It's faster if the kernel
5625 supports it (only one syscall), and it's 64-bit safe even on
5626 32-bit platforms (for instance, SPARC debugging a SPARC64
5627 application). */
5628 #ifdef HAVE_PREAD64
5629 bytes = pread64 (fd, myaddr, len, memaddr);
5630 #else
5631 bytes = -1;
5632 if (lseek (fd, memaddr, SEEK_SET) != -1)
5633 bytes = read (fd, myaddr, len);
5634 #endif
5635
5636 close (fd);
5637 if (bytes == len)
5638 return 0;
5639
5640 /* Some data was read, we'll try to get the rest with ptrace. */
5641 if (bytes > 0)
5642 {
5643 memaddr += bytes;
5644 myaddr += bytes;
5645 len -= bytes;
5646 }
5647 }
5648
5649 no_proc:
5650 /* Round starting address down to longword boundary. */
5651 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5652 /* Round ending address up; get number of longwords that makes. */
5653 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5654 / sizeof (PTRACE_XFER_TYPE));
5655 /* Allocate buffer of that many longwords. */
5656 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5657
5658 /* Read all the longwords.  */
5659 errno = 0;
5660 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5661 {
5662 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5663 about coercing an 8 byte integer to a 4 byte pointer. */
5664 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5665 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5666 (PTRACE_TYPE_ARG4) 0);
5667 if (errno)
5668 break;
5669 }
5670 ret = errno;
5671
5672 /* Copy appropriate bytes out of the buffer. */
5673 if (i > 0)
5674 {
5675 i *= sizeof (PTRACE_XFER_TYPE);
5676 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5677 memcpy (myaddr,
5678 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5679 i < len ? i : len);
5680 }
5681
5682 return ret;
5683 }
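/* To make the rounding in the ptrace fallback above concrete, here is
   a worked example (numbers invented for illustration): with an
   8-byte PTRACE_XFER_TYPE, a read of LEN == 5 bytes at MEMADDR ==
   0x1003 rounds ADDR down to 0x1000, computes COUNT == ((0x1008
   - 0x1000) + 7) / 8 == 1, peeks a single longword, and then copies
   5 bytes starting at offset 0x1003 & 7 == 3 of the buffer.  */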
5684
5685 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5686 memory at MEMADDR. On failure (cannot write to the inferior)
5687 returns the value of errno. Always succeeds if LEN is zero. */
5688
5689 int
5690 linux_process_target::write_memory (CORE_ADDR memaddr,
5691 const unsigned char *myaddr, int len)
5692 {
5693 int i;
5694 /* Round starting address down to longword boundary. */
5695 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5696 /* Round ending address up; get number of longwords that makes. */
5697 int count
5698 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5699 / sizeof (PTRACE_XFER_TYPE);
5700
5701 /* Allocate buffer of that many longwords. */
5702 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5703
5704 int pid = lwpid_of (current_thread);
5705
5706 if (len == 0)
5707 {
5708 /* Zero length write always succeeds. */
5709 return 0;
5710 }
5711
5712 if (debug_threads)
5713 {
5714 /* Dump up to four bytes. */
5715 char str[4 * 2 + 1];
5716 char *p = str;
5717 int dump = len < 4 ? len : 4;
5718
5719 for (i = 0; i < dump; i++)
5720 {
5721 sprintf (p, "%02x", myaddr[i]);
5722 p += 2;
5723 }
5724 *p = '\0';
5725
5726 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5727 str, (long) memaddr, pid);
5728 }
5729
5730 /* Fill start and end extra bytes of buffer with existing memory data. */
5731
5732 errno = 0;
5733 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5734 about coercing an 8 byte integer to a 4 byte pointer. */
5735 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5736 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5737 (PTRACE_TYPE_ARG4) 0);
5738 if (errno)
5739 return errno;
5740
5741 if (count > 1)
5742 {
5743 errno = 0;
5744 buffer[count - 1]
5745 = ptrace (PTRACE_PEEKTEXT, pid,
5746 /* Coerce to a uintptr_t first to avoid potential gcc warning
5747 about coercing an 8 byte integer to a 4 byte pointer. */
5748 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5749 * sizeof (PTRACE_XFER_TYPE)),
5750 (PTRACE_TYPE_ARG4) 0);
5751 if (errno)
5752 return errno;
5753 }
5754
5755 /* Copy data to be written over corresponding part of buffer. */
5756
5757 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5758 myaddr, len);
5759
5760 /* Write the entire buffer. */
5761
5762 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5763 {
5764 errno = 0;
5765 ptrace (PTRACE_POKETEXT, pid,
5766 /* Coerce to a uintptr_t first to avoid potential gcc warning
5767 about coercing an 8 byte integer to a 4 byte pointer. */
5768 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5769 (PTRACE_TYPE_ARG4) buffer[i]);
5770 if (errno)
5771 return errno;
5772 }
5773
5774 return 0;
5775 }
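/* The poke loop above is a read-modify-write cycle: the first and
   last longwords are peeked beforehand so that bytes outside
   [MEMADDR, MEMADDR + LEN) keep their current values when whole
   longwords are written back.  For example (numbers invented for
   illustration), writing 2 bytes at 0x1007 with an 8-byte
   PTRACE_XFER_TYPE covers the longwords at 0x1000 and 0x1008, so
   both are read first and only the two bytes at offset 7 of the
   16-byte buffer are replaced.  */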
5776
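/* Initialize thread_db support for the current process if it has not
   been initialized yet; a no-op when gdbserver is built without
   USE_THREAD_DB.  */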
5777 void
5778 linux_process_target::look_up_symbols ()
5779 {
5780 #ifdef USE_THREAD_DB
5781 struct process_info *proc = current_process ();
5782
5783 if (proc->priv->thread_db != NULL)
5784 return;
5785
5786 thread_db_init ();
5787 #endif
5788 }
5789
5790 void
5791 linux_process_target::request_interrupt ()
5792 {
5793 /* Send a SIGINT to the process group. This acts just like the user
5794 typed a ^C on the controlling terminal. */
5795 ::kill (-signal_pid, SIGINT);
5796 }
5797
5798 bool
5799 linux_process_target::supports_read_auxv ()
5800 {
5801 return true;
5802 }
5803
5804 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5805 to debugger memory starting at MYADDR. */
5806
5807 int
5808 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5809 unsigned int len)
5810 {
5811 char filename[PATH_MAX];
5812 int fd, n;
5813 int pid = lwpid_of (current_thread);
5814
5815 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5816
5817 fd = open (filename, O_RDONLY);
5818 if (fd < 0)
5819 return -1;
5820
5821 if (offset != (CORE_ADDR) 0
5822 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5823 n = -1;
5824 else
5825 n = read (fd, myaddr, len);
5826
5827 close (fd);
5828
5829 return n;
5830 }
5831
5832 /* These breakpoint- and watchpoint-related wrapper functions simply
5833 pass the call on if the target has registered a corresponding
5834 function.  */
5835
5836 bool
5837 linux_process_target::supports_z_point_type (char z_type)
5838 {
5839 return (the_low_target.supports_z_point_type != NULL
5840 && the_low_target.supports_z_point_type (z_type));
5841 }
5842
5843 int
5844 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5845 int size, raw_breakpoint *bp)
5846 {
5847 if (type == raw_bkpt_type_sw)
5848 return insert_memory_breakpoint (bp);
5849 else if (the_low_target.insert_point != NULL)
5850 return the_low_target.insert_point (type, addr, size, bp);
5851 else
5852 /* Unsupported (see target.h). */
5853 return 1;
5854 }
5855
5856 int
5857 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5858 int size, raw_breakpoint *bp)
5859 {
5860 if (type == raw_bkpt_type_sw)
5861 return remove_memory_breakpoint (bp);
5862 else if (the_low_target.remove_point != NULL)
5863 return the_low_target.remove_point (type, addr, size, bp);
5864 else
5865 /* Unsupported (see target.h). */
5866 return 1;
5867 }
5868
5869 /* Implement the stopped_by_sw_breakpoint target_ops
5870 method. */
5871
5872 bool
5873 linux_process_target::stopped_by_sw_breakpoint ()
5874 {
5875 struct lwp_info *lwp = get_thread_lwp (current_thread);
5876
5877 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5878 }
5879
5880 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5881 method. */
5882
5883 bool
5884 linux_process_target::supports_stopped_by_sw_breakpoint ()
5885 {
5886 return USE_SIGTRAP_SIGINFO;
5887 }
5888
5889 /* Implement the stopped_by_hw_breakpoint target_ops
5890 method. */
5891
5892 bool
5893 linux_process_target::stopped_by_hw_breakpoint ()
5894 {
5895 struct lwp_info *lwp = get_thread_lwp (current_thread);
5896
5897 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5898 }
5899
5900 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5901 method. */
5902
5903 bool
5904 linux_process_target::supports_stopped_by_hw_breakpoint ()
5905 {
5906 return USE_SIGTRAP_SIGINFO;
5907 }
5908
5909 /* Implement the supports_hardware_single_step target_ops method. */
5910
5911 bool
5912 linux_process_target::supports_hardware_single_step ()
5913 {
5914 return can_hardware_single_step ();
5915 }
5916
5917 bool
5918 linux_process_target::stopped_by_watchpoint ()
5919 {
5920 struct lwp_info *lwp = get_thread_lwp (current_thread);
5921
5922 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5923 }
5924
5925 CORE_ADDR
5926 linux_process_target::stopped_data_address ()
5927 {
5928 struct lwp_info *lwp = get_thread_lwp (current_thread);
5929
5930 return lwp->stopped_data_address;
5931 }
5932
5933 /* This is only used for targets that define PT_TEXT_ADDR,
5934 PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
5935 target presumably has other ways of acquiring this information,
5936 such as loadmaps.  */
5937
5938 bool
5939 linux_process_target::supports_read_offsets ()
5940 {
5941 #ifdef SUPPORTS_READ_OFFSETS
5942 return true;
5943 #else
5944 return false;
5945 #endif
5946 }
5947
5948 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5949 to tell gdb about. */
5950
5951 int
5952 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5953 {
5954 #ifdef SUPPORTS_READ_OFFSETS
5955 unsigned long text, text_end, data;
5956 int pid = lwpid_of (current_thread);
5957
5958 errno = 0;
5959
5960 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5961 (PTRACE_TYPE_ARG4) 0);
5962 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5963 (PTRACE_TYPE_ARG4) 0);
5964 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5965 (PTRACE_TYPE_ARG4) 0);
5966
5967 if (errno == 0)
5968 {
5969 /* Both text and data offsets produced at compile-time (and so
5970 used by gdb) are relative to the beginning of the program,
5971 with the data segment immediately following the text segment.
5972 However, the actual runtime layout in memory may put the data
5973 somewhere else, so when we send gdb a data base-address, we
5974 use the real data base address and subtract the compile-time
5975 data base-address from it (which is just the length of the
5976 text segment). BSS immediately follows data in both
5977 cases. */
5978 *text_p = text;
5979 *data_p = data - (text_end - text);
5980
5981 return 1;
5982 }
5983 return 0;
5984 #else
5985 gdb_assert_not_reached ("target op read_offsets not supported");
5986 #endif
5987 }
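/* A worked example of the offset computation above (addresses
   invented for illustration): if the kernel reports text == 0x10000,
   text_end == 0x18000 and data == 0x40000, the text segment is
   0x8000 bytes long, so *DATA_P is set to 0x40000 - 0x8000 ==
   0x38000.  Adding that to the compile-time data addresses (which
   begin right after the text segment, at 0x8000) yields the real
   runtime data addresses starting at 0x40000.  */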
5988
5989 bool
5990 linux_process_target::supports_get_tls_address ()
5991 {
5992 #ifdef USE_THREAD_DB
5993 return true;
5994 #else
5995 return false;
5996 #endif
5997 }
5998
5999 int
6000 linux_process_target::get_tls_address (thread_info *thread,
6001 CORE_ADDR offset,
6002 CORE_ADDR load_module,
6003 CORE_ADDR *address)
6004 {
6005 #ifdef USE_THREAD_DB
6006 return thread_db_get_tls_address (thread, offset, load_module, address);
6007 #else
6008 return -1;
6009 #endif
6010 }
6011
6012 bool
6013 linux_process_target::supports_qxfer_osdata ()
6014 {
6015 return true;
6016 }
6017
6018 int
6019 linux_process_target::qxfer_osdata (const char *annex,
6020 unsigned char *readbuf,
6021 unsigned const char *writebuf,
6022 CORE_ADDR offset, int len)
6023 {
6024 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6025 }
6026
6027 /* Convert a native/host siginfo object into/from the siginfo in the
6028 layout of the inferior's architecture.  */
6029
6030 static void
6031 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6032 {
6033 int done = 0;
6034
6035 if (the_low_target.siginfo_fixup != NULL)
6036 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6037
6038 /* If there was no callback, or the callback didn't do anything,
6039 then just do a straight memcpy. */
6040 if (!done)
6041 {
6042 if (direction == 1)
6043 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6044 else
6045 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6046 }
6047 }
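/* Note on DIRECTION above: a value of 1 copies from the
   inferior-layout buffer INF_SIGINFO into the native SIGINFO, while
   0 copies from the native SIGINFO into INF_SIGINFO, as the default
   memcpy branches show.  */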
6048
6049 bool
6050 linux_process_target::supports_qxfer_siginfo ()
6051 {
6052 return true;
6053 }
6054
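/* Read or write the current thread's siginfo via PTRACE_GETSIGINFO /
   PTRACE_SETSIGINFO, converting between the native layout and the
   inferior's layout with siginfo_fixup as needed.  */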
6055 int
6056 linux_process_target::qxfer_siginfo (const char *annex,
6057 unsigned char *readbuf,
6058 unsigned const char *writebuf,
6059 CORE_ADDR offset, int len)
6060 {
6061 int pid;
6062 siginfo_t siginfo;
6063 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6064
6065 if (current_thread == NULL)
6066 return -1;
6067
6068 pid = lwpid_of (current_thread);
6069
6070 if (debug_threads)
6071 debug_printf ("%s siginfo for lwp %d.\n",
6072 readbuf != NULL ? "Reading" : "Writing",
6073 pid);
6074
6075 if (offset >= sizeof (siginfo))
6076 return -1;
6077
6078 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6079 return -1;
6080
6081 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6082 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6083 inferior with a 64-bit GDBSERVER should look the same as debugging it
6084 with a 32-bit GDBSERVER, we need to convert it. */
6085 siginfo_fixup (&siginfo, inf_siginfo, 0);
6086
6087 if (offset + len > sizeof (siginfo))
6088 len = sizeof (siginfo) - offset;
6089
6090 if (readbuf != NULL)
6091 memcpy (readbuf, inf_siginfo + offset, len);
6092 else
6093 {
6094 memcpy (inf_siginfo + offset, writebuf, len);
6095
6096 /* Convert back to ptrace layout before flushing it out. */
6097 siginfo_fixup (&siginfo, inf_siginfo, 1);
6098
6099 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6100 return -1;
6101 }
6102
6103 return len;
6104 }
6105
6106 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6107 it lets us notice when children change state; it also acts as the
6108 handler for the sigsuspend in my_waitpid.  */
6109
6110 static void
6111 sigchld_handler (int signo)
6112 {
6113 int old_errno = errno;
6114
6115 if (debug_threads)
6116 {
6117 do
6118 {
6119 /* Use the async signal safe debug function. */
6120 if (debug_write ("sigchld_handler\n",
6121 sizeof ("sigchld_handler\n") - 1) < 0)
6122 break; /* just ignore */
6123 } while (0);
6124 }
6125
6126 if (target_is_async_p ())
6127 async_file_mark (); /* trigger a linux_wait */
6128
6129 errno = old_errno;
6130 }
6131
6132 bool
6133 linux_process_target::supports_non_stop ()
6134 {
6135 return true;
6136 }
6137
6138 bool
6139 linux_process_target::async (bool enable)
6140 {
6141 bool previous = target_is_async_p ();
6142
6143 if (debug_threads)
6144 debug_printf ("linux_async (%d), previous=%d\n",
6145 enable, previous);
6146
6147 if (previous != enable)
6148 {
6149 sigset_t mask;
6150 sigemptyset (&mask);
6151 sigaddset (&mask, SIGCHLD);
6152
6153 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6154
6155 if (enable)
6156 {
6157 if (pipe (linux_event_pipe) == -1)
6158 {
6159 linux_event_pipe[0] = -1;
6160 linux_event_pipe[1] = -1;
6161 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6162
6163 warning ("creating event pipe failed.");
6164 return previous;
6165 }
6166
6167 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6168 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6169
6170 /* Register the event loop handler. */
6171 add_file_handler (linux_event_pipe[0],
6172 handle_target_event, NULL);
6173
6174 /* Always trigger a linux_wait. */
6175 async_file_mark ();
6176 }
6177 else
6178 {
6179 delete_file_handler (linux_event_pipe[0]);
6180
6181 close (linux_event_pipe[0]);
6182 close (linux_event_pipe[1]);
6183 linux_event_pipe[0] = -1;
6184 linux_event_pipe[1] = -1;
6185 }
6186
6187 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6188 }
6189
6190 return previous;
6191 }
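/* The event pipe set up above is the usual self-pipe trick:
   sigchld_handler marks the pipe (via async_file_mark) and the event
   loop watches the read end, which turns the asynchronous SIGCHLD
   into an event the main loop can wait on.  */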
6192
6193 int
6194 linux_process_target::start_non_stop (bool nonstop)
6195 {
6196 /* Register or unregister from event-loop accordingly. */
6197 target_async (nonstop);
6198
6199 if (target_is_async_p () != (nonstop != false))
6200 return -1;
6201
6202 return 0;
6203 }
6204
6205 bool
6206 linux_process_target::supports_multi_process ()
6207 {
6208 return true;
6209 }
6210
6211 /* Check if fork events are supported. */
6212
6213 bool
6214 linux_process_target::supports_fork_events ()
6215 {
6216 return linux_supports_tracefork ();
6217 }
6218
6219 /* Check if vfork events are supported. */
6220
6221 bool
6222 linux_process_target::supports_vfork_events ()
6223 {
6224 return linux_supports_tracefork ();
6225 }
6226
6227 /* Check if exec events are supported. */
6228
6229 bool
6230 linux_process_target::supports_exec_events ()
6231 {
6232 return linux_supports_traceexec ();
6233 }
6234
6235 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6236 ptrace flags for all inferiors. This is in case the new GDB connection
6237 doesn't support the same set of events that the previous one did. */
6238
6239 void
6240 linux_process_target::handle_new_gdb_connection ()
6241 {
6242 /* Request that all the lwps reset their ptrace options. */
6243 for_each_thread ([] (thread_info *thread)
6244 {
6245 struct lwp_info *lwp = get_thread_lwp (thread);
6246
6247 if (!lwp->stopped)
6248 {
6249 /* Stop the lwp so we can modify its ptrace options. */
6250 lwp->must_set_ptrace_flags = 1;
6251 linux_stop_lwp (lwp);
6252 }
6253 else
6254 {
6255 /* Already stopped; go ahead and set the ptrace options. */
6256 struct process_info *proc = find_process_pid (pid_of (thread));
6257 int options = linux_low_ptrace_options (proc->attached);
6258
6259 linux_enable_event_reporting (lwpid_of (thread), options);
6260 lwp->must_set_ptrace_flags = 0;
6261 }
6262 });
6263 }
6264
6265 int
6266 linux_process_target::handle_monitor_command (char *mon)
6267 {
6268 #ifdef USE_THREAD_DB
6269 return thread_db_handle_monitor_command (mon);
6270 #else
6271 return 0;
6272 #endif
6273 }
6274
6275 int
6276 linux_process_target::core_of_thread (ptid_t ptid)
6277 {
6278 return linux_common_core_of_thread (ptid);
6279 }
6280
6281 bool
6282 linux_process_target::supports_disable_randomization ()
6283 {
6284 #ifdef HAVE_PERSONALITY
6285 return true;
6286 #else
6287 return false;
6288 #endif
6289 }
6290
6291 bool
6292 linux_process_target::supports_agent ()
6293 {
6294 return true;
6295 }
6296
6297 bool
6298 linux_process_target::supports_range_stepping ()
6299 {
6300 if (supports_software_single_step ())
6301 return true;
6302 if (*the_low_target.supports_range_stepping == NULL)
6303 return false;
6304
6305 return (*the_low_target.supports_range_stepping) ();
6306 }
6307
6308 bool
6309 linux_process_target::supports_pid_to_exec_file ()
6310 {
6311 return true;
6312 }
6313
6314 char *
6315 linux_process_target::pid_to_exec_file (int pid)
6316 {
6317 return linux_proc_pid_to_exec_file (pid);
6318 }
6319
6320 bool
6321 linux_process_target::supports_multifs ()
6322 {
6323 return true;
6324 }
6325
6326 int
6327 linux_process_target::multifs_open (int pid, const char *filename,
6328 int flags, mode_t mode)
6329 {
6330 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6331 }
6332
6333 int
6334 linux_process_target::multifs_unlink (int pid, const char *filename)
6335 {
6336 return linux_mntns_unlink (pid, filename);
6337 }
6338
6339 ssize_t
6340 linux_process_target::multifs_readlink (int pid, const char *filename,
6341 char *buf, size_t bufsiz)
6342 {
6343 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6344 }
6345
6346 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6347 struct target_loadseg
6348 {
6349 /* Core address to which the segment is mapped. */
6350 Elf32_Addr addr;
6351 /* VMA recorded in the program header. */
6352 Elf32_Addr p_vaddr;
6353 /* Size of this segment in memory. */
6354 Elf32_Word p_memsz;
6355 };
6356
6357 # if defined PT_GETDSBT
6358 struct target_loadmap
6359 {
6360 /* Protocol version number, must be zero. */
6361 Elf32_Word version;
6362 /* Pointer to the DSBT table, its size, and the DSBT index. */
6363 unsigned *dsbt_table;
6364 unsigned dsbt_size, dsbt_index;
6365 /* Number of segments in this map. */
6366 Elf32_Word nsegs;
6367 /* The actual memory map. */
6368 struct target_loadseg segs[/*nsegs*/];
6369 };
6370 # define LINUX_LOADMAP PT_GETDSBT
6371 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6372 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6373 # else
6374 struct target_loadmap
6375 {
6376 /* Protocol version number, must be zero. */
6377 Elf32_Half version;
6378 /* Number of segments in this map. */
6379 Elf32_Half nsegs;
6380 /* The actual memory map. */
6381 struct target_loadseg segs[/*nsegs*/];
6382 };
6383 # define LINUX_LOADMAP PTRACE_GETFDPIC
6384 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6385 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6386 # endif
6387
6388 bool
6389 linux_process_target::supports_read_loadmap ()
6390 {
6391 return true;
6392 }
6393
6394 int
6395 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6396 unsigned char *myaddr, unsigned int len)
6397 {
6398 int pid = lwpid_of (current_thread);
6399 int addr = -1;
6400 struct target_loadmap *data = NULL;
6401 unsigned int actual_length, copy_length;
6402
6403 if (strcmp (annex, "exec") == 0)
6404 addr = (int) LINUX_LOADMAP_EXEC;
6405 else if (strcmp (annex, "interp") == 0)
6406 addr = (int) LINUX_LOADMAP_INTERP;
6407 else
6408 return -1;
6409
6410 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6411 return -1;
6412
6413 if (data == NULL)
6414 return -1;
6415
6416 actual_length = sizeof (struct target_loadmap)
6417 + sizeof (struct target_loadseg) * data->nsegs;
6418
6419 if (offset < 0 || offset > actual_length)
6420 return -1;
6421
6422 copy_length = actual_length - offset < len ? actual_length - offset : len;
6423 memcpy (myaddr, (char *) data + offset, copy_length);
6424 return copy_length;
6425 }
6426 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6427
6428 void
6429 linux_process_target::process_qsupported (char **features, int count)
6430 {
6431 if (the_low_target.process_qsupported != NULL)
6432 the_low_target.process_qsupported (features, count);
6433 }
6434
6435 bool
6436 linux_process_target::supports_catch_syscall ()
6437 {
6438 return (the_low_target.get_syscall_trapinfo != NULL
6439 && linux_supports_tracesysgood ());
6440 }
6441
6442 int
6443 linux_process_target::get_ipa_tdesc_idx ()
6444 {
6445 if (the_low_target.get_ipa_tdesc_idx == NULL)
6446 return 0;
6447
6448 return (*the_low_target.get_ipa_tdesc_idx) ();
6449 }
6450
6451 bool
6452 linux_process_target::supports_tracepoints ()
6453 {
6454 if (*the_low_target.supports_tracepoints == NULL)
6455 return false;
6456
6457 return (*the_low_target.supports_tracepoints) ();
6458 }
6459
6460 CORE_ADDR
6461 linux_process_target::read_pc (regcache *regcache)
6462 {
6463 if (!low_supports_breakpoints ())
6464 return 0;
6465
6466 return low_get_pc (regcache);
6467 }
6468
6469 void
6470 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6471 {
6472 gdb_assert (low_supports_breakpoints ());
6473
6474 low_set_pc (regcache, pc);
6475 }
6476
6477 bool
6478 linux_process_target::supports_thread_stopped ()
6479 {
6480 return true;
6481 }
6482
6483 bool
6484 linux_process_target::thread_stopped (thread_info *thread)
6485 {
6486 return get_thread_lwp (thread)->stopped;
6487 }
6488
6489 /* This exposes stop-all-threads functionality to other modules. */
6490
6491 void
6492 linux_process_target::pause_all (bool freeze)
6493 {
6494 stop_all_lwps (freeze, NULL);
6495 }
6496
6497 /* This exposes unstop-all-threads functionality to other gdbserver
6498 modules. */
6499
6500 void
6501 linux_process_target::unpause_all (bool unfreeze)
6502 {
6503 unstop_all_lwps (unfreeze, NULL);
6504 }
6505
6506 int
6507 linux_process_target::prepare_to_access_memory ()
6508 {
6509 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6510 running LWP. */
6511 if (non_stop)
6512 target_pause_all (true);
6513 return 0;
6514 }
6515
6516 void
6517 linux_process_target::done_accessing_memory ()
6518 {
6519 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6520 running LWP. */
6521 if (non_stop)
6522 target_unpause_all (true);
6523 }
6524
6525 bool
6526 linux_process_target::supports_fast_tracepoints ()
6527 {
6528 return the_low_target.install_fast_tracepoint_jump_pad != nullptr;
6529 }
6530
6531 int
6532 linux_process_target::install_fast_tracepoint_jump_pad
6533 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
6534 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
6535 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
6536 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
6537 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
6538 char *err)
6539 {
6540 return (*the_low_target.install_fast_tracepoint_jump_pad)
6541 (tpoint, tpaddr, collector, lockaddr, orig_size,
6542 jump_entry, trampoline, trampoline_size,
6543 jjump_pad_insn, jjump_pad_insn_size,
6544 adjusted_insn_addr, adjusted_insn_addr_end,
6545 err);
6546 }
6547
6548 emit_ops *
6549 linux_process_target::emit_ops ()
6550 {
6551 if (the_low_target.emit_ops != NULL)
6552 return (*the_low_target.emit_ops) ();
6553 else
6554 return NULL;
6555 }
6556
6557 int
6558 linux_process_target::get_min_fast_tracepoint_insn_len ()
6559 {
6560 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6561 }
6562
6563 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6564
6565 static int
6566 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6567 CORE_ADDR *phdr_memaddr, int *num_phdr)
6568 {
6569 char filename[PATH_MAX];
6570 int fd;
6571 const int auxv_size = is_elf64
6572 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6573 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6574
6575 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6576
6577 fd = open (filename, O_RDONLY);
6578 if (fd < 0)
6579 return 1;
6580
6581 *phdr_memaddr = 0;
6582 *num_phdr = 0;
6583 while (read (fd, buf, auxv_size) == auxv_size
6584 && (*phdr_memaddr == 0 || *num_phdr == 0))
6585 {
6586 if (is_elf64)
6587 {
6588 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6589
6590 switch (aux->a_type)
6591 {
6592 case AT_PHDR:
6593 *phdr_memaddr = aux->a_un.a_val;
6594 break;
6595 case AT_PHNUM:
6596 *num_phdr = aux->a_un.a_val;
6597 break;
6598 }
6599 }
6600 else
6601 {
6602 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6603
6604 switch (aux->a_type)
6605 {
6606 case AT_PHDR:
6607 *phdr_memaddr = aux->a_un.a_val;
6608 break;
6609 case AT_PHNUM:
6610 *num_phdr = aux->a_un.a_val;
6611 break;
6612 }
6613 }
6614 }
6615
6616 close (fd);
6617
6618 if (*phdr_memaddr == 0 || *num_phdr == 0)
6619 {
6620 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6621 "phdr_memaddr = %ld, phdr_num = %d",
6622 (long) *phdr_memaddr, *num_phdr);
6623 return 2;
6624 }
6625
6626 return 0;
6627 }
6628
6629 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6630
6631 static CORE_ADDR
6632 get_dynamic (const int pid, const int is_elf64)
6633 {
6634 CORE_ADDR phdr_memaddr, relocation;
6635 int num_phdr, i;
6636 unsigned char *phdr_buf;
6637 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6638
6639 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6640 return 0;
6641
6642 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6643 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6644
6645 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6646 return 0;
6647
6648 /* Compute relocation: it is expected to be 0 for "regular" executables,
6649 non-zero for PIE ones. */
6650 relocation = -1;
6651 for (i = 0; relocation == -1 && i < num_phdr; i++)
6652 if (is_elf64)
6653 {
6654 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6655
6656 if (p->p_type == PT_PHDR)
6657 relocation = phdr_memaddr - p->p_vaddr;
6658 }
6659 else
6660 {
6661 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6662
6663 if (p->p_type == PT_PHDR)
6664 relocation = phdr_memaddr - p->p_vaddr;
6665 }
6666
6667 if (relocation == -1)
6668 {
6669 /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6670 real world executables, including PIE executables, always have
6671 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6672 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6673 provides DT_DEBUG anyway (fpc binaries are statically linked).
6674
6675 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6676
6677 GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
6678
6679 return 0;
6680 }
6681
6682 for (i = 0; i < num_phdr; i++)
6683 {
6684 if (is_elf64)
6685 {
6686 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6687
6688 if (p->p_type == PT_DYNAMIC)
6689 return p->p_vaddr + relocation;
6690 }
6691 else
6692 {
6693 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6694
6695 if (p->p_type == PT_DYNAMIC)
6696 return p->p_vaddr + relocation;
6697 }
6698 }
6699
6700 return 0;
6701 }
6702
6703 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6704 can be 0 if the inferior does not yet have the library list initialized.
6705 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6706 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6707
6708 static CORE_ADDR
6709 get_r_debug (const int pid, const int is_elf64)
6710 {
6711 CORE_ADDR dynamic_memaddr;
6712 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6713 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6714 CORE_ADDR map = -1;
6715
6716 dynamic_memaddr = get_dynamic (pid, is_elf64);
6717 if (dynamic_memaddr == 0)
6718 return map;
6719
6720 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6721 {
6722 if (is_elf64)
6723 {
6724 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6725 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6726 union
6727 {
6728 Elf64_Xword map;
6729 unsigned char buf[sizeof (Elf64_Xword)];
6730 }
6731 rld_map;
6732 #endif
6733 #ifdef DT_MIPS_RLD_MAP
6734 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6735 {
6736 if (linux_read_memory (dyn->d_un.d_val,
6737 rld_map.buf, sizeof (rld_map.buf)) == 0)
6738 return rld_map.map;
6739 else
6740 break;
6741 }
6742 #endif /* DT_MIPS_RLD_MAP */
6743 #ifdef DT_MIPS_RLD_MAP_REL
6744 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6745 {
6746 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6747 rld_map.buf, sizeof (rld_map.buf)) == 0)
6748 return rld_map.map;
6749 else
6750 break;
6751 }
6752 #endif /* DT_MIPS_RLD_MAP_REL */
6753
6754 if (dyn->d_tag == DT_DEBUG && map == -1)
6755 map = dyn->d_un.d_val;
6756
6757 if (dyn->d_tag == DT_NULL)
6758 break;
6759 }
6760 else
6761 {
6762 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6763 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6764 union
6765 {
6766 Elf32_Word map;
6767 unsigned char buf[sizeof (Elf32_Word)];
6768 }
6769 rld_map;
6770 #endif
6771 #ifdef DT_MIPS_RLD_MAP
6772 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6773 {
6774 if (linux_read_memory (dyn->d_un.d_val,
6775 rld_map.buf, sizeof (rld_map.buf)) == 0)
6776 return rld_map.map;
6777 else
6778 break;
6779 }
6780 #endif /* DT_MIPS_RLD_MAP */
6781 #ifdef DT_MIPS_RLD_MAP_REL
6782 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6783 {
6784 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6785 rld_map.buf, sizeof (rld_map.buf)) == 0)
6786 return rld_map.map;
6787 else
6788 break;
6789 }
6790 #endif /* DT_MIPS_RLD_MAP_REL */
6791
6792 if (dyn->d_tag == DT_DEBUG && map == -1)
6793 map = dyn->d_un.d_val;
6794
6795 if (dyn->d_tag == DT_NULL)
6796 break;
6797 }
6798
6799 dynamic_memaddr += dyn_size;
6800 }
6801
6802 return map;
6803 }
6804
6805 /* Read one pointer from MEMADDR in the inferior. */
6806
6807 static int
6808 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6809 {
6810 int ret;
6811
6812 /* Go through a union so this works on either big or little endian
6813 hosts, when the inferior's pointer size is smaller than the size
6814 of CORE_ADDR.  It is assumed the inferior's endianness is the
6815 same as the superior's.  */
6816 union
6817 {
6818 CORE_ADDR core_addr;
6819 unsigned int ui;
6820 unsigned char uc;
6821 } addr;
6822
6823 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6824 if (ret == 0)
6825 {
6826 if (ptr_size == sizeof (CORE_ADDR))
6827 *ptr = addr.core_addr;
6828 else if (ptr_size == sizeof (unsigned int))
6829 *ptr = addr.ui;
6830 else
6831 gdb_assert_not_reached ("unhandled pointer size");
6832 }
6833 return ret;
6834 }
6835
6836 bool
6837 linux_process_target::supports_qxfer_libraries_svr4 ()
6838 {
6839 return true;
6840 }
6841
6842 struct link_map_offsets
6843 {
6844 /* Offset and size of r_debug.r_version. */
6845 int r_version_offset;
6846
6847 /* Offset and size of r_debug.r_map. */
6848 int r_map_offset;
6849
6850 /* Offset to l_addr field in struct link_map. */
6851 int l_addr_offset;
6852
6853 /* Offset to l_name field in struct link_map. */
6854 int l_name_offset;
6855
6856 /* Offset to l_ld field in struct link_map. */
6857 int l_ld_offset;
6858
6859 /* Offset to l_next field in struct link_map. */
6860 int l_next_offset;
6861
6862 /* Offset to l_prev field in struct link_map. */
6863 int l_prev_offset;
6864 };
6865
6866 /* Construct qXfer:libraries-svr4:read reply. */
6867
6868 int
6869 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6870 unsigned char *readbuf,
6871 unsigned const char *writebuf,
6872 CORE_ADDR offset, int len)
6873 {
6874 struct process_info_private *const priv = current_process ()->priv;
6875 char filename[PATH_MAX];
6876 int pid, is_elf64;
6877
6878 static const struct link_map_offsets lmo_32bit_offsets =
6879 {
6880 0, /* r_version offset. */
6881 4, /* r_debug.r_map offset. */
6882 0, /* l_addr offset in link_map. */
6883 4, /* l_name offset in link_map. */
6884 8, /* l_ld offset in link_map. */
6885 12, /* l_next offset in link_map. */
6886 16 /* l_prev offset in link_map. */
6887 };
6888
6889 static const struct link_map_offsets lmo_64bit_offsets =
6890 {
6891 0, /* r_version offset. */
6892 8, /* r_debug.r_map offset. */
6893 0, /* l_addr offset in link_map. */
6894 8, /* l_name offset in link_map. */
6895 16, /* l_ld offset in link_map. */
6896 24, /* l_next offset in link_map. */
6897 32 /* l_prev offset in link_map. */
6898 };
6899 const struct link_map_offsets *lmo;
6900 unsigned int machine;
6901 int ptr_size;
6902 CORE_ADDR lm_addr = 0, lm_prev = 0;
6903 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6904 int header_done = 0;
6905
6906 if (writebuf != NULL)
6907 return -2;
6908 if (readbuf == NULL)
6909 return -1;
6910
6911 pid = lwpid_of (current_thread);
6912 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6913 is_elf64 = elf_64_file_p (filename, &machine);
6914 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6915 ptr_size = is_elf64 ? 8 : 4;
6916
6917 while (annex[0] != '\0')
6918 {
6919 const char *sep;
6920 CORE_ADDR *addrp;
6921 int name_len;
6922
6923 sep = strchr (annex, '=');
6924 if (sep == NULL)
6925 break;
6926
6927 name_len = sep - annex;
6928 if (name_len == 5 && startswith (annex, "start"))
6929 addrp = &lm_addr;
6930 else if (name_len == 4 && startswith (annex, "prev"))
6931 addrp = &lm_prev;
6932 else
6933 {
6934 annex = strchr (sep, ';');
6935 if (annex == NULL)
6936 break;
6937 annex++;
6938 continue;
6939 }
6940
6941 annex = decode_address_to_semicolon (addrp, sep + 1);
6942 }
6943
6944 if (lm_addr == 0)
6945 {
6946 int r_version = 0;
6947
6948 if (priv->r_debug == 0)
6949 priv->r_debug = get_r_debug (pid, is_elf64);
6950
6951 /* We failed to find DT_DEBUG.  This situation will not change
6952 for this inferior, so do not retry.  Report it to GDB as E01;
6953 see GDB's solib-svr4.c for the rationale.  */
6954 if (priv->r_debug == (CORE_ADDR) -1)
6955 return -1;
6956
6957 if (priv->r_debug != 0)
6958 {
6959 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6960 (unsigned char *) &r_version,
6961 sizeof (r_version)) != 0
6962 || r_version != 1)
6963 {
6964 warning ("unexpected r_debug version %d", r_version);
6965 }
6966 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6967 &lm_addr, ptr_size) != 0)
6968 {
6969 warning ("unable to read r_map from 0x%lx",
6970 (long) priv->r_debug + lmo->r_map_offset);
6971 }
6972 }
6973 }
6974
6975 std::string document = "<library-list-svr4 version=\"1.0\"";
6976
6977 while (lm_addr
6978 && read_one_ptr (lm_addr + lmo->l_name_offset,
6979 &l_name, ptr_size) == 0
6980 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6981 &l_addr, ptr_size) == 0
6982 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6983 &l_ld, ptr_size) == 0
6984 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6985 &l_prev, ptr_size) == 0
6986 && read_one_ptr (lm_addr + lmo->l_next_offset,
6987 &l_next, ptr_size) == 0)
6988 {
6989 unsigned char libname[PATH_MAX];
6990
6991 if (lm_prev != l_prev)
6992 {
6993 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6994 (long) lm_prev, (long) l_prev);
6995 break;
6996 }
6997
6998 /* Ignore the first entry even if it has a valid name, as it
6999 corresponds to the main executable.  The first entry should not be
7000 skipped if the dynamic loader was loaded late by a static executable
7001 (see the ignore_first parameter in solib-svr4.c).  But in that case
7002 the main executable has no PT_DYNAMIC present, and this function has
7003 already returned above because get_r_debug failed.  */
7004 if (lm_prev == 0)
7005 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7006 else
7007 {
7008 /* Not checking for error because reading may stop before
7009 we've got PATH_MAX worth of characters. */
7010 libname[0] = '\0';
7011 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7012 libname[sizeof (libname) - 1] = '\0';
7013 if (libname[0] != '\0')
7014 {
7015 if (!header_done)
7016 {
7017 /* Terminate `<library-list-svr4'. */
7018 document += '>';
7019 header_done = 1;
7020 }
7021
7022 string_appendf (document, "<library name=\"");
7023 xml_escape_text_append (&document, (char *) libname);
7024 string_appendf (document, "\" lm=\"0x%lx\" "
7025 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7026 (unsigned long) lm_addr, (unsigned long) l_addr,
7027 (unsigned long) l_ld);
7028 }
7029 }
7030
7031 lm_prev = lm_addr;
7032 lm_addr = l_next;
7033 }
7034
7035 if (!header_done)
7036 {
7037 /* Empty list; terminate `<library-list-svr4'. */
7038 document += "/>";
7039 }
7040 else
7041 document += "</library-list-svr4>";
7042
7043 int document_len = document.length ();
7044 if (offset < document_len)
7045 document_len -= offset;
7046 else
7047 document_len = 0;
7048 if (len > document_len)
7049 len = document_len;
7050
7051 memcpy (readbuf, document.data () + offset, len);
7052
7053 return len;
7054 }
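/* For reference, a reply document built by the code above looks
   roughly like this (addresses invented for illustration):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7fb4000"
                l_addr="0x7ffff7dc1000" l_ld="0x7ffff7f7bba0"/>
     </library-list-svr4>

   GDB's solib-svr4.c parses this document on the other side of the
   connection.  */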
7055
7056 #ifdef HAVE_LINUX_BTRACE
7057
7058 btrace_target_info *
7059 linux_process_target::enable_btrace (ptid_t ptid,
7060 const btrace_config *conf)
7061 {
7062 return linux_enable_btrace (ptid, conf);
7063 }
7064
7065 /* See to_disable_btrace target method. */
7066
7067 int
7068 linux_process_target::disable_btrace (btrace_target_info *tinfo)
7069 {
7070 enum btrace_error err;
7071
7072 err = linux_disable_btrace (tinfo);
7073 return (err == BTRACE_ERR_NONE ? 0 : -1);
7074 }
7075
7076 /* Encode an Intel Processor Trace configuration. */
7077
7078 static void
7079 linux_low_encode_pt_config (struct buffer *buffer,
7080 const struct btrace_data_pt_config *config)
7081 {
7082 buffer_grow_str (buffer, "<pt-config>\n");
7083
7084 switch (config->cpu.vendor)
7085 {
7086 case CV_INTEL:
7087 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7088 "model=\"%u\" stepping=\"%u\"/>\n",
7089 config->cpu.family, config->cpu.model,
7090 config->cpu.stepping);
7091 break;
7092
7093 default:
7094 break;
7095 }
7096
7097 buffer_grow_str (buffer, "</pt-config>\n");
7098 }
7099
7100 /* Encode a raw buffer. */
7101
7102 static void
7103 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7104 unsigned int size)
7105 {
7106 if (size == 0)
7107 return;
7108
7109 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7110 buffer_grow_str (buffer, "<raw>\n");
7111
7112 while (size-- > 0)
7113 {
7114 char elem[2];
7115
7116 elem[0] = tohex ((*data >> 4) & 0xf);
7117 elem[1] = tohex (*data++ & 0xf);
7118
7119 buffer_grow (buffer, elem, 2);
7120 }
7121
7122 buffer_grow_str (buffer, "</raw>\n");
7123 }
7124
7125 /* See to_read_btrace target method. */
7126
7127 int
7128 linux_process_target::read_btrace (btrace_target_info *tinfo,
7129 buffer *buffer,
7130 enum btrace_read_type type)
7131 {
7132 struct btrace_data btrace;
7133 enum btrace_error err;
7134
7135 err = linux_read_btrace (&btrace, tinfo, type);
7136 if (err != BTRACE_ERR_NONE)
7137 {
7138 if (err == BTRACE_ERR_OVERFLOW)
7139 buffer_grow_str0 (buffer, "E.Overflow.");
7140 else
7141 buffer_grow_str0 (buffer, "E.Generic Error.");
7142
7143 return -1;
7144 }
7145
7146 switch (btrace.format)
7147 {
7148 case BTRACE_FORMAT_NONE:
7149 buffer_grow_str0 (buffer, "E.No Trace.");
7150 return -1;
7151
7152 case BTRACE_FORMAT_BTS:
7153 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7154 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7155
7156 for (const btrace_block &block : *btrace.variant.bts.blocks)
7157 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7158 paddress (block.begin), paddress (block.end));
7159
7160 buffer_grow_str0 (buffer, "</btrace>\n");
7161 break;
7162
7163 case BTRACE_FORMAT_PT:
7164 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7165 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7166 buffer_grow_str (buffer, "<pt>\n");
7167
7168 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7169
7170 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7171 btrace.variant.pt.size);
7172
7173 buffer_grow_str (buffer, "</pt>\n");
7174 buffer_grow_str0 (buffer, "</btrace>\n");
7175 break;
7176
7177 default:
7178 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7179 return -1;
7180 }
7181
7182 return 0;
7183 }
7184
7185 /* See to_btrace_conf target method. */
7186
7187 int
7188 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7189 buffer *buffer)
7190 {
7191 const struct btrace_config *conf;
7192
7193 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7194 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7195
7196 conf = linux_btrace_conf (tinfo);
7197 if (conf != NULL)
7198 {
7199 switch (conf->format)
7200 {
7201 case BTRACE_FORMAT_NONE:
7202 break;
7203
7204 case BTRACE_FORMAT_BTS:
7205 buffer_xml_printf (buffer, "<bts");
7206 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7207 buffer_xml_printf (buffer, " />\n");
7208 break;
7209
7210 case BTRACE_FORMAT_PT:
7211 buffer_xml_printf (buffer, "<pt");
7212 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7213 buffer_xml_printf (buffer, "/>\n");
7214 break;
7215 }
7216 }
7217
7218 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7219 return 0;
7220 }
7221 #endif /* HAVE_LINUX_BTRACE */
7222
7223 /* See nat/linux-nat.h. */
7224
7225 ptid_t
7226 current_lwp_ptid (void)
7227 {
7228 return ptid_of (current_thread);
7229 }
7230
7231 const char *
7232 linux_process_target::thread_name (ptid_t thread)
7233 {
7234 return linux_proc_tid_get_name (thread);
7235 }
7236
7237 #if USE_THREAD_DB
7238 bool
7239 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7240 int *handle_len)
7241 {
7242 return thread_db_thread_handle (ptid, handle, handle_len);
7243 }
7244 #endif
7245
7246 /* Default implementation of linux_target_ops method "set_pc" for
7247 32-bit pc register which is literally named "pc". */
7248
7249 void
7250 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7251 {
7252 uint32_t newpc = pc;
7253
7254 supply_register_by_name (regcache, "pc", &newpc);
7255 }
7256
7257 /* Default implementation of linux_target_ops method "get_pc" for
7258 32-bit pc register which is literally named "pc". */
7259
7260 CORE_ADDR
7261 linux_get_pc_32bit (struct regcache *regcache)
7262 {
7263 uint32_t pc;
7264
7265 collect_register_by_name (regcache, "pc", &pc);
7266 if (debug_threads)
7267 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7268 return pc;
7269 }
7270
7271 /* Default implementation of linux_target_ops method "set_pc" for
7272 64-bit pc register which is literally named "pc". */
7273
7274 void
7275 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7276 {
7277 uint64_t newpc = pc;
7278
7279 supply_register_by_name (regcache, "pc", &newpc);
7280 }
7281
7282 /* Default implementation of linux_target_ops method "get_pc" for
7283 64-bit pc register which is literally named "pc". */
7284
7285 CORE_ADDR
7286 linux_get_pc_64bit (struct regcache *regcache)
7287 {
7288 uint64_t pc;
7289
7290 collect_register_by_name (regcache, "pc", &pc);
7291 if (debug_threads)
7292 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7293 return pc;
7294 }
7295
7296 /* See linux-low.h. */
7297
7298 int
7299 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7300 {
7301 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7302 int offset = 0;
7303
7304 gdb_assert (wordsize == 4 || wordsize == 8);
7305
7306 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7307 {
7308 if (wordsize == 4)
7309 {
7310 uint32_t *data_p = (uint32_t *) data;
7311 if (data_p[0] == match)
7312 {
7313 *valp = data_p[1];
7314 return 1;
7315 }
7316 }
7317 else
7318 {
7319 uint64_t *data_p = (uint64_t *) data;
7320 if (data_p[0] == match)
7321 {
7322 *valp = data_p[1];
7323 return 1;
7324 }
7325 }
7326
7327 offset += 2 * wordsize;
7328 }
7329
7330 return 0;
7331 }
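/* The auxiliary vector scanned above is a sequence of (a_type, a_val)
   pairs, each entry two words of the inferior's word size, terminated
   by an AT_NULL entry.  For a 64-bit inferior it might look like this
   (values invented for illustration):

     a_type          a_val
     AT_PHDR (3)     0x555555554040
     AT_HWCAP (16)   0x178bfbff
     AT_NULL (0)     0x0
 */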
7332
7333 /* See linux-low.h. */
7334
7335 CORE_ADDR
7336 linux_get_hwcap (int wordsize)
7337 {
7338 CORE_ADDR hwcap = 0;
7339 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7340 return hwcap;
7341 }
7342
7343 /* See linux-low.h. */
7344
7345 CORE_ADDR
7346 linux_get_hwcap2 (int wordsize)
7347 {
7348 CORE_ADDR hwcap2 = 0;
7349 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7350 return hwcap2;
7351 }
7352
7353 #ifdef HAVE_LINUX_REGSETS
7354 void
7355 initialize_regsets_info (struct regsets_info *info)
7356 {
7357 for (info->num_regsets = 0;
7358 info->regsets[info->num_regsets].size >= 0;
7359 info->num_regsets++)
7360 ;
7361 }
7362 #endif
7363
7364 void
7365 initialize_low (void)
7366 {
7367 struct sigaction sigchld_action;
7368
7369 memset (&sigchld_action, 0, sizeof (sigchld_action));
7370 set_target_ops (the_linux_target);
7371
7372 linux_ptrace_init_warnings ();
7373 linux_proc_init_warnings ();
7374
7375 sigchld_action.sa_handler = sigchld_handler;
7376 sigemptyset (&sigchld_action.sa_mask);
7377 sigchld_action.sa_flags = SA_RESTART;
7378 sigaction (SIGCHLD, &sigchld_action, NULL);
7379
7380 initialize_low_arch ();
7381
7382 linux_check_ptrace_features ();
7383 }