Add pid argument in aarch64_get_debug_reg_state
[deliverable/binutils-gdb.git] gdb/gdbserver/linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #ifndef ELFMAG0
50 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54 #include <elf.h>
55 #endif
56 #include "nat/linux-namespaces.h"
57
58 #ifndef SPUFS_MAGIC
59 #define SPUFS_MAGIC 0x23c9b64e
60 #endif
61
62 #ifdef HAVE_PERSONALITY
63 # include <sys/personality.h>
64 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
65 # define ADDR_NO_RANDOMIZE 0x0040000
66 # endif
67 #endif
68
69 #ifndef O_LARGEFILE
70 #define O_LARGEFILE 0
71 #endif
72
73 #ifndef W_STOPCODE
74 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
75 #endif
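
Editor's note: W_STOPCODE is the inverse of the WIFSTOPPED/WSTOPSIG decoding, placing the signal number in bits 8-15 and 0x7f in the low byte. A minimal, self-contained sketch of that round trip (standalone, not part of linux-low.c):

#include <sys/wait.h>
#include <signal.h>
#include <assert.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)   /* Same fallback as above.  */
#endif

int
main (void)
{
  int wstat = W_STOPCODE (SIGSTOP);     /* (SIGSTOP << 8) | 0x7f, e.g. 0x137f.  */
  assert (WIFSTOPPED (wstat));          /* Low byte 0x7f marks a stopped child.  */
  assert (WSTOPSIG (wstat) == SIGSTOP); /* The signal number is recovered.  */
  return 0;
}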
76
77 /* This is the kernel's hard limit. Not to be confused with
78 SIGRTMIN. */
79 #ifndef __SIGRTMIN
80 #define __SIGRTMIN 32
81 #endif
82
83 /* Some targets did not define these ptrace constants from the start,
84 so gdbserver defines them locally here. In the future, these may
85 be removed after they are added to asm/ptrace.h. */
86 #if !(defined(PT_TEXT_ADDR) \
87 || defined(PT_DATA_ADDR) \
88 || defined(PT_TEXT_END_ADDR))
89 #if defined(__mcoldfire__)
90 /* These are still undefined in 3.10 kernels. */
91 #define PT_TEXT_ADDR 49*4
92 #define PT_DATA_ADDR 50*4
93 #define PT_TEXT_END_ADDR 51*4
94 /* BFIN already defines these since at least 2.6.32 kernels. */
95 #elif defined(BFIN)
96 #define PT_TEXT_ADDR 220
97 #define PT_TEXT_END_ADDR 224
98 #define PT_DATA_ADDR 228
99 /* These are still undefined in 3.10 kernels. */
100 #elif defined(__TMS320C6X__)
101 #define PT_TEXT_ADDR (0x10000*4)
102 #define PT_DATA_ADDR (0x10004*4)
103 #define PT_TEXT_END_ADDR (0x10008*4)
104 #endif
105 #endif
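
Editor's note: on uClinux-style targets, these offsets are fed to PTRACE_PEEKUSER later in this file to recover the inferior's text/data load addresses. A hedged sketch of that use, with the helper name hypothetical:

/* Hypothetical helper for targets that define PT_TEXT_ADDR: read the
   inferior's text-segment start with PTRACE_PEEKUSER.  PID must be an
   attached, stopped LWP.  ptrace returns -1 and sets errno on failure,
   so errno is cleared first to tell a failure from a peeked -1.  */
#if defined(PT_TEXT_ADDR)
static long
example_read_text_start (int pid)
{
  errno = 0;
  return ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
                 (PTRACE_TYPE_ARG4) 0);
}
#endif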
106
107 #ifdef HAVE_LINUX_BTRACE
108 # include "nat/linux-btrace.h"
109 # include "btrace-common.h"
110 #endif
111
112 #ifndef HAVE_ELF32_AUXV_T
113 /* Copied from glibc's elf.h. */
114 typedef struct
115 {
116 uint32_t a_type; /* Entry type */
117 union
118 {
119 uint32_t a_val; /* Integer value */
120 /* We used to have pointer elements added here. We cannot do that,
121 though, since it does not work when using 32-bit definitions
122 on 64-bit platforms and vice versa. */
123 } a_un;
124 } Elf32_auxv_t;
125 #endif
126
127 #ifndef HAVE_ELF64_AUXV_T
128 /* Copied from glibc's elf.h. */
129 typedef struct
130 {
131 uint64_t a_type; /* Entry type */
132 union
133 {
134 uint64_t a_val; /* Integer value */
135 /* We used to have pointer elements added here. We cannot do that,
136 though, since it does not work when using 32-bit definitions
137 on 64-bit platforms and vice versa. */
138 } a_un;
139 } Elf64_auxv_t;
140 #endif
141
142 /* Does the current host support PTRACE_GETREGSET? */
143 int have_ptrace_getregset = -1;
144
145 /* LWP accessors. */
146
147 /* See nat/linux-nat.h. */
148
149 ptid_t
150 ptid_of_lwp (struct lwp_info *lwp)
151 {
152 return ptid_of (get_lwp_thread (lwp));
153 }
154
155 /* See nat/linux-nat.h. */
156
157 void
158 lwp_set_arch_private_info (struct lwp_info *lwp,
159 struct arch_lwp_info *info)
160 {
161 lwp->arch_private = info;
162 }
163
164 /* See nat/linux-nat.h. */
165
166 struct arch_lwp_info *
167 lwp_arch_private_info (struct lwp_info *lwp)
168 {
169 return lwp->arch_private;
170 }
171
172 /* See nat/linux-nat.h. */
173
174 int
175 lwp_is_stopped (struct lwp_info *lwp)
176 {
177 return lwp->stopped;
178 }
179
180 /* See nat/linux-nat.h. */
181
182 enum target_stop_reason
183 lwp_stop_reason (struct lwp_info *lwp)
184 {
185 return lwp->stop_reason;
186 }
187
188 /* A list of all unknown processes which receive stop signals. Some
189 other process will presumably claim each of these as forked
190 children momentarily. */
191
192 struct simple_pid_list
193 {
194 /* The process ID. */
195 int pid;
196
197 /* The status as reported by waitpid. */
198 int status;
199
200 /* Next in chain. */
201 struct simple_pid_list *next;
202 };
203 struct simple_pid_list *stopped_pids;
204
205 /* Trivial list manipulation functions to keep track of a list of new
206 stopped processes. */
207
208 static void
209 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
210 {
211 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
212
213 new_pid->pid = pid;
214 new_pid->status = status;
215 new_pid->next = *listp;
216 *listp = new_pid;
217 }
218
219 static int
220 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
221 {
222 struct simple_pid_list **p;
223
224 for (p = listp; *p != NULL; p = &(*p)->next)
225 if ((*p)->pid == pid)
226 {
227 struct simple_pid_list *next = (*p)->next;
228
229 *statusp = (*p)->status;
230 xfree (*p);
231 *p = next;
232 return 1;
233 }
234 return 0;
235 }
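
Editor's note: the intended pairing, sketched below with hypothetical names, is the one handle_extended_wait relies on later in this file: a stop status collected for a not-yet-known pid is parked on stopped_pids with add_to_pid_list, and the code that later learns about that pid claims it with pull_pid_from_list instead of blocking in waitpid again.

/* Hypothetical illustration of the stash/claim pattern around
   stopped_pids.  NEW_PID and STATUS stand in for values obtained from
   waitpid and PTRACE_GETEVENTMSG.  */
static void
example_stash_and_claim (int new_pid, int status)
{
  int parked_status;

  /* Producer side: a stop arrived for a pid we don't know yet.  */
  add_to_pid_list (&stopped_pids, new_pid, status);

  /* Consumer side: check the list before waiting on the new pid.  */
  if (pull_pid_from_list (&stopped_pids, new_pid, &parked_status))
    {
      /* PARKED_STATUS now holds the status recorded above.  */
      (void) parked_status;
    }
}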
236
237 enum stopping_threads_kind
238 {
239 /* Not stopping threads presently. */
240 NOT_STOPPING_THREADS,
241
242 /* Stopping threads. */
243 STOPPING_THREADS,
244
245 /* Stopping and suspending threads. */
246 STOPPING_AND_SUSPENDING_THREADS
247 };
248
249 /* This is set while stop_all_lwps is in effect. */
250 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
251
252 /* FIXME make into a target method? */
253 int using_threads = 1;
254
255 /* True if we're presently stabilizing threads (moving them out of
256 jump pads). */
257 static int stabilizing_threads;
258
259 static void linux_resume_one_lwp (struct lwp_info *lwp,
260 int step, int signal, siginfo_t *info);
261 static void linux_resume (struct thread_resume *resume_info, size_t n);
262 static void stop_all_lwps (int suspend, struct lwp_info *except);
263 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
264 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
265 int *wstat, int options);
266 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
267 static struct lwp_info *add_lwp (ptid_t ptid);
268 static int linux_stopped_by_watchpoint (void);
269 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
270 static int lwp_is_marked_dead (struct lwp_info *lwp);
271 static void proceed_all_lwps (void);
272 static int finish_step_over (struct lwp_info *lwp);
273 static int kill_lwp (unsigned long lwpid, int signo);
274 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
275 static void complete_ongoing_step_over (void);
276
277 /* When the event-loop is doing a step-over, this points at the thread
278 being stepped. */
279 ptid_t step_over_bkpt;
280
281 /* True if the low target can hardware single-step. Such targets
282 don't need a BREAKPOINT_REINSERT_ADDR callback. */
283
284 static int
285 can_hardware_single_step (void)
286 {
287 return (the_low_target.breakpoint_reinsert_addr == NULL);
288 }
289
290 /* True if the low target supports memory breakpoints. If so, we'll
291 have a GET_PC implementation. */
292
293 static int
294 supports_breakpoints (void)
295 {
296 return (the_low_target.get_pc != NULL);
297 }
298
299 /* Returns true if this target can support fast tracepoints. This
300 does not mean that the in-process agent has been loaded in the
301 inferior. */
302
303 static int
304 supports_fast_tracepoints (void)
305 {
306 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
307 }
308
309 /* True if LWP is stopped in its stepping range. */
310
311 static int
312 lwp_in_step_range (struct lwp_info *lwp)
313 {
314 CORE_ADDR pc = lwp->stop_pc;
315
316 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
317 }
318
319 struct pending_signals
320 {
321 int signal;
322 siginfo_t info;
323 struct pending_signals *prev;
324 };
325
326 /* The read/write ends of the pipe registered as waitable file in the
327 event loop. */
328 static int linux_event_pipe[2] = { -1, -1 };
329
330 /* True if we're currently in async mode. */
331 #define target_is_async_p() (linux_event_pipe[0] != -1)
332
333 static void send_sigstop (struct lwp_info *lwp);
334 static void wait_for_sigstop (void);
335
336 /* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a 32-bit ELF file, and -1 if it is not an ELF header at all; *MACHINE is set to the ELF machine type, or to EM_NONE. */
337
338 static int
339 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
340 {
341 if (header->e_ident[EI_MAG0] == ELFMAG0
342 && header->e_ident[EI_MAG1] == ELFMAG1
343 && header->e_ident[EI_MAG2] == ELFMAG2
344 && header->e_ident[EI_MAG3] == ELFMAG3)
345 {
346 *machine = header->e_machine;
347 return header->e_ident[EI_CLASS] == ELFCLASS64;
348
349 }
350 *machine = EM_NONE;
351 return -1;
352 }
353
354 /* Return non-zero if FILE is a 64-bit ELF file,
355 zero if the file is not a 64-bit ELF file,
356 and -1 if the file is not accessible or doesn't exist. */
357
358 static int
359 elf_64_file_p (const char *file, unsigned int *machine)
360 {
361 Elf64_Ehdr header;
362 int fd;
363
364 fd = open (file, O_RDONLY);
365 if (fd < 0)
366 return -1;
367
368 if (read (fd, &header, sizeof (header)) != sizeof (header))
369 {
370 close (fd);
371 return 0;
372 }
373 close (fd);
374
375 return elf_64_header_p (&header, machine);
376 }
377
378 /* Accept an integer PID; return true if the executable that PID is
379 running is a 64-bit ELF file. */
380
381 int
382 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
383 {
384 char file[PATH_MAX];
385
386 sprintf (file, "/proc/%d/exe", pid);
387 return elf_64_file_p (file, machine);
388 }
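
Editor's note: a hedged caller sketch (the helper below is hypothetical): backends that support both 32-bit and 64-bit inferiors, such as the x86 one, use this kind of check to pick the right register layout, treating an unreadable /proc/PID/exe as "not 64-bit".

/* Hypothetical helper: nonzero when PID's executable is a 64-bit ELF.  */
static int
example_inferior_is_64bit (int pid)
{
  unsigned int machine;

  return linux_pid_exe_is_elf_64_file (pid, &machine) > 0;
}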
389
390 static void
391 delete_lwp (struct lwp_info *lwp)
392 {
393 struct thread_info *thr = get_lwp_thread (lwp);
394
395 if (debug_threads)
396 debug_printf ("deleting %ld\n", lwpid_of (thr));
397
398 remove_thread (thr);
399 free (lwp->arch_private);
400 free (lwp);
401 }
402
403 /* Add a process to the common process list, and set its private
404 data. */
405
406 static struct process_info *
407 linux_add_process (int pid, int attached)
408 {
409 struct process_info *proc;
410
411 proc = add_process (pid, attached);
412 proc->priv = xcalloc (1, sizeof (*proc->priv));
413
414 if (the_low_target.new_process != NULL)
415 proc->priv->arch_private = the_low_target.new_process ();
416
417 return proc;
418 }
419
420 static CORE_ADDR get_pc (struct lwp_info *lwp);
421
422 /* Handle a GNU/Linux extended wait response. If we see a clone
423 event, we need to add the new LWP to our list (and return 1 so as
424 not to report the trap to higher layers); returning 0 reports it. */
425
426 static int
427 handle_extended_wait (struct lwp_info *event_lwp, int wstat)
428 {
429 int event = linux_ptrace_get_extended_event (wstat);
430 struct thread_info *event_thr = get_lwp_thread (event_lwp);
431 struct lwp_info *new_lwp;
432
433 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
434 || (event == PTRACE_EVENT_CLONE))
435 {
436 ptid_t ptid;
437 unsigned long new_pid;
438 int ret, status;
439
440 /* Get the pid of the new lwp. */
441 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
442 &new_pid);
443
444 /* If we haven't already seen the new PID stop, wait for it now. */
445 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
446 {
447 /* The new child has a pending SIGSTOP. We can't affect it until it
448 hits the SIGSTOP, but we're already attached. */
449
450 ret = my_waitpid (new_pid, &status, __WALL);
451
452 if (ret == -1)
453 perror_with_name ("waiting for new child");
454 else if (ret != new_pid)
455 warning ("wait returned unexpected PID %d", ret);
456 else if (!WIFSTOPPED (status))
457 warning ("wait returned unexpected status 0x%x", status);
458 }
459
460 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
461 {
462 struct process_info *parent_proc;
463 struct process_info *child_proc;
464 struct lwp_info *child_lwp;
465 struct thread_info *child_thr;
466 struct target_desc *tdesc;
467
468 ptid = ptid_build (new_pid, new_pid, 0);
469
470 if (debug_threads)
471 {
472 debug_printf ("HEW: Got fork event from LWP %ld, "
473 "new child is %d\n",
474 ptid_get_lwp (ptid_of (event_thr)),
475 ptid_get_pid (ptid));
476 }
477
478 /* Add the new process to the tables and clone the breakpoint
479 lists of the parent. We need to do this even if the new process
480 will be detached, since we will need the process object and the
481 breakpoints to remove any breakpoints from memory when we
482 detach, and the client side will access registers. */
483 child_proc = linux_add_process (new_pid, 0);
484 gdb_assert (child_proc != NULL);
485 child_lwp = add_lwp (ptid);
486 gdb_assert (child_lwp != NULL);
487 child_lwp->stopped = 1;
488 child_lwp->must_set_ptrace_flags = 1;
489 child_lwp->status_pending_p = 0;
490 child_thr = get_lwp_thread (child_lwp);
491 child_thr->last_resume_kind = resume_stop;
492 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
493
494 /* If we're suspending all threads, leave this one suspended
495 too. */
496 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
497 {
498 if (debug_threads)
499 debug_printf ("HEW: leaving child suspended\n");
500 child_lwp->suspended = 1;
501 }
502
503 parent_proc = get_thread_process (event_thr);
504 child_proc->attached = parent_proc->attached;
505 clone_all_breakpoints (&child_proc->breakpoints,
506 &child_proc->raw_breakpoints,
507 parent_proc->breakpoints);
508
509 tdesc = xmalloc (sizeof (struct target_desc));
510 copy_target_description (tdesc, parent_proc->tdesc);
511 child_proc->tdesc = tdesc;
512
513 /* Clone arch-specific process data. */
514 if (the_low_target.new_fork != NULL)
515 the_low_target.new_fork (parent_proc, child_proc);
516
517 /* Save fork info in the parent thread. */
518 if (event == PTRACE_EVENT_FORK)
519 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
520 else if (event == PTRACE_EVENT_VFORK)
521 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
522
523 event_lwp->waitstatus.value.related_pid = ptid;
524
525 /* The status_pending field contains bits denoting the
526 extended event, so when the pending event is handled,
527 the handler will look at lwp->waitstatus. */
528 event_lwp->status_pending_p = 1;
529 event_lwp->status_pending = wstat;
530
531 /* Report the event. */
532 return 0;
533 }
534
535 if (debug_threads)
536 debug_printf ("HEW: Got clone event "
537 "from LWP %ld, new child is LWP %ld\n",
538 lwpid_of (event_thr), new_pid);
539
540 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
541 new_lwp = add_lwp (ptid);
542
543 /* Either we're going to immediately resume the new thread
544 or leave it stopped. linux_resume_one_lwp is a nop if it
545 thinks the thread is currently running, so set this first
546 before calling linux_resume_one_lwp. */
547 new_lwp->stopped = 1;
548
549 /* If we're suspending all threads, leave this one suspended
550 too. */
551 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
552 new_lwp->suspended = 1;
553
554 /* Normally we will get the pending SIGSTOP. But in some cases
555 we might get another signal delivered to the group first.
556 If we do get another signal, be sure not to lose it. */
557 if (WSTOPSIG (status) != SIGSTOP)
558 {
559 new_lwp->stop_expected = 1;
560 new_lwp->status_pending_p = 1;
561 new_lwp->status_pending = status;
562 }
563
564 /* Don't report the event. */
565 return 1;
566 }
567 else if (event == PTRACE_EVENT_VFORK_DONE)
568 {
569 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
570
571 /* Report the event. */
572 return 0;
573 }
574
575 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
576 }
577
578 /* Return the PC as read from the regcache of LWP, without any
579 adjustment. */
580
581 static CORE_ADDR
582 get_pc (struct lwp_info *lwp)
583 {
584 struct thread_info *saved_thread;
585 struct regcache *regcache;
586 CORE_ADDR pc;
587
588 if (the_low_target.get_pc == NULL)
589 return 0;
590
591 saved_thread = current_thread;
592 current_thread = get_lwp_thread (lwp);
593
594 regcache = get_thread_regcache (current_thread, 1);
595 pc = (*the_low_target.get_pc) (regcache);
596
597 if (debug_threads)
598 debug_printf ("pc is 0x%lx\n", (long) pc);
599
600 current_thread = saved_thread;
601 return pc;
602 }
603
604 /* This function should only be called if LWP got a SIGTRAP.
605 The SIGTRAP could mean several things.
606
607 On i386, where decr_pc_after_break is non-zero:
608
609 If we were single-stepping this process using PTRACE_SINGLESTEP, we
610 will get only the one SIGTRAP. The value of $eip will be the next
611 instruction. If the instruction we stepped over was a breakpoint,
612 we need to decrement the PC.
613
614 If we continue the process using PTRACE_CONT, we will get a
615 SIGTRAP when we hit a breakpoint. The value of $eip will be
616 the instruction after the breakpoint (i.e. needs to be
617 decremented). If we report the SIGTRAP to GDB, we must also
618 report the undecremented PC. If the breakpoint is removed, we
619 must resume at the decremented PC.
620
621 On a non-decr_pc_after_break machine with hardware or kernel
622 single-step:
623
624 If we either single-step a breakpoint instruction, or continue and
625 hit a breakpoint instruction, our PC will point at the breakpoint
626 instruction. */
627
628 static int
629 check_stopped_by_breakpoint (struct lwp_info *lwp)
630 {
631 CORE_ADDR pc;
632 CORE_ADDR sw_breakpoint_pc;
633 struct thread_info *saved_thread;
634 #if USE_SIGTRAP_SIGINFO
635 siginfo_t siginfo;
636 #endif
637
638 if (the_low_target.get_pc == NULL)
639 return 0;
640
641 pc = get_pc (lwp);
642 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
643
644 /* breakpoint_at reads from the current thread. */
645 saved_thread = current_thread;
646 current_thread = get_lwp_thread (lwp);
647
648 #if USE_SIGTRAP_SIGINFO
649 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
650 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
651 {
652 if (siginfo.si_signo == SIGTRAP)
653 {
654 if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
655 {
656 if (debug_threads)
657 {
658 struct thread_info *thr = get_lwp_thread (lwp);
659
660 debug_printf ("CSBB: %s stopped by software breakpoint\n",
661 target_pid_to_str (ptid_of (thr)));
662 }
663
664 /* Back up the PC if necessary. */
665 if (pc != sw_breakpoint_pc)
666 {
667 struct regcache *regcache
668 = get_thread_regcache (current_thread, 1);
669 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
670 }
671
672 lwp->stop_pc = sw_breakpoint_pc;
673 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
674 current_thread = saved_thread;
675 return 1;
676 }
677 else if (siginfo.si_code == TRAP_HWBKPT)
678 {
679 if (debug_threads)
680 {
681 struct thread_info *thr = get_lwp_thread (lwp);
682
683 debug_printf ("CSBB: %s stopped by hardware "
684 "breakpoint/watchpoint\n",
685 target_pid_to_str (ptid_of (thr)));
686 }
687
688 lwp->stop_pc = pc;
689 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
690 current_thread = saved_thread;
691 return 1;
692 }
693 else if (siginfo.si_code == TRAP_TRACE)
694 {
695 if (debug_threads)
696 {
697 struct thread_info *thr = get_lwp_thread (lwp);
698
699 debug_printf ("CSBB: %s stopped by trace\n",
700 target_pid_to_str (ptid_of (thr)));
701 }
702
703 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
704 }
705 }
706 }
707 #else
708 /* We may have just stepped a breakpoint instruction. E.g., in
709 non-stop mode, GDB first tells the thread A to step a range, and
710 then the user inserts a breakpoint inside the range. In that
711 case we need to report the breakpoint PC. */
712 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
713 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
714 {
715 if (debug_threads)
716 {
717 struct thread_info *thr = get_lwp_thread (lwp);
718
719 debug_printf ("CSBB: %s stopped by software breakpoint\n",
720 target_pid_to_str (ptid_of (thr)));
721 }
722
723 /* Back up the PC if necessary. */
724 if (pc != sw_breakpoint_pc)
725 {
726 struct regcache *regcache
727 = get_thread_regcache (current_thread, 1);
728 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
729 }
730
731 lwp->stop_pc = sw_breakpoint_pc;
732 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
733 current_thread = saved_thread;
734 return 1;
735 }
736
737 if (hardware_breakpoint_inserted_here (pc))
738 {
739 if (debug_threads)
740 {
741 struct thread_info *thr = get_lwp_thread (lwp);
742
743 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
744 target_pid_to_str (ptid_of (thr)));
745 }
746
747 lwp->stop_pc = pc;
748 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
749 current_thread = saved_thread;
750 return 1;
751 }
752 #endif
753
754 current_thread = saved_thread;
755 return 0;
756 }
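
Editor's note: to make the decr_pc_after_break case above concrete, here is a minimal standalone sketch with hypothetical i386 numbers: after the inferior executes an int3 planted at 0x400500, the kernel reports a PC of 0x400501, and the rewind restores the address GDB knows the breakpoint by.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint64_t pc = 0x400501;                 /* PC reported after the trap.  */
  int decr_pc_after_break = 1;            /* i386 software breakpoint length.  */
  uint64_t sw_breakpoint_pc = pc - decr_pc_after_break;

  assert (sw_breakpoint_pc == 0x400500);  /* Where the breakpoint was set.  */
  return 0;
}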
757
758 static struct lwp_info *
759 add_lwp (ptid_t ptid)
760 {
761 struct lwp_info *lwp;
762
763 lwp = (struct lwp_info *) xcalloc (1, sizeof (*lwp));
764
765 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
766
767 if (the_low_target.new_thread != NULL)
768 the_low_target.new_thread (lwp);
769
770 lwp->thread = add_thread (ptid, lwp);
771
772 return lwp;
773 }
774
775 /* Start an inferior process and return its pid.
776 ALLARGS is a vector of program-name and args. */
777
778 static int
779 linux_create_inferior (char *program, char **allargs)
780 {
781 struct lwp_info *new_lwp;
782 int pid;
783 ptid_t ptid;
784 struct cleanup *restore_personality
785 = maybe_disable_address_space_randomization (disable_randomization);
786
787 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
788 pid = vfork ();
789 #else
790 pid = fork ();
791 #endif
792 if (pid < 0)
793 perror_with_name ("fork");
794
795 if (pid == 0)
796 {
797 close_most_fds ();
798 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
799
800 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
801 signal (__SIGRTMIN + 1, SIG_DFL);
802 #endif
803
804 setpgid (0, 0);
805
806 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
807 stdout to stderr so that inferior i/o doesn't corrupt the connection.
808 Also, redirect stdin to /dev/null. */
809 if (remote_connection_is_stdio ())
810 {
811 close (0);
812 open ("/dev/null", O_RDONLY);
813 dup2 (2, 1);
814 if (write (2, "stdin/stdout redirected\n",
815 sizeof ("stdin/stdout redirected\n") - 1) < 0)
816 {
817 /* Errors ignored. */;
818 }
819 }
820
821 execv (program, allargs);
822 if (errno == ENOENT)
823 execvp (program, allargs);
824
825 fprintf (stderr, "Cannot exec %s: %s.\n", program,
826 strerror (errno));
827 fflush (stderr);
828 _exit (0177);
829 }
830
831 do_cleanups (restore_personality);
832
833 linux_add_process (pid, 0);
834
835 ptid = ptid_build (pid, pid, 0);
836 new_lwp = add_lwp (ptid);
837 new_lwp->must_set_ptrace_flags = 1;
838
839 return pid;
840 }
841
842 /* Implement the arch_setup target_ops method. */
843
844 static void
845 linux_arch_setup (void)
846 {
847 the_low_target.arch_setup ();
848 }
849
850 /* Attach to an inferior process. Returns 0 on success, ERRNO on
851 error. */
852
853 int
854 linux_attach_lwp (ptid_t ptid)
855 {
856 struct lwp_info *new_lwp;
857 int lwpid = ptid_get_lwp (ptid);
858
859 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
860 != 0)
861 return errno;
862
863 new_lwp = add_lwp (ptid);
864
865 /* We need to wait for SIGSTOP before being able to make the next
866 ptrace call on this LWP. */
867 new_lwp->must_set_ptrace_flags = 1;
868
869 if (linux_proc_pid_is_stopped (lwpid))
870 {
871 if (debug_threads)
872 debug_printf ("Attached to a stopped process\n");
873
874 /* The process is definitely stopped. It is in a job control
875 stop, unless the kernel predates the TASK_STOPPED /
876 TASK_TRACED distinction, in which case it might be in a
877 ptrace stop. Make sure it is in a ptrace stop; from there we
878 can kill it, signal it, et cetera.
879
880 First make sure there is a pending SIGSTOP. Since we are
881 already attached, the process can not transition from stopped
882 to running without a PTRACE_CONT; so we know this signal will
883 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
884 probably already in the queue (unless this kernel is old
885 enough to use TASK_STOPPED for ptrace stops); but since
886 SIGSTOP is not an RT signal, it can only be queued once. */
887 kill_lwp (lwpid, SIGSTOP);
888
889 /* Finally, resume the stopped process. This will deliver the
890 SIGSTOP (or a higher priority signal, just like normal
891 PTRACE_ATTACH), which we'll catch later on. */
892 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
893 }
894
895 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
896 brings it to a halt.
897
898 There are several cases to consider here:
899
900 1) gdbserver has already attached to the process and is being notified
901 of a new thread that is being created.
902 In this case we should ignore that SIGSTOP and resume the
903 process. This is handled below by setting stop_expected = 1,
904 and the fact that add_thread sets last_resume_kind ==
905 resume_continue.
906
907 2) This is the first thread (the process thread), and we're attaching
908 to it via attach_inferior.
909 In this case we want the process thread to stop.
910 This is handled by having linux_attach set last_resume_kind ==
911 resume_stop after we return.
912
913 If the pid we are attaching to is also the tgid, we attach to and
914 stop all the existing threads. Otherwise, we attach to pid and
915 ignore any other threads in the same group as this pid.
916
917 3) GDB is connecting to gdbserver and is requesting an enumeration of all
918 existing threads.
919 In this case we want the thread to stop.
920 FIXME: This case is currently not properly handled.
921 We should wait for the SIGSTOP but don't. Things work apparently
922 because enough time passes between when we ptrace (ATTACH) and when
923 gdb makes the next ptrace call on the thread.
924
925 On the other hand, if we are currently trying to stop all threads, we
926 should treat the new thread as if we had sent it a SIGSTOP. This works
927 because we are guaranteed that the add_lwp call above added us to the
928 end of the list, and so the new thread has not yet reached
929 wait_for_sigstop (but will). */
930 new_lwp->stop_expected = 1;
931
932 return 0;
933 }
934
935 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
936 already attached. Returns true if a new LWP is found, false
937 otherwise. */
938
939 static int
940 attach_proc_task_lwp_callback (ptid_t ptid)
941 {
942 /* Is this a new thread? */
943 if (find_thread_ptid (ptid) == NULL)
944 {
945 int lwpid = ptid_get_lwp (ptid);
946 int err;
947
948 if (debug_threads)
949 debug_printf ("Found new lwp %d\n", lwpid);
950
951 err = linux_attach_lwp (ptid);
952
953 /* Be quiet if we simply raced with the thread exiting. EPERM
954 is returned if the thread's task still exists, and is marked
955 as exited or zombie, as well as other conditions, so in that
956 case, confirm the status in /proc/PID/status. */
957 if (err == ESRCH
958 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
959 {
960 if (debug_threads)
961 {
962 debug_printf ("Cannot attach to lwp %d: "
963 "thread is gone (%d: %s)\n",
964 lwpid, err, strerror (err));
965 }
966 }
967 else if (err != 0)
968 {
969 warning (_("Cannot attach to lwp %d: %s"),
970 lwpid,
971 linux_ptrace_attach_fail_reason_string (ptid, err));
972 }
973
974 return 1;
975 }
976 return 0;
977 }
978
979 /* Attach to PID. If PID is the tgid, attach to it and all
980 of its threads. */
981
982 static int
983 linux_attach (unsigned long pid)
984 {
985 ptid_t ptid = ptid_build (pid, pid, 0);
986 int err;
987
988 /* Attach to PID. We will check for other threads
989 soon. */
990 err = linux_attach_lwp (ptid);
991 if (err != 0)
992 error ("Cannot attach to process %ld: %s",
993 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
994
995 linux_add_process (pid, 1);
996
997 if (!non_stop)
998 {
999 struct thread_info *thread;
1000
1001 /* Don't ignore the initial SIGSTOP if we just attached to this
1002 process. It will be collected by wait shortly. */
1003 thread = find_thread_ptid (ptid_build (pid, pid, 0));
1004 thread->last_resume_kind = resume_stop;
1005 }
1006
1007 /* We must attach to every LWP. If /proc is mounted, use that to
1008 find them now. On the one hand, the inferior may be using raw
1009 clone instead of using pthreads. On the other hand, even if it
1010 is using pthreads, GDB may not be connected yet (thread_db needs
1011 to do symbol lookups, through qSymbol). Also, thread_db walks
1012 structures in the inferior's address space to find the list of
1013 threads/LWPs, and those structures may well be corrupted. Note
1014 that once thread_db is loaded, we'll still use it to list threads
1015 and associate pthread info with each LWP. */
1016 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1017 return 0;
1018 }
1019
1020 struct counter
1021 {
1022 int pid;
1023 int count;
1024 };
1025
1026 static int
1027 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1028 {
1029 struct counter *counter = args;
1030
1031 if (ptid_get_pid (entry->id) == counter->pid)
1032 {
1033 if (++counter->count > 1)
1034 return 1;
1035 }
1036
1037 return 0;
1038 }
1039
1040 static int
1041 last_thread_of_process_p (int pid)
1042 {
1043 struct counter counter = { pid , 0 };
1044
1045 return (find_inferior (&all_threads,
1046 second_thread_of_pid_p, &counter) == NULL);
1047 }
1048
1049 /* Kill LWP. */
1050
1051 static void
1052 linux_kill_one_lwp (struct lwp_info *lwp)
1053 {
1054 struct thread_info *thr = get_lwp_thread (lwp);
1055 int pid = lwpid_of (thr);
1056
1057 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1058 there is no signal context, and ptrace(PTRACE_KILL) (or
1059 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1060 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1061 alternative is to kill with SIGKILL. We only need one SIGKILL
1062 per process, not one for each thread. But since we still support
1063 linuxthreads, and we also support debugging programs using raw
1064 clone without CLONE_THREAD, we send one for each thread. For
1065 years, we used PTRACE_KILL only, so we're being a bit paranoid
1066 about some old kernels where PTRACE_KILL might work better
1067 (dubious if there are any such, but that's why it's paranoia), so
1068 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1069 everywhere. */
1070
1071 errno = 0;
1072 kill_lwp (pid, SIGKILL);
1073 if (debug_threads)
1074 {
1075 int save_errno = errno;
1076
1077 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1078 target_pid_to_str (ptid_of (thr)),
1079 save_errno ? strerror (save_errno) : "OK");
1080 }
1081
1082 errno = 0;
1083 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1084 if (debug_threads)
1085 {
1086 int save_errno = errno;
1087
1088 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1089 target_pid_to_str (ptid_of (thr)),
1090 save_errno ? strerror (save_errno) : "OK");
1091 }
1092 }
1093
1094 /* Kill LWP and wait for it to die. */
1095
1096 static void
1097 kill_wait_lwp (struct lwp_info *lwp)
1098 {
1099 struct thread_info *thr = get_lwp_thread (lwp);
1100 int pid = ptid_get_pid (ptid_of (thr));
1101 int lwpid = ptid_get_lwp (ptid_of (thr));
1102 int wstat;
1103 int res;
1104
1105 if (debug_threads)
1106 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1107
1108 do
1109 {
1110 linux_kill_one_lwp (lwp);
1111
1112 /* Make sure it died. Notes:
1113
1114 - The loop is most likely unnecessary.
1115
1116 - We don't use linux_wait_for_event as that could delete lwps
1117 while we're iterating over them. We're not interested in
1118 any pending status at this point, only in making sure all
1119 wait status on the kernel side are collected until the
1120 process is reaped.
1121
1122 - We don't use __WALL here as the __WALL emulation relies on
1123 SIGCHLD, and killing a stopped process doesn't generate
1124 one, nor an exit status.
1125 */
1126 res = my_waitpid (lwpid, &wstat, 0);
1127 if (res == -1 && errno == ECHILD)
1128 res = my_waitpid (lwpid, &wstat, __WCLONE);
1129 } while (res > 0 && WIFSTOPPED (wstat));
1130
1131 /* Even if it was stopped, the child may have already disappeared.
1132 E.g., if it was killed by SIGKILL. */
1133 if (res < 0 && errno != ECHILD)
1134 perror_with_name ("kill_wait_lwp");
1135 }
1136
1137 /* Callback for `find_inferior'. Kills an lwp of a given process,
1138 except the leader. */
1139
1140 static int
1141 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1142 {
1143 struct thread_info *thread = (struct thread_info *) entry;
1144 struct lwp_info *lwp = get_thread_lwp (thread);
1145 int pid = * (int *) args;
1146
1147 if (ptid_get_pid (entry->id) != pid)
1148 return 0;
1149
1150 /* We avoid killing the first thread here, because of a Linux kernel (at
1151 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1152 the children get a chance to be reaped, it will remain a zombie
1153 forever. */
1154
1155 if (lwpid_of (thread) == pid)
1156 {
1157 if (debug_threads)
1158 debug_printf ("lkop: is last of process %s\n",
1159 target_pid_to_str (entry->id));
1160 return 0;
1161 }
1162
1163 kill_wait_lwp (lwp);
1164 return 0;
1165 }
1166
1167 static int
1168 linux_kill (int pid)
1169 {
1170 struct process_info *process;
1171 struct lwp_info *lwp;
1172
1173 process = find_process_pid (pid);
1174 if (process == NULL)
1175 return -1;
1176
1177 /* If we're killing a running inferior, make sure it is stopped
1178 first, as PTRACE_KILL will not work otherwise. */
1179 stop_all_lwps (0, NULL);
1180
1181 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1182
1183 /* See the comment in kill_one_lwp_callback. We did not kill the first
1184 thread in the list, so do so now. */
1185 lwp = find_lwp_pid (pid_to_ptid (pid));
1186
1187 if (lwp == NULL)
1188 {
1189 if (debug_threads)
1190 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1191 pid);
1192 }
1193 else
1194 kill_wait_lwp (lwp);
1195
1196 the_target->mourn (process);
1197
1198 /* Since we presently can only stop all lwps of all processes, we
1199 need to unstop lwps of other processes. */
1200 unstop_all_lwps (0, NULL);
1201 return 0;
1202 }
1203
1204 /* Get pending signal of THREAD, for detaching purposes. This is the
1205 signal the thread last stopped for, which we need to deliver to the
1206 thread when detaching, otherwise, it'd be suppressed/lost. */
1207
1208 static int
1209 get_detach_signal (struct thread_info *thread)
1210 {
1211 enum gdb_signal signo = GDB_SIGNAL_0;
1212 int status;
1213 struct lwp_info *lp = get_thread_lwp (thread);
1214
1215 if (lp->status_pending_p)
1216 status = lp->status_pending;
1217 else
1218 {
1219 /* If the thread had been suspended by gdbserver, and it stopped
1220 cleanly, then it'll have stopped with SIGSTOP. But we don't
1221 want to deliver that SIGSTOP. */
1222 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1223 || thread->last_status.value.sig == GDB_SIGNAL_0)
1224 return 0;
1225
1226 /* Otherwise, we may need to deliver the signal we
1227 intercepted. */
1228 status = lp->last_status;
1229 }
1230
1231 if (!WIFSTOPPED (status))
1232 {
1233 if (debug_threads)
1234 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1235 target_pid_to_str (ptid_of (thread)));
1236 return 0;
1237 }
1238
1239 /* Extended wait statuses aren't real SIGTRAPs. */
1240 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1241 {
1242 if (debug_threads)
1243 debug_printf ("GPS: lwp %s had stopped with extended "
1244 "status: no pending signal\n",
1245 target_pid_to_str (ptid_of (thread)));
1246 return 0;
1247 }
1248
1249 signo = gdb_signal_from_host (WSTOPSIG (status));
1250
1251 if (program_signals_p && !program_signals[signo])
1252 {
1253 if (debug_threads)
1254 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1255 target_pid_to_str (ptid_of (thread)),
1256 gdb_signal_to_string (signo));
1257 return 0;
1258 }
1259 else if (!program_signals_p
1260 /* If we have no way to know which signals GDB does not
1261 want to have passed to the program, assume
1262 SIGTRAP/SIGINT, which is GDB's default. */
1263 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1264 {
1265 if (debug_threads)
1266 debug_printf ("GPS: lwp %s had signal %s, "
1267 "but we don't know if we should pass it. "
1268 "Default to not.\n",
1269 target_pid_to_str (ptid_of (thread)),
1270 gdb_signal_to_string (signo));
1271 return 0;
1272 }
1273 else
1274 {
1275 if (debug_threads)
1276 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1277 target_pid_to_str (ptid_of (thread)),
1278 gdb_signal_to_string (signo));
1279
1280 return WSTOPSIG (status);
1281 }
1282 }
1283
1284 static int
1285 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1286 {
1287 struct thread_info *thread = (struct thread_info *) entry;
1288 struct lwp_info *lwp = get_thread_lwp (thread);
1289 int pid = * (int *) args;
1290 int sig;
1291
1292 if (ptid_get_pid (entry->id) != pid)
1293 return 0;
1294
1295 /* If there is a pending SIGSTOP, get rid of it. */
1296 if (lwp->stop_expected)
1297 {
1298 if (debug_threads)
1299 debug_printf ("Sending SIGCONT to %s\n",
1300 target_pid_to_str (ptid_of (thread)));
1301
1302 kill_lwp (lwpid_of (thread), SIGCONT);
1303 lwp->stop_expected = 0;
1304 }
1305
1306 /* Flush any pending changes to the process's registers. */
1307 regcache_invalidate_thread (thread);
1308
1309 /* Pass on any pending signal for this thread. */
1310 sig = get_detach_signal (thread);
1311
1312 /* Finally, let it resume. */
1313 if (the_low_target.prepare_to_resume != NULL)
1314 the_low_target.prepare_to_resume (lwp);
1315 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1316 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1317 error (_("Can't detach %s: %s"),
1318 target_pid_to_str (ptid_of (thread)),
1319 strerror (errno));
1320
1321 delete_lwp (lwp);
1322 return 0;
1323 }
1324
1325 static int
1326 linux_detach (int pid)
1327 {
1328 struct process_info *process;
1329
1330 process = find_process_pid (pid);
1331 if (process == NULL)
1332 return -1;
1333
1334 /* If a step over is already in progress, let it finish first,
1335 otherwise nesting a stabilize_threads operation on top gets real
1336 messy. */
1337 complete_ongoing_step_over ();
1338
1339 /* Stop all threads before detaching. First, ptrace requires that
1340 the thread is stopped to successfully detach. Second, thread_db
1341 may need to uninstall thread event breakpoints from memory, which
1342 only works with a stopped process anyway. */
1343 stop_all_lwps (0, NULL);
1344
1345 #ifdef USE_THREAD_DB
1346 thread_db_detach (process);
1347 #endif
1348
1349 /* Stabilize threads (move out of jump pads). */
1350 stabilize_threads ();
1351
1352 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1353
1354 the_target->mourn (process);
1355
1356 /* Since we presently can only stop all lwps of all processes, we
1357 need to unstop lwps of other processes. */
1358 unstop_all_lwps (0, NULL);
1359 return 0;
1360 }
1361
1362 /* Remove all LWPs that belong to process PROC from the lwp list. */
1363
1364 static int
1365 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1366 {
1367 struct thread_info *thread = (struct thread_info *) entry;
1368 struct lwp_info *lwp = get_thread_lwp (thread);
1369 struct process_info *process = proc;
1370
1371 if (pid_of (thread) == pid_of (process))
1372 delete_lwp (lwp);
1373
1374 return 0;
1375 }
1376
1377 static void
1378 linux_mourn (struct process_info *process)
1379 {
1380 struct process_info_private *priv;
1381
1382 #ifdef USE_THREAD_DB
1383 thread_db_mourn (process);
1384 #endif
1385
1386 find_inferior (&all_threads, delete_lwp_callback, process);
1387
1388 /* Free all private data. */
1389 priv = process->priv;
1390 free (priv->arch_private);
1391 free (priv);
1392 process->priv = NULL;
1393
1394 remove_process (process);
1395 }
1396
1397 static void
1398 linux_join (int pid)
1399 {
1400 int status, ret;
1401
1402 do {
1403 ret = my_waitpid (pid, &status, 0);
1404 if (WIFEXITED (status) || WIFSIGNALED (status))
1405 break;
1406 } while (ret != -1 || errno != ECHILD);
1407 }
1408
1409 /* Return nonzero if the given thread is still alive. */
1410 static int
1411 linux_thread_alive (ptid_t ptid)
1412 {
1413 struct lwp_info *lwp = find_lwp_pid (ptid);
1414
1415 /* We assume we always know if a thread exits. If a whole process
1416 exited but we still haven't been able to report it to GDB, we'll
1417 hold on to the last lwp of the dead process. */
1418 if (lwp != NULL)
1419 return !lwp_is_marked_dead (lwp);
1420 else
1421 return 0;
1422 }
1423
1424 /* Return 1 if this lwp still has an interesting status pending. If
1425 not (e.g., it had stopped for a breakpoint that is gone), return
1426 false. */
1427
1428 static int
1429 thread_still_has_status_pending_p (struct thread_info *thread)
1430 {
1431 struct lwp_info *lp = get_thread_lwp (thread);
1432
1433 if (!lp->status_pending_p)
1434 return 0;
1435
1436 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1437 report any status pending the LWP may have. */
1438 if (thread->last_resume_kind == resume_stop
1439 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1440 return 0;
1441
1442 if (thread->last_resume_kind != resume_stop
1443 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1444 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1445 {
1446 struct thread_info *saved_thread;
1447 CORE_ADDR pc;
1448 int discard = 0;
1449
1450 gdb_assert (lp->last_status != 0);
1451
1452 pc = get_pc (lp);
1453
1454 saved_thread = current_thread;
1455 current_thread = thread;
1456
1457 if (pc != lp->stop_pc)
1458 {
1459 if (debug_threads)
1460 debug_printf ("PC of %ld changed\n",
1461 lwpid_of (thread));
1462 discard = 1;
1463 }
1464
1465 #if !USE_SIGTRAP_SIGINFO
1466 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1467 && !(*the_low_target.breakpoint_at) (pc))
1468 {
1469 if (debug_threads)
1470 debug_printf ("previous SW breakpoint of %ld gone\n",
1471 lwpid_of (thread));
1472 discard = 1;
1473 }
1474 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1475 && !hardware_breakpoint_inserted_here (pc))
1476 {
1477 if (debug_threads)
1478 debug_printf ("previous HW breakpoint of %ld gone\n",
1479 lwpid_of (thread));
1480 discard = 1;
1481 }
1482 #endif
1483
1484 current_thread = saved_thread;
1485
1486 if (discard)
1487 {
1488 if (debug_threads)
1489 debug_printf ("discarding pending breakpoint status\n");
1490 lp->status_pending_p = 0;
1491 return 0;
1492 }
1493 }
1494
1495 return 1;
1496 }
1497
1498 /* Return 1 if this lwp has an interesting status pending. */
1499 static int
1500 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1501 {
1502 struct thread_info *thread = (struct thread_info *) entry;
1503 struct lwp_info *lp = get_thread_lwp (thread);
1504 ptid_t ptid = * (ptid_t *) arg;
1505
1506 /* Check if we're only interested in events from a specific process
1507 or a specific LWP. */
1508 if (!ptid_match (ptid_of (thread), ptid))
1509 return 0;
1510
1511 if (lp->status_pending_p
1512 && !thread_still_has_status_pending_p (thread))
1513 {
1514 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1515 return 0;
1516 }
1517
1518 return lp->status_pending_p;
1519 }
1520
1521 static int
1522 same_lwp (struct inferior_list_entry *entry, void *data)
1523 {
1524 ptid_t ptid = *(ptid_t *) data;
1525 int lwp;
1526
1527 if (ptid_get_lwp (ptid) != 0)
1528 lwp = ptid_get_lwp (ptid);
1529 else
1530 lwp = ptid_get_pid (ptid);
1531
1532 if (ptid_get_lwp (entry->id) == lwp)
1533 return 1;
1534
1535 return 0;
1536 }
1537
1538 struct lwp_info *
1539 find_lwp_pid (ptid_t ptid)
1540 {
1541 struct inferior_list_entry *thread
1542 = find_inferior (&all_threads, same_lwp, &ptid);
1543
1544 if (thread == NULL)
1545 return NULL;
1546
1547 return get_thread_lwp ((struct thread_info *) thread);
1548 }
1549
1550 /* Return the number of known LWPs in the tgid given by PID. */
1551
1552 static int
1553 num_lwps (int pid)
1554 {
1555 struct inferior_list_entry *inf, *tmp;
1556 int count = 0;
1557
1558 ALL_INFERIORS (&all_threads, inf, tmp)
1559 {
1560 if (ptid_get_pid (inf->id) == pid)
1561 count++;
1562 }
1563
1564 return count;
1565 }
1566
1567 /* The arguments passed to iterate_over_lwps. */
1568
1569 struct iterate_over_lwps_args
1570 {
1571 /* The FILTER argument passed to iterate_over_lwps. */
1572 ptid_t filter;
1573
1574 /* The CALLBACK argument passed to iterate_over_lwps. */
1575 iterate_over_lwps_ftype *callback;
1576
1577 /* The DATA argument passed to iterate_over_lwps. */
1578 void *data;
1579 };
1580
1581 /* Callback for find_inferior used by iterate_over_lwps to filter
1582 calls to the callback supplied to that function. Returning a
1583 nonzero value causes find_inferior to stop iterating and return
1584 the current inferior_list_entry. Returning zero indicates that
1585 find_inferior should continue iterating. */
1586
1587 static int
1588 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1589 {
1590 struct iterate_over_lwps_args *args
1591 = (struct iterate_over_lwps_args *) args_p;
1592
1593 if (ptid_match (entry->id, args->filter))
1594 {
1595 struct thread_info *thr = (struct thread_info *) entry;
1596 struct lwp_info *lwp = get_thread_lwp (thr);
1597
1598 return (*args->callback) (lwp, args->data);
1599 }
1600
1601 return 0;
1602 }
1603
1604 /* See nat/linux-nat.h. */
1605
1606 struct lwp_info *
1607 iterate_over_lwps (ptid_t filter,
1608 iterate_over_lwps_ftype callback,
1609 void *data)
1610 {
1611 struct iterate_over_lwps_args args = {filter, callback, data};
1612 struct inferior_list_entry *entry;
1613
1614 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1615 if (entry == NULL)
1616 return NULL;
1617
1618 return get_thread_lwp ((struct thread_info *) entry);
1619 }
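
Editor's note: a usage sketch (both helper names below are hypothetical; iterate_over_lwps, pid_to_ptid and lwp->stopped are the file's own API): the callback returns nonzero to stop the walk, and iterate_over_lwps then hands back that LWP, or NULL if nothing matched.

/* Hypothetical callback: stop at the first LWP that is stopped.  */
static int
example_lwp_is_stopped (struct lwp_info *lwp, void *data)
{
  return lwp->stopped;
}

/* Hypothetical caller: first stopped LWP belonging to process PID,
   or NULL if there is none.  */
static struct lwp_info *
example_first_stopped_lwp_of (int pid)
{
  return iterate_over_lwps (pid_to_ptid (pid),
                            example_lwp_is_stopped, NULL);
}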
1620
1621 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1622 their exits until all other threads in the group have exited. */
1623
1624 static void
1625 check_zombie_leaders (void)
1626 {
1627 struct process_info *proc, *tmp;
1628
1629 ALL_PROCESSES (proc, tmp)
1630 {
1631 pid_t leader_pid = pid_of (proc);
1632 struct lwp_info *leader_lp;
1633
1634 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1635
1636 if (debug_threads)
1637 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1638 "num_lwps=%d, zombie=%d\n",
1639 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1640 linux_proc_pid_is_zombie (leader_pid));
1641
1642 if (leader_lp != NULL
1643 /* Check if there are other threads in the group, as we may
1644 have raced with the inferior simply exiting. */
1645 && !last_thread_of_process_p (leader_pid)
1646 && linux_proc_pid_is_zombie (leader_pid))
1647 {
1648 /* A leader zombie can mean one of two things:
1649
1650 - It exited, and there's an exit status pending
1651 available, or only the leader exited (not the whole
1652 program). In the latter case, we can't waitpid the
1653 leader's exit status until all other threads are gone.
1654
1655 - There are 3 or more threads in the group, and a thread
1656 other than the leader exec'd. On an exec, the Linux
1657 kernel destroys all other threads (except the execing
1658 one) in the thread group, and resets the execing thread's
1659 tid to the tgid. No exit notification is sent for the
1660 execing thread -- from the ptracer's perspective, it
1661 appears as though the execing thread just vanishes.
1662 Until we reap all other threads except the leader and the
1663 execing thread, the leader will be zombie, and the
1664 execing thread will be in `D (disc sleep)'. As soon as
1665 all other threads are reaped, the execing thread changes
1666 its tid to the tgid, and the previous (zombie) leader
1667 vanishes, giving place to the "new" leader. We could try
1668 distinguishing the exit and exec cases, by waiting once
1669 more, and seeing if something comes out, but it doesn't
1670 sound useful. The previous leader _does_ go away, and
1671 we'll re-add the new one once we see the exec event
1672 (which is just the same as what would happen if the
1673 previous leader did exit voluntarily before some other
1674 thread execs). */
1675
1676 if (debug_threads)
1677 fprintf (stderr,
1678 "CZL: Thread group leader %d zombie "
1679 "(it exited, or another thread execd).\n",
1680 leader_pid);
1681
1682 delete_lwp (leader_lp);
1683 }
1684 }
1685 }
1686
1687 /* Callback for `find_inferior'. Returns the first LWP that is not
1688 stopped. ARG is a PTID filter. */
1689
1690 static int
1691 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1692 {
1693 struct thread_info *thr = (struct thread_info *) entry;
1694 struct lwp_info *lwp;
1695 ptid_t filter = *(ptid_t *) arg;
1696
1697 if (!ptid_match (ptid_of (thr), filter))
1698 return 0;
1699
1700 lwp = get_thread_lwp (thr);
1701 if (!lwp->stopped)
1702 return 1;
1703
1704 return 0;
1705 }
1706
1707 /* Increment LWP's suspend count. */
1708
1709 static void
1710 lwp_suspended_inc (struct lwp_info *lwp)
1711 {
1712 lwp->suspended++;
1713
1714 if (debug_threads && lwp->suspended > 4)
1715 {
1716 struct thread_info *thread = get_lwp_thread (lwp);
1717
1718 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1719 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1720 }
1721 }
1722
1723 /* Decrement LWP's suspend count. */
1724
1725 static void
1726 lwp_suspended_decr (struct lwp_info *lwp)
1727 {
1728 lwp->suspended--;
1729
1730 if (lwp->suspended < 0)
1731 {
1732 struct thread_info *thread = get_lwp_thread (lwp);
1733
1734 internal_error (__FILE__, __LINE__,
1735 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1736 lwp->suspended);
1737 }
1738 }
1739
1740 /* This function should only be called if the LWP got a SIGTRAP.
1741
1742 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1743 event was handled, 0 otherwise. */
1744
1745 static int
1746 handle_tracepoints (struct lwp_info *lwp)
1747 {
1748 struct thread_info *tinfo = get_lwp_thread (lwp);
1749 int tpoint_related_event = 0;
1750
1751 gdb_assert (lwp->suspended == 0);
1752
1753 /* If this tracepoint hit causes a tracing stop, we'll immediately
1754 uninsert tracepoints. To do this, we temporarily pause all
1755 threads, unpatch away, and then unpause threads. We need to make
1756 sure the unpausing doesn't resume LWP too. */
1757 lwp_suspended_inc (lwp);
1758
1759 /* And we need to be sure that any all-threads-stopping doesn't try
1760 to move threads out of the jump pads, as it could deadlock the
1761 inferior (LWP could be in the jump pad, maybe even holding the
1762 lock.) */
1763
1764 /* Do any necessary step collect actions. */
1765 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1766
1767 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1768
1769 /* See if we just hit a tracepoint and do its main collect
1770 actions. */
1771 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1772
1773 lwp_suspended_decr (lwp);
1774
1775 gdb_assert (lwp->suspended == 0);
1776 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1777
1778 if (tpoint_related_event)
1779 {
1780 if (debug_threads)
1781 debug_printf ("got a tracepoint event\n");
1782 return 1;
1783 }
1784
1785 return 0;
1786 }
1787
1788 /* Convenience wrapper. Returns true if LWP is presently collecting a
1789 fast tracepoint. */
1790
1791 static int
1792 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1793 struct fast_tpoint_collect_status *status)
1794 {
1795 CORE_ADDR thread_area;
1796 struct thread_info *thread = get_lwp_thread (lwp);
1797
1798 if (the_low_target.get_thread_area == NULL)
1799 return 0;
1800
1801 /* Get the thread area address. This is used to recognize which
1802 thread is which when tracing with the in-process agent library.
1803 We don't read anything from the address, and treat it as opaque;
1804 it's the address itself that we assume is unique per-thread. */
1805 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1806 return 0;
1807
1808 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1809 }
1810
1811 /* The reason we resume in the caller is that we want to be able
1812 to pass lwp->status_pending as WSTAT, and we need to clear
1813 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1814 refuses to resume. */
1815
1816 static int
1817 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1818 {
1819 struct thread_info *saved_thread;
1820
1821 saved_thread = current_thread;
1822 current_thread = get_lwp_thread (lwp);
1823
1824 if ((wstat == NULL
1825 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1826 && supports_fast_tracepoints ()
1827 && agent_loaded_p ())
1828 {
1829 struct fast_tpoint_collect_status status;
1830 int r;
1831
1832 if (debug_threads)
1833 debug_printf ("Checking whether LWP %ld needs to move out of the "
1834 "jump pad.\n",
1835 lwpid_of (current_thread));
1836
1837 r = linux_fast_tracepoint_collecting (lwp, &status);
1838
1839 if (wstat == NULL
1840 || (WSTOPSIG (*wstat) != SIGILL
1841 && WSTOPSIG (*wstat) != SIGFPE
1842 && WSTOPSIG (*wstat) != SIGSEGV
1843 && WSTOPSIG (*wstat) != SIGBUS))
1844 {
1845 lwp->collecting_fast_tracepoint = r;
1846
1847 if (r != 0)
1848 {
1849 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1850 {
1851 /* Haven't executed the original instruction yet.
1852 Set breakpoint there, and wait till it's hit,
1853 then single-step until exiting the jump pad. */
1854 lwp->exit_jump_pad_bkpt
1855 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1856 }
1857
1858 if (debug_threads)
1859 debug_printf ("Checking whether LWP %ld needs to move out of "
1860 "the jump pad...it does\n",
1861 lwpid_of (current_thread));
1862 current_thread = saved_thread;
1863
1864 return 1;
1865 }
1866 }
1867 else
1868 {
1869 /* If we get a synchronous signal while collecting, *and*
1870 while executing the (relocated) original instruction,
1871 reset the PC to point at the tpoint address, before
1872 reporting to GDB. Otherwise, it's an IPA lib bug: just
1873 report the signal to GDB, and pray for the best. */
1874
1875 lwp->collecting_fast_tracepoint = 0;
1876
1877 if (r != 0
1878 && (status.adjusted_insn_addr <= lwp->stop_pc
1879 && lwp->stop_pc < status.adjusted_insn_addr_end))
1880 {
1881 siginfo_t info;
1882 struct regcache *regcache;
1883
1884 /* The si_addr on a few signals references the address
1885 of the faulting instruction. Adjust that as
1886 well. */
1887 if ((WSTOPSIG (*wstat) == SIGILL
1888 || WSTOPSIG (*wstat) == SIGFPE
1889 || WSTOPSIG (*wstat) == SIGBUS
1890 || WSTOPSIG (*wstat) == SIGSEGV)
1891 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1892 (PTRACE_TYPE_ARG3) 0, &info) == 0
1893 /* Final check just to make sure we don't clobber
1894 the siginfo of non-kernel-sent signals. */
1895 && (uintptr_t) info.si_addr == lwp->stop_pc)
1896 {
1897 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1898 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1899 (PTRACE_TYPE_ARG3) 0, &info);
1900 }
1901
1902 regcache = get_thread_regcache (current_thread, 1);
1903 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1904 lwp->stop_pc = status.tpoint_addr;
1905
1906 /* Cancel any fast tracepoint lock this thread was
1907 holding. */
1908 force_unlock_trace_buffer ();
1909 }
1910
1911 if (lwp->exit_jump_pad_bkpt != NULL)
1912 {
1913 if (debug_threads)
1914 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1915 "stopping all threads momentarily.\n");
1916
1917 stop_all_lwps (1, lwp);
1918
1919 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1920 lwp->exit_jump_pad_bkpt = NULL;
1921
1922 unstop_all_lwps (1, lwp);
1923
1924 gdb_assert (lwp->suspended >= 0);
1925 }
1926 }
1927 }
1928
1929 if (debug_threads)
1930 debug_printf ("Checking whether LWP %ld needs to move out of the "
1931 "jump pad...no\n",
1932 lwpid_of (current_thread));
1933
1934 current_thread = saved_thread;
1935 return 0;
1936 }
1937
1938 /* Enqueue one signal in the "signals to report later when out of the
1939 jump pad" list. */
1940
1941 static void
1942 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1943 {
1944 struct pending_signals *p_sig;
1945 struct thread_info *thread = get_lwp_thread (lwp);
1946
1947 if (debug_threads)
1948 debug_printf ("Deferring signal %d for LWP %ld.\n",
1949 WSTOPSIG (*wstat), lwpid_of (thread));
1950
1951 if (debug_threads)
1952 {
1953 struct pending_signals *sig;
1954
1955 for (sig = lwp->pending_signals_to_report;
1956 sig != NULL;
1957 sig = sig->prev)
1958 debug_printf (" Already queued %d\n",
1959 sig->signal);
1960
1961 debug_printf (" (no more currently queued signals)\n");
1962 }
1963
1964 /* Don't enqueue non-RT signals if they are already in the deferred
1965 queue. (SIGSTOP being the easiest signal to see ending up here
1966 twice) */
1967 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1968 {
1969 struct pending_signals *sig;
1970
1971 for (sig = lwp->pending_signals_to_report;
1972 sig != NULL;
1973 sig = sig->prev)
1974 {
1975 if (sig->signal == WSTOPSIG (*wstat))
1976 {
1977 if (debug_threads)
1978 debug_printf ("Not requeuing already queued non-RT signal %d"
1979 " for LWP %ld\n",
1980 sig->signal,
1981 lwpid_of (thread));
1982 return;
1983 }
1984 }
1985 }
1986
1987 p_sig = xmalloc (sizeof (*p_sig));
1988 p_sig->prev = lwp->pending_signals_to_report;
1989 p_sig->signal = WSTOPSIG (*wstat);
1990 memset (&p_sig->info, 0, sizeof (siginfo_t));
1991 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1992 &p_sig->info);
1993
1994 lwp->pending_signals_to_report = p_sig;
1995 }
1996
1997 /* Dequeue one signal from the "signals to report later when out of
1998 the jump pad" list. */
1999
2000 static int
2001 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2002 {
2003 struct thread_info *thread = get_lwp_thread (lwp);
2004
2005 if (lwp->pending_signals_to_report != NULL)
2006 {
2007 struct pending_signals **p_sig;
2008
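/* Signals are pushed at the head of the list by
   enqueue_one_deferred_signal, so walk to the tail to report the
   oldest deferred signal first (FIFO order). */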
2009 p_sig = &lwp->pending_signals_to_report;
2010 while ((*p_sig)->prev != NULL)
2011 p_sig = &(*p_sig)->prev;
2012
2013 *wstat = W_STOPCODE ((*p_sig)->signal);
2014 if ((*p_sig)->info.si_signo != 0)
2015 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2016 &(*p_sig)->info);
2017 free (*p_sig);
2018 *p_sig = NULL;
2019
2020 if (debug_threads)
2021 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2022 WSTOPSIG (*wstat), lwpid_of (thread));
2023
2024 if (debug_threads)
2025 {
2026 struct pending_signals *sig;
2027
2028 for (sig = lwp->pending_signals_to_report;
2029 sig != NULL;
2030 sig = sig->prev)
2031 debug_printf (" Still queued %d\n",
2032 sig->signal);
2033
2034 debug_printf (" (no more queued signals)\n");
2035 }
2036
2037 return 1;
2038 }
2039
2040 return 0;
2041 }
2042
2043 /* Fetch the possibly triggered data watchpoint info and store it in
2044 CHILD.
2045
2046 On some archs, like x86, that use debug registers to set
2047 watchpoints, it's possible that the way to know which watched
2048 address trapped is to check the register that is used to select
2049 which address to watch. Problem is, between setting the watchpoint
2050 and reading back which data address trapped, the user may change
2051 the set of watchpoints, and, as a consequence, GDB changes the
2052 debug registers in the inferior. To avoid reading back a stale
2053 stopped-data-address when that happens, we cache in CHILD the fact
2054 that a watchpoint trapped, and the corresponding data address, as
2055 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2056 registers meanwhile, we have the cached data we can rely on. */
2057
2058 static int
2059 check_stopped_by_watchpoint (struct lwp_info *child)
2060 {
2061 if (the_low_target.stopped_by_watchpoint != NULL)
2062 {
2063 struct thread_info *saved_thread;
2064
2065 saved_thread = current_thread;
2066 current_thread = get_lwp_thread (child);
2067
2068 if (the_low_target.stopped_by_watchpoint ())
2069 {
2070 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2071
2072 if (the_low_target.stopped_data_address != NULL)
2073 child->stopped_data_address
2074 = the_low_target.stopped_data_address ();
2075 else
2076 child->stopped_data_address = 0;
2077 }
2078
2079 current_thread = saved_thread;
2080 }
2081
2082 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2083 }
2084
2085 /* Return the ptrace options that we want to try to enable. */
2086
2087 static int
2088 linux_low_ptrace_options (int attached)
2089 {
2090 int options = 0;
2091
2092 if (!attached)
2093 options |= PTRACE_O_EXITKILL;
2094
2095 if (report_fork_events)
2096 options |= PTRACE_O_TRACEFORK;
2097
2098 if (report_vfork_events)
2099 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2100
2101 return options;
2102 }
2103
2104 /* Do low-level handling of the event, and check if we should go on
2105 and pass it to caller code. Return the affected lwp if so, or
2106 NULL otherwise. */
2107
2108 static struct lwp_info *
2109 linux_low_filter_event (int lwpid, int wstat)
2110 {
2111 struct lwp_info *child;
2112 struct thread_info *thread;
2113 int have_stop_pc = 0;
2114
2115 child = find_lwp_pid (pid_to_ptid (lwpid));
2116
2117 /* If we didn't find a process, one of two things presumably happened:
2118 - A process we started and then detached from has exited. Ignore it.
2119 - A process we are controlling has forked and the new child's stop
2120 was reported to us by the kernel. Save its PID. */
2121 if (child == NULL && WIFSTOPPED (wstat))
2122 {
2123 add_to_pid_list (&stopped_pids, lwpid, wstat);
2124 return NULL;
2125 }
2126 else if (child == NULL)
2127 return NULL;
2128
2129 thread = get_lwp_thread (child);
2130
2131 child->stopped = 1;
2132
2133 child->last_status = wstat;
2134
2135 /* Check if the thread has exited. */
2136 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2137 {
2138 if (debug_threads)
2139 debug_printf ("LLFE: %d exited.\n", lwpid);
2140 if (num_lwps (pid_of (thread)) > 1)
2141 {
2142
2143 /* If there is at least one more LWP, then the exit signal was
2144 not the end of the debugged application and should be
2145 ignored. */
2146 delete_lwp (child);
2147 return NULL;
2148 }
2149 else
2150 {
2151 /* This was the last lwp in the process. Since events are
2152 serialized to GDB core, and we can't report this one
2153 right now, but GDB core and the other target layers will
2154 want to be notified about the exit code/signal, leave the
2155 status pending for the next time we're able to report
2156 it. */
2157 mark_lwp_dead (child, wstat);
2158 return child;
2159 }
2160 }
2161
2162 gdb_assert (WIFSTOPPED (wstat));
2163
2164 if (WIFSTOPPED (wstat))
2165 {
2166 struct process_info *proc;
2167
2168 /* Architecture-specific setup after inferior is running. */
2169 proc = find_process_pid (pid_of (thread));
2170 if (proc->tdesc == NULL)
2171 {
2172 if (proc->attached)
2173 {
2174 struct thread_info *saved_thread;
2175
2176 /* This needs to happen after we have attached to the
2177 inferior and it is stopped for the first time, but
2178 before we access any inferior registers. */
2179 saved_thread = current_thread;
2180 current_thread = thread;
2181
2182 the_low_target.arch_setup ();
2183
2184 current_thread = saved_thread;
2185 }
2186 else
2187 {
2188 /* The process is started, but GDBserver will do
2189 architecture-specific setup after the program stops at
2190 the first instruction. */
2191 child->status_pending_p = 1;
2192 child->status_pending = wstat;
2193 return child;
2194 }
2195 }
2196 }
2197
2198 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2199 {
2200 struct process_info *proc = find_process_pid (pid_of (thread));
2201 int options = linux_low_ptrace_options (proc->attached);
2202
2203 linux_enable_event_reporting (lwpid, options);
2204 child->must_set_ptrace_flags = 0;
2205 }
2206
2207 /* Be careful to not overwrite stop_pc until
2208 check_stopped_by_breakpoint is called. */
2209 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2210 && linux_is_extended_waitstatus (wstat))
2211 {
2212 child->stop_pc = get_pc (child);
2213 if (handle_extended_wait (child, wstat))
2214 {
2215 /* The event has been handled, so just return without
2216 reporting it. */
2217 return NULL;
2218 }
2219 }
2220
2221 /* Check first whether this was a SW/HW breakpoint before checking
2222 watchpoints, because at least s390 can't tell the data address of
2223 hardware watchpoint hits, and returns stopped-by-watchpoint as
2224 long as there's a watchpoint set. */
2225 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2226 {
2227 if (check_stopped_by_breakpoint (child))
2228 have_stop_pc = 1;
2229 }
2230
2231 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2232 or hardware watchpoint. Check which is which if we got
2233 TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
2234 stepped an instruction that triggered a watchpoint. In that
2235 case, on some architectures (such as x86), instead of
2236 TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
2237 the debug registers separately. */
2238 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2239 && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
2240 check_stopped_by_watchpoint (child);
2241
2242 if (!have_stop_pc)
2243 child->stop_pc = get_pc (child);
2244
2245 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2246 && child->stop_expected)
2247 {
2248 if (debug_threads)
2249 debug_printf ("Expected stop.\n");
2250 child->stop_expected = 0;
2251
2252 if (thread->last_resume_kind == resume_stop)
2253 {
2254 /* We want to report the stop to the core. Treat the
2255 SIGSTOP as a normal event. */
2256 if (debug_threads)
2257 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2258 target_pid_to_str (ptid_of (thread)));
2259 }
2260 else if (stopping_threads != NOT_STOPPING_THREADS)
2261 {
2262 /* Stopping threads. We don't want this SIGSTOP to end up
2263 pending. */
2264 if (debug_threads)
2265 debug_printf ("LLW: SIGSTOP caught for %s "
2266 "while stopping threads.\n",
2267 target_pid_to_str (ptid_of (thread)));
2268 return NULL;
2269 }
2270 else
2271 {
2272 /* This is a delayed SIGSTOP. Filter out the event. */
2273 if (debug_threads)
2274 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2275 child->stepping ? "step" : "continue",
2276 target_pid_to_str (ptid_of (thread)));
2277
2278 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2279 return NULL;
2280 }
2281 }
2282
2283 child->status_pending_p = 1;
2284 child->status_pending = wstat;
2285 return child;
2286 }
2287
2288 /* Resume LWPs that are currently stopped without any pending status
2289 to report, but are resumed from the core's perspective. */
2290
2291 static void
2292 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2293 {
2294 struct thread_info *thread = (struct thread_info *) entry;
2295 struct lwp_info *lp = get_thread_lwp (thread);
2296
2297 if (lp->stopped
2298 && !lp->suspended
2299 && !lp->status_pending_p
2300 && thread->last_resume_kind != resume_stop
2301 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2302 {
2303 int step = thread->last_resume_kind == resume_step;
2304
2305 if (debug_threads)
2306 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2307 target_pid_to_str (ptid_of (thread)),
2308 paddress (lp->stop_pc),
2309 step);
2310
2311 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2312 }
2313 }
2314
2315 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2316 match FILTER_PTID (leaving others pending). The PTIDs can be:
2317 minus_one_ptid, to specify any child; a pid PTID, specifying all
2318 lwps of a thread group; or a PTID representing a single lwp. Store
2319 the stop status through the status pointer WSTAT. OPTIONS is
2320 passed to the waitpid call. Return 0 if no event was found and
2321 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2322 were found. Return the PID of the stopped child otherwise. */
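/* For example, wait_for_sigstop calls this with WAIT_PTID ==
   minus_one_ptid and FILTER_PTID == null_ptid, so that events are
   pulled from all children but every one of them is left pending. */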
2323
2324 static int
2325 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2326 int *wstatp, int options)
2327 {
2328 struct thread_info *event_thread;
2329 struct lwp_info *event_child, *requested_child;
2330 sigset_t block_mask, prev_mask;
2331
2332 retry:
2333 /* N.B. event_thread points to the thread_info struct that contains
2334 event_child. Keep them in sync. */
2335 event_thread = NULL;
2336 event_child = NULL;
2337 requested_child = NULL;
2338
2339 /* Check for a lwp with a pending status. */
2340
2341 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2342 {
2343 event_thread = (struct thread_info *)
2344 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2345 if (event_thread != NULL)
2346 event_child = get_thread_lwp (event_thread);
2347 if (debug_threads && event_thread)
2348 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2349 }
2350 else if (!ptid_equal (filter_ptid, null_ptid))
2351 {
2352 requested_child = find_lwp_pid (filter_ptid);
2353
2354 if (stopping_threads == NOT_STOPPING_THREADS
2355 && requested_child->status_pending_p
2356 && requested_child->collecting_fast_tracepoint)
2357 {
2358 enqueue_one_deferred_signal (requested_child,
2359 &requested_child->status_pending);
2360 requested_child->status_pending_p = 0;
2361 requested_child->status_pending = 0;
2362 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2363 }
2364
2365 if (requested_child->suspended
2366 && requested_child->status_pending_p)
2367 {
2368 internal_error (__FILE__, __LINE__,
2369 "requesting an event out of a"
2370 " suspended child?");
2371 }
2372
2373 if (requested_child->status_pending_p)
2374 {
2375 event_child = requested_child;
2376 event_thread = get_lwp_thread (event_child);
2377 }
2378 }
2379
2380 if (event_child != NULL)
2381 {
2382 if (debug_threads)
2383 debug_printf ("Got an event from pending child %ld (%04x)\n",
2384 lwpid_of (event_thread), event_child->status_pending);
2385 *wstatp = event_child->status_pending;
2386 event_child->status_pending_p = 0;
2387 event_child->status_pending = 0;
2388 current_thread = event_thread;
2389 return lwpid_of (event_thread);
2390 }
2391
2392 /* But if we don't find a pending event, we'll have to wait.
2393
2394 We only enter this loop if no process has a pending wait status.
2395 Thus any action taken in response to a wait status inside this
2396 loop is responding as soon as we detect the status, not after any
2397 pending events. */
2398
2399 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2400 all signals while here. */
2401 sigfillset (&block_mask);
2402 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2403
2404 /* Always pull all events out of the kernel. We'll randomly select
2405 an event LWP out of all that have events, to prevent
2406 starvation. */
2407 while (event_child == NULL)
2408 {
2409 pid_t ret = 0;
2410
2411 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2412 quirks:
2413
2414 - If the thread group leader exits while other threads in the
2415 thread group still exist, waitpid(TGID, ...) hangs. That
2416 waitpid won't return an exit status until the other threads
2417 in the group are reaped.
2418
2419 - When a non-leader thread execs, that thread just vanishes
2420 without reporting an exit (so we'd hang if we waited for it
2421 explicitly in that case). The exec event is reported to
2422 the TGID pid (although we don't currently enable exec
2423 events). */
2424 errno = 0;
2425 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2426
2427 if (debug_threads)
2428 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2429 ret, errno ? strerror (errno) : "ERRNO-OK");
2430
2431 if (ret > 0)
2432 {
2433 if (debug_threads)
2434 {
2435 debug_printf ("LLW: waitpid %ld received %s\n",
2436 (long) ret, status_to_str (*wstatp));
2437 }
2438
2439 /* Filter all events. IOW, leave all events pending. We'll
2440 randomly select an event LWP out of all that have events
2441 below. */
2442 linux_low_filter_event (ret, *wstatp);
2443 /* Retry until nothing comes out of waitpid. A single
2444 SIGCHLD can indicate more than one child stopped. */
2445 continue;
2446 }
2447
2448 /* Now that we've pulled all events out of the kernel, resume
2449 LWPs that don't have an interesting event to report. */
2450 if (stopping_threads == NOT_STOPPING_THREADS)
2451 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2452
2453 /* ... and find an LWP with a status to report to the core, if
2454 any. */
2455 event_thread = (struct thread_info *)
2456 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2457 if (event_thread != NULL)
2458 {
2459 event_child = get_thread_lwp (event_thread);
2460 *wstatp = event_child->status_pending;
2461 event_child->status_pending_p = 0;
2462 event_child->status_pending = 0;
2463 break;
2464 }
2465
2466 /* Check for zombie thread group leaders. Those can't be reaped
2467 until all other threads in the thread group are. */
2468 check_zombie_leaders ();
2469
2470 /* If there are no resumed children left in the set of LWPs we
2471 want to wait for, bail. We can't just block in
2472 waitpid/sigsuspend, because lwps might have been left stopped
2473 in trace-stop state, and we'd be stuck forever waiting for
2474 their status to change (which would only happen if we resumed
2475 them). Even if WNOHANG is set, this return code is preferred
2476 over 0 (below), as it is more detailed. */
2477 if ((find_inferior (&all_threads,
2478 not_stopped_callback,
2479 &wait_ptid) == NULL))
2480 {
2481 if (debug_threads)
2482 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2483 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2484 return -1;
2485 }
2486
2487 /* No interesting event to report to the caller. */
2488 if ((options & WNOHANG))
2489 {
2490 if (debug_threads)
2491 debug_printf ("WNOHANG set, no event found\n");
2492
2493 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2494 return 0;
2495 }
2496
2497 /* Block until we get an event reported with SIGCHLD. */
2498 if (debug_threads)
2499 debug_printf ("sigsuspend'ing\n");
2500
2501 sigsuspend (&prev_mask);
2502 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2503 goto retry;
2504 }
2505
2506 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2507
2508 current_thread = event_thread;
2509
2510 /* Check for thread exit. */
2511 if (! WIFSTOPPED (*wstatp))
2512 {
2513 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2514
2515 if (debug_threads)
2516 debug_printf ("LWP %d is the last lwp of process. "
2517 "Process %ld exiting.\n",
2518 pid_of (event_thread), lwpid_of (event_thread));
2519 return lwpid_of (event_thread);
2520 }
2521
2522 return lwpid_of (event_thread);
2523 }
2524
2525 /* Wait for an event from child(ren) PTID. PTIDs can be:
2526 minus_one_ptid, to specify any child; a pid PTID, specifying all
2527 lwps of a thread group; or a PTID representing a single lwp. Store
2528 the stop status through the status pointer WSTAT. OPTIONS is
2529 passed to the waitpid call. Return 0 if no event was found and
2530 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2531 were found. Return the PID of the stopped child otherwise. */
2532
2533 static int
2534 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2535 {
2536 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2537 }
2538
2539 /* Count the LWPs that have had events. */
2540
2541 static int
2542 count_events_callback (struct inferior_list_entry *entry, void *data)
2543 {
2544 struct thread_info *thread = (struct thread_info *) entry;
2545 struct lwp_info *lp = get_thread_lwp (thread);
2546 int *count = data;
2547
2548 gdb_assert (count != NULL);
2549
2550 /* Count only resumed LWPs that have an event pending. */
2551 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2552 && lp->status_pending_p)
2553 (*count)++;
2554
2555 return 0;
2556 }
2557
2558 /* Select the LWP (if any) that is currently being single-stepped. */
2559
2560 static int
2561 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2562 {
2563 struct thread_info *thread = (struct thread_info *) entry;
2564 struct lwp_info *lp = get_thread_lwp (thread);
2565
2566 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2567 && thread->last_resume_kind == resume_step
2568 && lp->status_pending_p)
2569 return 1;
2570 else
2571 return 0;
2572 }
2573
2574 /* Select the Nth LWP that has had an event. */
2575
2576 static int
2577 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2578 {
2579 struct thread_info *thread = (struct thread_info *) entry;
2580 struct lwp_info *lp = get_thread_lwp (thread);
2581 int *selector = data;
2582
2583 gdb_assert (selector != NULL);
2584
2585 /* Select only resumed LWPs that have an event pending. */
2586 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2587 && lp->status_pending_p)
2588 if ((*selector)-- == 0)
2589 return 1;
2590
2591 return 0;
2592 }
2593
2594 /* Select one LWP out of those that have events pending. */
2595
2596 static void
2597 select_event_lwp (struct lwp_info **orig_lp)
2598 {
2599 int num_events = 0;
2600 int random_selector;
2601 struct thread_info *event_thread = NULL;
2602
2603 /* In all-stop, give preference to the LWP that is being
2604 single-stepped. There will be at most one, and it's the LWP that
2605 the core is most interested in. If we didn't do this, then we'd
2606 have to handle pending step SIGTRAPs somehow in case the core
2607 later continues the previously-stepped thread, otherwise we'd
2608 report the pending SIGTRAP, and the core, not having stepped the
2609 thread, wouldn't understand what the trap was for, and therefore
2610 would report it to the user as a random signal. */
2611 if (!non_stop)
2612 {
2613 event_thread
2614 = (struct thread_info *) find_inferior (&all_threads,
2615 select_singlestep_lwp_callback,
2616 NULL);
2617 if (event_thread != NULL)
2618 {
2619 if (debug_threads)
2620 debug_printf ("SEL: Select single-step %s\n",
2621 target_pid_to_str (ptid_of (event_thread)));
2622 }
2623 }
2624 if (event_thread == NULL)
2625 {
2626 /* No single-stepping LWP. Select one at random, out of those
2627 which have had events. */
2628
2629 /* First see how many events we have. */
2630 find_inferior (&all_threads, count_events_callback, &num_events);
2631 gdb_assert (num_events > 0);
2632
2633 /* Now randomly pick a LWP out of those that have had
2634 events. */
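/* This maps rand ()'s [0, RAND_MAX] range onto an index in
   [0, NUM_EVENTS), approximately uniformly. */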
2635 random_selector = (int)
2636 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2637
2638 if (debug_threads && num_events > 1)
2639 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2640 num_events, random_selector);
2641
2642 event_thread
2643 = (struct thread_info *) find_inferior (&all_threads,
2644 select_event_lwp_callback,
2645 &random_selector);
2646 }
2647
2648 if (event_thread != NULL)
2649 {
2650 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2651
2652 /* Switch the event LWP. */
2653 *orig_lp = event_lp;
2654 }
2655 }
2656
2657 /* Decrement the suspend count of an LWP. */
2658
2659 static int
2660 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2661 {
2662 struct thread_info *thread = (struct thread_info *) entry;
2663 struct lwp_info *lwp = get_thread_lwp (thread);
2664
2665 /* Ignore EXCEPT. */
2666 if (lwp == except)
2667 return 0;
2668
2669 lwp_suspended_decr (lwp);
2670 return 0;
2671 }
2672
2673 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2674 non-NULL. */
2675
2676 static void
2677 unsuspend_all_lwps (struct lwp_info *except)
2678 {
2679 find_inferior (&all_threads, unsuspend_one_lwp, except);
2680 }
2681
2682 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2683 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2684 void *data);
2685 static int lwp_running (struct inferior_list_entry *entry, void *data);
2686 static ptid_t linux_wait_1 (ptid_t ptid,
2687 struct target_waitstatus *ourstatus,
2688 int target_options);
2689
2690 /* Stabilize threads (move out of jump pads).
2691
2692 If a thread is midway collecting a fast tracepoint, we need to
2693 finish the collection and move it out of the jump pad before
2694 reporting the signal.
2695
2696 This avoids recursion while collecting (when a signal arrives
2697 midway, and the signal handler itself collects), which would trash
2698 the trace buffer. In case the user set a breakpoint in a signal
2699 handler, this avoids the backtrace showing the jump pad, etc.
2700 Most importantly, there are certain things we can't do safely if
2701 threads are stopped in a jump pad (or in its callees). For
2702 example:
2703
2704 - starting a new trace run. A thread still collecting the
2705 previous run could trash the trace buffer when resumed. The trace
2706 buffer control structures would have been reset but the thread had
2707 no way to tell. The thread could even be midway memcpy'ing to the
2708 buffer, which would mean that when resumed, it would clobber the
2709 trace buffer that had been set for a new run.
2710
2711 - we can't rewrite/reuse the jump pads for new tracepoints
2712 safely. Say you do tstart while a thread is stopped midway through
2713 collecting. When the thread is later resumed, it finishes the
2714 collection, and returns to the jump pad, to execute the original
2715 instruction that was under the tracepoint jump at the time the
2716 older run had been started. If the jump pad had been rewritten
2717 since for something else in the new run, the thread would now
2718 execute the wrong / random instructions. */
2719
2720 static void
2721 linux_stabilize_threads (void)
2722 {
2723 struct thread_info *saved_thread;
2724 struct thread_info *thread_stuck;
2725
2726 thread_stuck
2727 = (struct thread_info *) find_inferior (&all_threads,
2728 stuck_in_jump_pad_callback,
2729 NULL);
2730 if (thread_stuck != NULL)
2731 {
2732 if (debug_threads)
2733 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2734 lwpid_of (thread_stuck));
2735 return;
2736 }
2737
2738 saved_thread = current_thread;
2739
2740 stabilizing_threads = 1;
2741
2742 /* Kick 'em all. */
2743 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2744
2745 /* Loop until all are stopped out of the jump pads. */
2746 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2747 {
2748 struct target_waitstatus ourstatus;
2749 struct lwp_info *lwp;
2750 int wstat;
2751
2752 /* Note that we go through the full wait event loop. While
2753 moving threads out of jump pad, we need to be able to step
2754 over internal breakpoints and such. */
2755 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2756
2757 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2758 {
2759 lwp = get_thread_lwp (current_thread);
2760
2761 /* Lock it. */
2762 lwp_suspended_inc (lwp);
2763
2764 if (ourstatus.value.sig != GDB_SIGNAL_0
2765 || current_thread->last_resume_kind == resume_stop)
2766 {
2767 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2768 enqueue_one_deferred_signal (lwp, &wstat);
2769 }
2770 }
2771 }
2772
2773 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2774
2775 stabilizing_threads = 0;
2776
2777 current_thread = saved_thread;
2778
2779 if (debug_threads)
2780 {
2781 thread_stuck
2782 = (struct thread_info *) find_inferior (&all_threads,
2783 stuck_in_jump_pad_callback,
2784 NULL);
2785 if (thread_stuck != NULL)
2786 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2787 lwpid_of (thread_stuck));
2788 }
2789 }
2790
2791 static void async_file_mark (void);
2792
2793 /* Convenience function that is called when the kernel reports an
2794 event that is not passed out to GDB. */
2795
2796 static ptid_t
2797 ignore_event (struct target_waitstatus *ourstatus)
2798 {
2799 /* If we got an event, there may still be others, as a single
2800 SIGCHLD can indicate more than one child stopped. This forces
2801 another target_wait call. */
2802 async_file_mark ();
2803
2804 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2805 return null_ptid;
2806 }
2807
2808 /* Wait for an event from any child, do the low-level handling, and either report it to the caller or resume the inferior and keep the event pending; returns the reporting thread's ptid, or null_ptid if nothing is reported. */
2809
2810 static ptid_t
2811 linux_wait_1 (ptid_t ptid,
2812 struct target_waitstatus *ourstatus, int target_options)
2813 {
2814 int w;
2815 struct lwp_info *event_child;
2816 int options;
2817 int pid;
2818 int step_over_finished;
2819 int bp_explains_trap;
2820 int maybe_internal_trap;
2821 int report_to_gdb;
2822 int trace_event;
2823 int in_step_range;
2824
2825 if (debug_threads)
2826 {
2827 debug_enter ();
2828 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2829 }
2830
2831 /* Translate generic target options into linux options. */
2832 options = __WALL;
2833 if (target_options & TARGET_WNOHANG)
2834 options |= WNOHANG;
2835
2836 bp_explains_trap = 0;
2837 trace_event = 0;
2838 in_step_range = 0;
2839 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2840
2841 if (ptid_equal (step_over_bkpt, null_ptid))
2842 pid = linux_wait_for_event (ptid, &w, options);
2843 else
2844 {
2845 if (debug_threads)
2846 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2847 target_pid_to_str (step_over_bkpt));
2848 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2849 }
2850
2851 if (pid == 0)
2852 {
2853 gdb_assert (target_options & TARGET_WNOHANG);
2854
2855 if (debug_threads)
2856 {
2857 debug_printf ("linux_wait_1 ret = null_ptid, "
2858 "TARGET_WAITKIND_IGNORE\n");
2859 debug_exit ();
2860 }
2861
2862 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2863 return null_ptid;
2864 }
2865 else if (pid == -1)
2866 {
2867 if (debug_threads)
2868 {
2869 debug_printf ("linux_wait_1 ret = null_ptid, "
2870 "TARGET_WAITKIND_NO_RESUMED\n");
2871 debug_exit ();
2872 }
2873
2874 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2875 return null_ptid;
2876 }
2877
2878 event_child = get_thread_lwp (current_thread);
2879
2880 /* linux_wait_for_event only returns an exit status for the last
2881 child of a process. Report it. */
2882 if (WIFEXITED (w) || WIFSIGNALED (w))
2883 {
2884 if (WIFEXITED (w))
2885 {
2886 ourstatus->kind = TARGET_WAITKIND_EXITED;
2887 ourstatus->value.integer = WEXITSTATUS (w);
2888
2889 if (debug_threads)
2890 {
2891 debug_printf ("linux_wait_1 ret = %s, exited with "
2892 "retcode %d\n",
2893 target_pid_to_str (ptid_of (current_thread)),
2894 WEXITSTATUS (w));
2895 debug_exit ();
2896 }
2897 }
2898 else
2899 {
2900 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2901 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2902
2903 if (debug_threads)
2904 {
2905 debug_printf ("linux_wait_1 ret = %s, terminated with "
2906 "signal %d\n",
2907 target_pid_to_str (ptid_of (current_thread)),
2908 WTERMSIG (w));
2909 debug_exit ();
2910 }
2911 }
2912
2913 return ptid_of (current_thread);
2914 }
2915
2916 /* If step-over executes a breakpoint instruction, it means a
2917 gdb/gdbserver breakpoint had been planted on top of a permanent
2918 breakpoint. The PC has been adjusted by
2919 check_stopped_by_breakpoint to point at the breakpoint address.
2920 Advance the PC manually past the breakpoint, otherwise the
2921 program would keep trapping the permanent breakpoint forever. */
2922 if (!ptid_equal (step_over_bkpt, null_ptid)
2923 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2924 {
2925 unsigned int increment_pc = the_low_target.breakpoint_len;
2926
2927 if (debug_threads)
2928 {
2929 debug_printf ("step-over for %s executed software breakpoint\n",
2930 target_pid_to_str (ptid_of (current_thread)));
2931 }
2932
2933 if (increment_pc != 0)
2934 {
2935 struct regcache *regcache
2936 = get_thread_regcache (current_thread, 1);
2937
2938 event_child->stop_pc += increment_pc;
2939 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2940
2941 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2942 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2943 }
2944 }
2945
2946 /* If this event was not handled before, and is not a SIGTRAP, we
2947 report it. SIGILL and SIGSEGV are also treated as traps in case
2948 a breakpoint is inserted at the current PC. If this target does
2949 not support internal breakpoints at all, we also report the
2950 SIGTRAP without further processing; it's of no concern to us. */
2951 maybe_internal_trap
2952 = (supports_breakpoints ()
2953 && (WSTOPSIG (w) == SIGTRAP
2954 || ((WSTOPSIG (w) == SIGILL
2955 || WSTOPSIG (w) == SIGSEGV)
2956 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2957
2958 if (maybe_internal_trap)
2959 {
2960 /* Handle anything that requires bookkeeping before deciding to
2961 report the event or continue waiting. */
2962
2963 /* First check if we can explain the SIGTRAP with an internal
2964 breakpoint, or if we should possibly report the event to GDB.
2965 Do this before anything that may remove or insert a
2966 breakpoint. */
2967 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2968
2969 /* We have a SIGTRAP, possibly a step-over dance has just
2970 finished. If so, tweak the state machine accordingly,
2971 reinsert breakpoints and delete any reinsert (software
2972 single-step) breakpoints. */
2973 step_over_finished = finish_step_over (event_child);
2974
2975 /* Now invoke the callbacks of any internal breakpoints there. */
2976 check_breakpoints (event_child->stop_pc);
2977
2978 /* Handle tracepoint data collecting. This may overflow the
2979 trace buffer, and cause a tracing stop, removing
2980 breakpoints. */
2981 trace_event = handle_tracepoints (event_child);
2982
2983 if (bp_explains_trap)
2984 {
2985 /* If we stepped or ran into an internal breakpoint, we've
2986 already handled it. So next time we resume (from this
2987 PC), we should step over it. */
2988 if (debug_threads)
2989 debug_printf ("Hit a gdbserver breakpoint.\n");
2990
2991 if (breakpoint_here (event_child->stop_pc))
2992 event_child->need_step_over = 1;
2993 }
2994 }
2995 else
2996 {
2997 /* We have some other signal, possibly a step-over dance was in
2998 progress, and it should be cancelled too. */
2999 step_over_finished = finish_step_over (event_child);
3000 }
3001
3002 /* We have all the data we need. Either report the event to GDB, or
3003 resume threads and keep waiting for more. */
3004
3005 /* If we're collecting a fast tracepoint, finish the collection and
3006 move out of the jump pad before delivering a signal. See
3007 linux_stabilize_threads. */
3008
3009 if (WIFSTOPPED (w)
3010 && WSTOPSIG (w) != SIGTRAP
3011 && supports_fast_tracepoints ()
3012 && agent_loaded_p ())
3013 {
3014 if (debug_threads)
3015 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3016 "to defer or adjust it.\n",
3017 WSTOPSIG (w), lwpid_of (current_thread));
3018
3019 /* Allow debugging the jump pad itself. */
3020 if (current_thread->last_resume_kind != resume_step
3021 && maybe_move_out_of_jump_pad (event_child, &w))
3022 {
3023 enqueue_one_deferred_signal (event_child, &w);
3024
3025 if (debug_threads)
3026 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3027 WSTOPSIG (w), lwpid_of (current_thread));
3028
3029 linux_resume_one_lwp (event_child, 0, 0, NULL);
3030
3031 return ignore_event (ourstatus);
3032 }
3033 }
3034
3035 if (event_child->collecting_fast_tracepoint)
3036 {
3037 if (debug_threads)
3038 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3039 "Check if we're already there.\n",
3040 lwpid_of (current_thread),
3041 event_child->collecting_fast_tracepoint);
3042
3043 trace_event = 1;
3044
3045 event_child->collecting_fast_tracepoint
3046 = linux_fast_tracepoint_collecting (event_child, NULL);
3047
3048 if (event_child->collecting_fast_tracepoint != 1)
3049 {
3050 /* No longer need this breakpoint. */
3051 if (event_child->exit_jump_pad_bkpt != NULL)
3052 {
3053 if (debug_threads)
3054 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3055 "stopping all threads momentarily.\n");
3056
3057 /* Other running threads could hit this breakpoint.
3058 We don't handle moribund locations like GDB does,
3059 instead we always pause all threads when removing
3060 breakpoints, so that any step-over or
3061 decr_pc_after_break adjustment is always taken
3062 care of while the breakpoint is still
3063 inserted. */
3064 stop_all_lwps (1, event_child);
3065
3066 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3067 event_child->exit_jump_pad_bkpt = NULL;
3068
3069 unstop_all_lwps (1, event_child);
3070
3071 gdb_assert (event_child->suspended >= 0);
3072 }
3073 }
3074
3075 if (event_child->collecting_fast_tracepoint == 0)
3076 {
3077 if (debug_threads)
3078 debug_printf ("fast tracepoint finished "
3079 "collecting successfully.\n");
3080
3081 /* We may have a deferred signal to report. */
3082 if (dequeue_one_deferred_signal (event_child, &w))
3083 {
3084 if (debug_threads)
3085 debug_printf ("dequeued one signal.\n");
3086 }
3087 else
3088 {
3089 if (debug_threads)
3090 debug_printf ("no deferred signals.\n");
3091
3092 if (stabilizing_threads)
3093 {
3094 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3095 ourstatus->value.sig = GDB_SIGNAL_0;
3096
3097 if (debug_threads)
3098 {
3099 debug_printf ("linux_wait_1 ret = %s, stopped "
3100 "while stabilizing threads\n",
3101 target_pid_to_str (ptid_of (current_thread)));
3102 debug_exit ();
3103 }
3104
3105 return ptid_of (current_thread);
3106 }
3107 }
3108 }
3109 }
3110
3111 /* Check whether GDB would be interested in this event. */
3112
3113 /* If GDB is not interested in this signal, don't stop other
3114 threads, and don't report it to GDB. Just resume the inferior
3115 right away. We do this for threading-related signals as well as
3116 any that GDB specifically requested we ignore. But never ignore
3117 SIGSTOP if we sent it ourselves, and do not ignore signals when
3118 stepping - they may require special handling to skip the signal
3119 handler. Also never ignore signals that could be caused by a
3120 breakpoint. */
3121 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3122 thread library? */
3123 if (WIFSTOPPED (w)
3124 && current_thread->last_resume_kind != resume_step
3125 && (
3126 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3127 (current_process ()->priv->thread_db != NULL
3128 && (WSTOPSIG (w) == __SIGRTMIN
3129 || WSTOPSIG (w) == __SIGRTMIN + 1))
3130 ||
3131 #endif
3132 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3133 && !(WSTOPSIG (w) == SIGSTOP
3134 && current_thread->last_resume_kind == resume_stop)
3135 && !linux_wstatus_maybe_breakpoint (w))))
3136 {
3137 siginfo_t info, *info_p;
3138
3139 if (debug_threads)
3140 debug_printf ("Ignored signal %d for LWP %ld.\n",
3141 WSTOPSIG (w), lwpid_of (current_thread));
3142
3143 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3144 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3145 info_p = &info;
3146 else
3147 info_p = NULL;
3148
3149 if (step_over_finished)
3150 {
3151 /* We cancelled this thread's step-over above. We still
3152 need to unsuspend all other LWPs, and set them back
3153 running again while the signal handler runs. */
3154 unsuspend_all_lwps (event_child);
3155
3156 /* Enqueue the pending signal info so that proceed_all_lwps
3157 doesn't lose it. */
3158 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3159
3160 proceed_all_lwps ();
3161 }
3162 else
3163 {
3164 linux_resume_one_lwp (event_child, event_child->stepping,
3165 WSTOPSIG (w), info_p);
3166 }
3167 return ignore_event (ourstatus);
3168 }
3169
3170 /* Note that all addresses are always "out of the step range" when
3171 there's no range to begin with. */
3172 in_step_range = lwp_in_step_range (event_child);
3173
3174 /* If GDB wanted this thread to single step, and the thread is out
3175 of the step range, we always want to report the SIGTRAP, and let
3176 GDB handle it. Watchpoints should always be reported. So should
3177 signals we can't explain. A SIGTRAP we can't explain could be a
3178 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3179 do, we'd be able to handle GDB breakpoints on top of internal
3180 breakpoints, by handling the internal breakpoint and still
3181 reporting the event to GDB. If we don't, we're out of luck, GDB
3182 won't see the breakpoint hit. If we see a single-step event but
3183 the thread should be continuing, don't pass the trap to gdb.
3184 That indicates that we had previously finished a single-step but
3185 left the single-step pending -- see
3186 complete_ongoing_step_over. */
3187 report_to_gdb = (!maybe_internal_trap
3188 || (current_thread->last_resume_kind == resume_step
3189 && !in_step_range)
3190 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3191 || (!in_step_range
3192 && !bp_explains_trap
3193 && !trace_event
3194 && !step_over_finished
3195 && !(current_thread->last_resume_kind == resume_continue
3196 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3197 || (gdb_breakpoint_here (event_child->stop_pc)
3198 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3199 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3200 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3201
3202 run_breakpoint_commands (event_child->stop_pc);
3203
3204 /* We found no reason GDB would want us to stop. We either hit one
3205 of our own breakpoints, or finished an internal step GDB
3206 shouldn't know about. */
3207 if (!report_to_gdb)
3208 {
3209 if (debug_threads)
3210 {
3211 if (bp_explains_trap)
3212 debug_printf ("Hit a gdbserver breakpoint.\n");
3213 if (step_over_finished)
3214 debug_printf ("Step-over finished.\n");
3215 if (trace_event)
3216 debug_printf ("Tracepoint event.\n");
3217 if (lwp_in_step_range (event_child))
3218 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3219 paddress (event_child->stop_pc),
3220 paddress (event_child->step_range_start),
3221 paddress (event_child->step_range_end));
3222 }
3223
3224 /* We're not reporting this breakpoint to GDB, so apply the
3225 decr_pc_after_break adjustment to the inferior's regcache
3226 ourselves. */
3227
3228 if (the_low_target.set_pc != NULL)
3229 {
3230 struct regcache *regcache
3231 = get_thread_regcache (current_thread, 1);
3232 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3233 }
3234
3235 /* We may have finished stepping over a breakpoint. If so,
3236 we've stopped and suspended all LWPs momentarily except the
3237 stepping one. This is where we resume them all again. We're
3238 going to keep waiting, so use proceed, which handles stepping
3239 over the next breakpoint. */
3240 if (debug_threads)
3241 debug_printf ("proceeding all threads.\n");
3242
3243 if (step_over_finished)
3244 unsuspend_all_lwps (event_child);
3245
3246 proceed_all_lwps ();
3247 return ignore_event (ourstatus);
3248 }
3249
3250 if (debug_threads)
3251 {
3252 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3253 {
3254 char *str;
3255
3256 str = target_waitstatus_to_string (&event_child->waitstatus);
3257 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3258 lwpid_of (get_lwp_thread (event_child)), str);
3259 xfree (str);
3260 }
3261 if (current_thread->last_resume_kind == resume_step)
3262 {
3263 if (event_child->step_range_start == event_child->step_range_end)
3264 debug_printf ("GDB wanted to single-step, reporting event.\n");
3265 else if (!lwp_in_step_range (event_child))
3266 debug_printf ("Out of step range, reporting event.\n");
3267 }
3268 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3269 debug_printf ("Stopped by watchpoint.\n");
3270 else if (gdb_breakpoint_here (event_child->stop_pc))
3271 debug_printf ("Stopped by GDB breakpoint.\n");
3272 if (debug_threads)
3273 debug_printf ("Hit a non-gdbserver trap event.\n");
3274 }
3275
3276 /* Alright, we're going to report a stop. */
3277
3278 if (!stabilizing_threads)
3279 {
3280 /* In all-stop, stop all threads. */
3281 if (!non_stop)
3282 stop_all_lwps (0, NULL);
3283
3284 /* If we're not waiting for a specific LWP, choose an event LWP
3285 from among those that have had events. Giving equal priority
3286 to all LWPs that have had events helps prevent
3287 starvation. */
3288 if (ptid_equal (ptid, minus_one_ptid))
3289 {
3290 event_child->status_pending_p = 1;
3291 event_child->status_pending = w;
3292
3293 select_event_lwp (&event_child);
3294
3295 /* current_thread and event_child must stay in sync. */
3296 current_thread = get_lwp_thread (event_child);
3297
3298 event_child->status_pending_p = 0;
3299 w = event_child->status_pending;
3300 }
3301
3302 if (step_over_finished)
3303 {
3304 if (!non_stop)
3305 {
3306 /* If we were doing a step-over, all other threads but
3307 the stepping one had been paused in start_step_over,
3308 with their suspend counts incremented. We don't want
3309 to do a full unstop/unpause, because we're in
3310 all-stop mode (so we want threads stopped), but we
3311 still need to unsuspend the other threads, to
3312 decrement their `suspended' count back. */
3313 unsuspend_all_lwps (event_child);
3314 }
3315 else
3316 {
3317 /* If we just finished a step-over, then all threads had
3318 been momentarily paused. In all-stop, that's fine,
3319 we want threads stopped by now anyway. In non-stop,
3320 we need to re-resume threads that GDB wanted to be
3321 running. */
3322 unstop_all_lwps (1, event_child);
3323 }
3324 }
3325
3326 /* Stabilize threads (move out of jump pads). */
3327 if (!non_stop)
3328 stabilize_threads ();
3329 }
3330 else
3331 {
3332 /* If we just finished a step-over, then all threads had been
3333 momentarily paused. In all-stop, that's fine, we want
3334 threads stopped by now anyway. In non-stop, we need to
3335 re-resume threads that GDB wanted to be running. */
3336 if (step_over_finished)
3337 unstop_all_lwps (1, event_child);
3338 }
3339
3340 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3341 {
3342 /* If the reported event is an exit, fork, vfork or exec, let
3343 GDB know. */
3344 *ourstatus = event_child->waitstatus;
3345 /* Clear the event lwp's waitstatus since we handled it already. */
3346 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3347 }
3348 else
3349 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3350
3351 /* Now that we've selected our final event LWP, un-adjust its PC if
3352 it was a software breakpoint, and the client doesn't know we can
3353 adjust the breakpoint ourselves. */
3354 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3355 && !swbreak_feature)
3356 {
3357 int decr_pc = the_low_target.decr_pc_after_break;
3358
3359 if (decr_pc != 0)
3360 {
3361 struct regcache *regcache
3362 = get_thread_regcache (current_thread, 1);
3363 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3364 }
3365 }
3366
3367 if (current_thread->last_resume_kind == resume_stop
3368 && WSTOPSIG (w) == SIGSTOP)
3369 {
3370 /* A thread that has been requested to stop by GDB with vCont;t,
3371 and it stopped cleanly, so report it as SIG0. The use of
3372 SIGSTOP is an implementation detail. */
3373 ourstatus->value.sig = GDB_SIGNAL_0;
3374 }
3375 else if (current_thread->last_resume_kind == resume_stop
3376 && WSTOPSIG (w) != SIGSTOP)
3377 {
3378 /* A thread that has been requested to stop by GDB with vCont;t,
3379 but it stopped for other reasons. */
3380 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3381 }
3382 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3383 {
3384 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3385 }
3386
3387 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3388
3389 if (debug_threads)
3390 {
3391 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3392 target_pid_to_str (ptid_of (current_thread)),
3393 ourstatus->kind, ourstatus->value.sig);
3394 debug_exit ();
3395 }
3396
3397 return ptid_of (current_thread);
3398 }
3399
3400 /* Get rid of any pending event in the pipe. */
3401 static void
3402 async_file_flush (void)
3403 {
3404 int ret;
3405 char buf;
3406
3407 do
3408 ret = read (linux_event_pipe[0], &buf, 1);
3409 while (ret >= 0 || (ret == -1 && errno == EINTR));
3410 }
3411
3412 /* Put something in the pipe, so the event loop wakes up. */
3413 static void
3414 async_file_mark (void)
3415 {
3416 int ret;
3417
3418 async_file_flush ();
3419
3420 do
3421 ret = write (linux_event_pipe[1], "+", 1);
3422 while (ret == 0 || (ret == -1 && errno == EINTR));
3423
3424 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3425 be awakened anyway. */
3426 }
3427
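/* The target's wait entry point for the Linux backend. Flush the
   async event pipe, loop on linux_wait_1 until an event is reported
   (unless TARGET_WNOHANG was requested), and, in async mode with
   TARGET_WNOHANG, re-mark the pipe when a stop was reported, since a
   single SIGCHLD can indicate more than one child stop. */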
3428 static ptid_t
3429 linux_wait (ptid_t ptid,
3430 struct target_waitstatus *ourstatus, int target_options)
3431 {
3432 ptid_t event_ptid;
3433
3434 /* Flush the async file first. */
3435 if (target_is_async_p ())
3436 async_file_flush ();
3437
3438 do
3439 {
3440 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3441 }
3442 while ((target_options & TARGET_WNOHANG) == 0
3443 && ptid_equal (event_ptid, null_ptid)
3444 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3445
3446 /* If at least one stop was reported, there may be more. A single
3447 SIGCHLD can signal more than one child stop. */
3448 if (target_is_async_p ()
3449 && (target_options & TARGET_WNOHANG) != 0
3450 && !ptid_equal (event_ptid, null_ptid))
3451 async_file_mark ();
3452
3453 return event_ptid;
3454 }
3455
3456 /* Send a signal to an LWP. */
3457
3458 static int
3459 kill_lwp (unsigned long lwpid, int signo)
3460 {
3461 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3462 fails, then we are not using nptl threads and we should be using kill. */
3463
3464 #ifdef __NR_tkill
3465 {
3466 static int tkill_failed;
3467
3468 if (!tkill_failed)
3469 {
3470 int ret;
3471
3472 errno = 0;
3473 ret = syscall (__NR_tkill, lwpid, signo);
3474 if (errno != ENOSYS)
3475 return ret;
3476 tkill_failed = 1;
3477 }
3478 }
3479 #endif
3480
3481 return kill (lwpid, signo);
3482 }
3483
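/* Ask LWP to stop by queueing a SIGSTOP for it (see send_sigstop). */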
3484 void
3485 linux_stop_lwp (struct lwp_info *lwp)
3486 {
3487 send_sigstop (lwp);
3488 }
3489
3490 static void
3491 send_sigstop (struct lwp_info *lwp)
3492 {
3493 int pid;
3494
3495 pid = lwpid_of (get_lwp_thread (lwp));
3496
3497 /* If we already have a pending stop signal for this process, don't
3498 send another. */
3499 if (lwp->stop_expected)
3500 {
3501 if (debug_threads)
3502 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3503
3504 return;
3505 }
3506
3507 if (debug_threads)
3508 debug_printf ("Sending sigstop to lwp %d\n", pid);
3509
3510 lwp->stop_expected = 1;
3511 kill_lwp (pid, SIGSTOP);
3512 }
3513
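/* Callback for find_inferior. Send a SIGSTOP to the LWP of ENTRY,
   unless it is EXCEPT or is already stopped. */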
3514 static int
3515 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3516 {
3517 struct thread_info *thread = (struct thread_info *) entry;
3518 struct lwp_info *lwp = get_thread_lwp (thread);
3519
3520 /* Ignore EXCEPT. */
3521 if (lwp == except)
3522 return 0;
3523
3524 if (lwp->stopped)
3525 return 0;
3526
3527 send_sigstop (lwp);
3528 return 0;
3529 }
3530
3531 /* Increment the suspend count of an LWP, and stop it, if not stopped
3532 yet. */
3533 static int
3534 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3535 void *except)
3536 {
3537 struct thread_info *thread = (struct thread_info *) entry;
3538 struct lwp_info *lwp = get_thread_lwp (thread);
3539
3540 /* Ignore EXCEPT. */
3541 if (lwp == except)
3542 return 0;
3543
3544 lwp_suspended_inc (lwp);
3545
3546 return send_sigstop_callback (entry, except);
3547 }
3548
3549 static void
3550 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3551 {
3552 /* Store the exit status for later. */
3553 lwp->status_pending_p = 1;
3554 lwp->status_pending = wstat;
3555
3556 /* Store in waitstatus as well, as there's nothing else to process
3557 for this event. */
3558 if (WIFEXITED (wstat))
3559 {
3560 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3561 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3562 }
3563 else if (WIFSIGNALED (wstat))
3564 {
3565 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3566 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3567 }
3568
3569 /* Prevent trying to stop it. */
3570 lwp->stopped = 1;
3571
3572 /* No further stops are expected from a dead lwp. */
3573 lwp->stop_expected = 0;
3574 }
3575
3576 /* Return true if LWP has exited already, and has a pending exit event
3577 to report to GDB. */
3578
3579 static int
3580 lwp_is_marked_dead (struct lwp_info *lwp)
3581 {
3582 return (lwp->status_pending_p
3583 && (WIFEXITED (lwp->status_pending)
3584 || WIFSIGNALED (lwp->status_pending)));
3585 }
3586
3587 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3588
3589 static void
3590 wait_for_sigstop (void)
3591 {
3592 struct thread_info *saved_thread;
3593 ptid_t saved_tid;
3594 int wstat;
3595 int ret;
3596
3597 saved_thread = current_thread;
3598 if (saved_thread != NULL)
3599 saved_tid = saved_thread->entry.id;
3600 else
3601 saved_tid = null_ptid; /* avoid bogus unused warning */
3602
3603 if (debug_threads)
3604 debug_printf ("wait_for_sigstop: pulling events\n");
3605
3606 /* Passing NULL_PTID as filter indicates we want all events to be
3607 left pending. Eventually this returns when there are no
3608 unwaited-for children left. */
3609 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3610 &wstat, __WALL);
3611 gdb_assert (ret == -1);
3612
3613 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3614 current_thread = saved_thread;
3615 else
3616 {
3617 if (debug_threads)
3618 debug_printf ("Previously current thread died.\n");
3619
3620 /* We can't change the current inferior behind GDB's back,
3621 otherwise, a subsequent command may apply to the wrong
3622 process. */
3623 current_thread = NULL;
3624 }
3625 }
3626
3627 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3628 move it out, because we need to report the stop event to GDB. For
3629 example, if the user puts a breakpoint in the jump pad, it's
3630 because she wants to debug it. */
3631
3632 static int
3633 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3634 {
3635 struct thread_info *thread = (struct thread_info *) entry;
3636 struct lwp_info *lwp = get_thread_lwp (thread);
3637
3638 if (lwp->suspended != 0)
3639 {
3640 internal_error (__FILE__, __LINE__,
3641 "LWP %ld is suspended, suspended=%d\n",
3642 lwpid_of (thread), lwp->suspended);
3643 }
3644 gdb_assert (lwp->stopped);
3645
3646 /* Allow debugging the jump pad, gdb_collect, etc. */
3647 return (supports_fast_tracepoints ()
3648 && agent_loaded_p ()
3649 && (gdb_breakpoint_here (lwp->stop_pc)
3650 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3651 || thread->last_resume_kind == resume_step)
3652 && linux_fast_tracepoint_collecting (lwp, NULL));
3653 }
3654
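/* Callback for for_each_inferior. If the LWP needs moving out of the
   jump pad and GDB does not need to see this particular stop (no GDB
   breakpoint here, not a watchpoint stop, not single-stepping), defer
   any pending signal and resume it so it can finish collecting and
   exit the pad; otherwise increment its suspend count so it stays
   put. */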
3655 static void
3656 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3657 {
3658 struct thread_info *thread = (struct thread_info *) entry;
3659 struct thread_info *saved_thread;
3660 struct lwp_info *lwp = get_thread_lwp (thread);
3661 int *wstat;
3662
3663 if (lwp->suspended != 0)
3664 {
3665 internal_error (__FILE__, __LINE__,
3666 "LWP %ld is suspended, suspended=%d\n",
3667 lwpid_of (thread), lwp->suspended);
3668 }
3669 gdb_assert (lwp->stopped);
3670
3671 /* For gdb_breakpoint_here. */
3672 saved_thread = current_thread;
3673 current_thread = thread;
3674
3675 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3676
3677 /* Allow debugging the jump pad, gdb_collect, etc. */
3678 if (!gdb_breakpoint_here (lwp->stop_pc)
3679 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3680 && thread->last_resume_kind != resume_step
3681 && maybe_move_out_of_jump_pad (lwp, wstat))
3682 {
3683 if (debug_threads)
3684 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3685 lwpid_of (thread));
3686
3687 if (wstat)
3688 {
3689 lwp->status_pending_p = 0;
3690 enqueue_one_deferred_signal (lwp, wstat);
3691
3692 if (debug_threads)
3693 debug_printf ("Signal %d for LWP %ld deferred "
3694 "(in jump pad)\n",
3695 WSTOPSIG (*wstat), lwpid_of (thread));
3696 }
3697
3698 linux_resume_one_lwp (lwp, 0, 0, NULL);
3699 }
3700 else
3701 lwp_suspended_inc (lwp);
3702
3703 current_thread = saved_thread;
3704 }
3705
3706 static int
3707 lwp_running (struct inferior_list_entry *entry, void *data)
3708 {
3709 struct thread_info *thread = (struct thread_info *) entry;
3710 struct lwp_info *lwp = get_thread_lwp (thread);
3711
3712 if (lwp_is_marked_dead (lwp))
3713 return 0;
3714 if (lwp->stopped)
3715 return 0;
3716 return 1;
3717 }
3718
3719 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3720 If SUSPEND, then also increase the suspend count of every LWP,
3721 except EXCEPT. */
3722
3723 static void
3724 stop_all_lwps (int suspend, struct lwp_info *except)
3725 {
3726 /* Should not be called recursively. */
3727 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3728
3729 if (debug_threads)
3730 {
3731 debug_enter ();
3732 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3733 suspend ? "stop-and-suspend" : "stop",
3734 except != NULL
3735 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3736 : "none");
3737 }
3738
3739 stopping_threads = (suspend
3740 ? STOPPING_AND_SUSPENDING_THREADS
3741 : STOPPING_THREADS);
3742
3743 if (suspend)
3744 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3745 else
3746 find_inferior (&all_threads, send_sigstop_callback, except);
3747 wait_for_sigstop ();
3748 stopping_threads = NOT_STOPPING_THREADS;
3749
3750 if (debug_threads)
3751 {
3752 debug_printf ("stop_all_lwps done, setting stopping_threads "
3753 "back to !stopping\n");
3754 debug_exit ();
3755 }
3756 }
3757
3758 /* Enqueue one signal in the chain of signals which need to be
3759 delivered to this process on next resume. */
3760
3761 static void
3762 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3763 {
3764 struct pending_signals *p_sig;
3765
3766 p_sig = xmalloc (sizeof (*p_sig));
3767 p_sig->prev = lwp->pending_signals;
3768 p_sig->signal = signal;
3769 if (info == NULL)
3770 memset (&p_sig->info, 0, sizeof (siginfo_t));
3771 else
3772 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3773 lwp->pending_signals = p_sig;
3774 }
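/* New entries are pushed at the head of the LWP's pending_signals
   list, while the consumer in linux_resume_one_lwp_throw walks to the
   tail before taking one, so signals are delivered oldest-first.  A
   minimal sketch of that dequeue, for illustration only (the real
   code appears further below in linux_resume_one_lwp_throw):

     struct pending_signals **p = &lwp->pending_signals;
     while ((*p)->prev != NULL)
       p = &(*p)->prev;
     signal = (*p)->signal;
     free (*p);
     *p = NULL;
*/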
3775
3776 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3777 SIGNAL is nonzero, give it that signal. */
3778
3779 static void
3780 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3781 int step, int signal, siginfo_t *info)
3782 {
3783 struct thread_info *thread = get_lwp_thread (lwp);
3784 struct thread_info *saved_thread;
3785 int fast_tp_collecting;
3786 struct process_info *proc = get_thread_process (thread);
3787
3788 /* Note that target description may not be initialised
3789 (proc->tdesc == NULL) at this point because the program hasn't
3790 stopped at the first instruction yet. It means GDBserver skips
3791 the extra traps from the wrapper program (see option --wrapper).
3792 Code in this function that requires register access should
3793 therefore check that proc->tdesc is not NULL before relying on it. */
3794
3795 if (lwp->stopped == 0)
3796 return;
3797
3798 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3799
3800 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3801
3802 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3803 user used the "jump" command, or "set $pc = foo"). */
3804 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3805 {
3806 /* Collecting 'while-stepping' actions doesn't make sense
3807 anymore. */
3808 release_while_stepping_state_list (thread);
3809 }
3810
3811 /* If we have pending signals or status, and a new signal, enqueue the
3812 signal. Also enqueue the signal if we are waiting to reinsert a
3813 breakpoint; it will be picked up again below. */
3814 if (signal != 0
3815 && (lwp->status_pending_p
3816 || lwp->pending_signals != NULL
3817 || lwp->bp_reinsert != 0
3818 || fast_tp_collecting))
3819 {
3820 struct pending_signals *p_sig;
3821 p_sig = xmalloc (sizeof (*p_sig));
3822 p_sig->prev = lwp->pending_signals;
3823 p_sig->signal = signal;
3824 if (info == NULL)
3825 memset (&p_sig->info, 0, sizeof (siginfo_t));
3826 else
3827 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3828 lwp->pending_signals = p_sig;
3829 }
3830
3831 if (lwp->status_pending_p)
3832 {
3833 if (debug_threads)
3834 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3835 " has pending status\n",
3836 lwpid_of (thread), step ? "step" : "continue", signal,
3837 lwp->stop_expected ? "expected" : "not expected");
3838 return;
3839 }
3840
3841 saved_thread = current_thread;
3842 current_thread = thread;
3843
3844 if (debug_threads)
3845 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3846 lwpid_of (thread), step ? "step" : "continue", signal,
3847 lwp->stop_expected ? "expected" : "not expected");
3848
3849 /* This bit needs some thinking about. If we get a signal that
3850 we must report while a single-step reinsert is still pending,
3851 we often end up resuming the thread. It might be better to
3852 (ew) allow a stack of pending events; then we could be sure that
3853 the reinsert happened right away and not lose any signals.
3854
3855 Making this stack would also shrink the window in which breakpoints are
3856 uninserted (see comment in linux_wait_for_lwp) but not enough for
3857 complete correctness, so it won't solve that problem. It may be
3858 worthwhile just to solve this one, however. */
3859 if (lwp->bp_reinsert != 0)
3860 {
3861 if (debug_threads)
3862 debug_printf (" pending reinsert at 0x%s\n",
3863 paddress (lwp->bp_reinsert));
3864
3865 if (can_hardware_single_step ())
3866 {
3867 if (fast_tp_collecting == 0)
3868 {
3869 if (step == 0)
3870 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3871 if (lwp->suspended)
3872 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3873 lwp->suspended);
3874 }
3875
3876 step = 1;
3877 }
3878
3879 /* Postpone any pending signal. It was enqueued above. */
3880 signal = 0;
3881 }
3882
3883 if (fast_tp_collecting == 1)
3884 {
3885 if (debug_threads)
3886 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3887 " (exit-jump-pad-bkpt)\n",
3888 lwpid_of (thread));
3889
3890 /* Postpone any pending signal. It was enqueued above. */
3891 signal = 0;
3892 }
3893 else if (fast_tp_collecting == 2)
3894 {
3895 if (debug_threads)
3896 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3897 " single-stepping\n",
3898 lwpid_of (thread));
3899
3900 if (can_hardware_single_step ())
3901 step = 1;
3902 else
3903 {
3904 internal_error (__FILE__, __LINE__,
3905 "moving out of jump pad single-stepping"
3906 " not implemented on this target");
3907 }
3908
3909 /* Postpone any pending signal. It was enqueued above. */
3910 signal = 0;
3911 }
3912
3913 /* If we have while-stepping actions in this thread set it stepping.
3914 If we have a signal to deliver, it may or may not be set to
3915 SIG_IGN, we don't know. Assume so, and allow collecting
3916 while-stepping into a signal handler. A possible smart thing to
3917 do would be to set an internal breakpoint at the signal return
3918 address, continue, and carry on catching this while-stepping
3919 action only when that breakpoint is hit. A future
3920 enhancement. */
3921 if (thread->while_stepping != NULL
3922 && can_hardware_single_step ())
3923 {
3924 if (debug_threads)
3925 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3926 lwpid_of (thread));
3927 step = 1;
3928 }
3929
3930 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
3931 {
3932 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3933
3934 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3935
3936 if (debug_threads)
3937 {
3938 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3939 (long) lwp->stop_pc);
3940 }
3941 }
3942
3943 /* If we have pending signals, consume one unless we are trying to
3944 reinsert a breakpoint or we're trying to finish a fast tracepoint
3945 collect. */
3946 if (lwp->pending_signals != NULL
3947 && lwp->bp_reinsert == 0
3948 && fast_tp_collecting == 0)
3949 {
3950 struct pending_signals **p_sig;
3951
3952 p_sig = &lwp->pending_signals;
3953 while ((*p_sig)->prev != NULL)
3954 p_sig = &(*p_sig)->prev;
3955
3956 signal = (*p_sig)->signal;
3957 if ((*p_sig)->info.si_signo != 0)
3958 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3959 &(*p_sig)->info);
3960
3961 free (*p_sig);
3962 *p_sig = NULL;
3963 }
3964
3965 if (the_low_target.prepare_to_resume != NULL)
3966 the_low_target.prepare_to_resume (lwp);
3967
3968 regcache_invalidate_thread (thread);
3969 errno = 0;
3970 lwp->stepping = step;
3971 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3972 (PTRACE_TYPE_ARG3) 0,
3973 /* Coerce to a uintptr_t first to avoid potential gcc warning
3974 of coercing an 8 byte integer to a 4 byte pointer. */
3975 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3976
3977 current_thread = saved_thread;
3978 if (errno)
3979 perror_with_name ("resuming thread");
3980
3981 /* Successfully resumed. Clear state that no longer makes sense,
3982 and mark the LWP as running. Must not do this before resuming
3983 otherwise if that fails other code will be confused. E.g., we'd
3984 later try to stop the LWP and hang forever waiting for a stop
3985 status. Note that we must not throw after this is cleared,
3986 otherwise handle_zombie_lwp_error would get confused. */
3987 lwp->stopped = 0;
3988 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3989 }
3990
3991 /* Called when we try to resume a stopped LWP and that errors out. If
3992 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3993 or about to become), discard the error, clear any pending status
3994 the LWP may have, and return true (we'll collect the exit status
3995 soon enough). Otherwise, return false. */
3996
3997 static int
3998 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3999 {
4000 struct thread_info *thread = get_lwp_thread (lp);
4001
4002 /* If we get an error after resuming the LWP successfully, we'd
4003 confuse !T state for the LWP being gone. */
4004 gdb_assert (lp->stopped);
4005
4006 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4007 because even if ptrace failed with ESRCH, the tracee may be "not
4008 yet fully dead", but already refusing ptrace requests. In that
4009 case the tracee has 'R (Running)' state for a little bit
4010 (observed in Linux 3.18). See also the note on ESRCH in the
4011 ptrace(2) man page. Instead, check whether the LWP has any state
4012 other than ptrace-stopped. */
4013
4014 /* Don't assume anything if /proc/PID/status can't be read. */
4015 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4016 {
4017 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4018 lp->status_pending_p = 0;
4019 return 1;
4020 }
4021 return 0;
4022 }
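/* For reference, the check above amounts to reading the State: line
   of /proc/PID/status and testing whether it reports a tracing stop.
   A rough, stand-alone sketch of that idea (illustration only; the
   real linux_proc_pid_is_trace_stopped_nowarn lives in
   nat/linux-procfs.c and is more careful about error handling):

     char path[64], line[256];
     int trace_stopped = 0;
     snprintf (path, sizeof path, "/proc/%d/status", (int) pid);
     FILE *f = fopen (path, "r");
     if (f != NULL)
       {
         while (fgets (line, sizeof line, f) != NULL)
           if (strncmp (line, "State:", 6) == 0)
             {
               trace_stopped
                 = (strstr (line, "(tracing stop)") != NULL);
               break;
             }
         fclose (f);
       }
*/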
4023
4024 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4025 disappears while we try to resume it. */
4026
4027 static void
4028 linux_resume_one_lwp (struct lwp_info *lwp,
4029 int step, int signal, siginfo_t *info)
4030 {
4031 TRY
4032 {
4033 linux_resume_one_lwp_throw (lwp, step, signal, info);
4034 }
4035 CATCH (ex, RETURN_MASK_ERROR)
4036 {
4037 if (!check_ptrace_stopped_lwp_gone (lwp))
4038 throw_exception (ex);
4039 }
4040 END_CATCH
4041 }
4042
4043 struct thread_resume_array
4044 {
4045 struct thread_resume *resume;
4046 size_t n;
4047 };
4048
4049 /* This function is called once per thread via find_inferior.
4050 ARG is a pointer to a thread_resume_array struct.
4051 We look up the thread specified by ENTRY in ARG, and mark the thread
4052 with a pointer to the appropriate resume request.
4053
4054 This algorithm is O(threads * resume elements), but resume elements
4055 is small (and will remain small at least until GDB supports thread
4056 suspension). */
4057
4058 static int
4059 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4060 {
4061 struct thread_info *thread = (struct thread_info *) entry;
4062 struct lwp_info *lwp = get_thread_lwp (thread);
4063 int ndx;
4064 struct thread_resume_array *r;
4065
4066 r = arg;
4067
4068 for (ndx = 0; ndx < r->n; ndx++)
4069 {
4070 ptid_t ptid = r->resume[ndx].thread;
4071 if (ptid_equal (ptid, minus_one_ptid)
4072 || ptid_equal (ptid, entry->id)
4073 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4074 of PID'. */
4075 || (ptid_get_pid (ptid) == pid_of (thread)
4076 && (ptid_is_pid (ptid)
4077 || ptid_get_lwp (ptid) == -1)))
4078 {
4079 if (r->resume[ndx].kind == resume_stop
4080 && thread->last_resume_kind == resume_stop)
4081 {
4082 if (debug_threads)
4083 debug_printf ("already %s LWP %ld at GDB's request\n",
4084 (thread->last_status.kind
4085 == TARGET_WAITKIND_STOPPED)
4086 ? "stopped"
4087 : "stopping",
4088 lwpid_of (thread));
4089
4090 continue;
4091 }
4092
4093 lwp->resume = &r->resume[ndx];
4094 thread->last_resume_kind = lwp->resume->kind;
4095
4096 lwp->step_range_start = lwp->resume->step_range_start;
4097 lwp->step_range_end = lwp->resume->step_range_end;
4098
4099 /* If we had a deferred signal to report, dequeue one now.
4100 This can happen if LWP gets more than one signal while
4101 trying to get out of a jump pad. */
4102 if (lwp->stopped
4103 && !lwp->status_pending_p
4104 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4105 {
4106 lwp->status_pending_p = 1;
4107
4108 if (debug_threads)
4109 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4110 "leaving status pending.\n",
4111 WSTOPSIG (lwp->status_pending),
4112 lwpid_of (thread));
4113 }
4114
4115 return 0;
4116 }
4117 }
4118
4119 /* No resume action for this thread. */
4120 lwp->resume = NULL;
4121
4122 return 0;
4123 }
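/* To make the matching above concrete: a resume request carrying the
   ptid 'p1234.-1' (or just 'p1234') matches every thread whose
   process id is 1234; 'p1234.5678' matches only LWP 5678 of that
   process; and minus_one_ptid (the '-1' wildcard) matches every
   thread of every process.  This restates the conditions in the loop
   above; it adds no behavior.  */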
4124
4125 /* find_inferior callback for linux_resume.
4126 Set *FLAG_P if this lwp has an interesting status pending. */
4127
4128 static int
4129 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4130 {
4131 struct thread_info *thread = (struct thread_info *) entry;
4132 struct lwp_info *lwp = get_thread_lwp (thread);
4133
4134 /* LWPs which will not be resumed are not interesting, because
4135 we might not wait for them next time through linux_wait. */
4136 if (lwp->resume == NULL)
4137 return 0;
4138
4139 if (thread_still_has_status_pending_p (thread))
4140 * (int *) flag_p = 1;
4141
4142 return 0;
4143 }
4144
4145 /* Return 1 if this lwp that GDB wants running is stopped at an
4146 internal breakpoint that we need to step over. It assumes that any
4147 required STOP_PC adjustment has already been propagated to the
4148 inferior's regcache. */
4149
4150 static int
4151 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4152 {
4153 struct thread_info *thread = (struct thread_info *) entry;
4154 struct lwp_info *lwp = get_thread_lwp (thread);
4155 struct thread_info *saved_thread;
4156 CORE_ADDR pc;
4157 struct process_info *proc = get_thread_process (thread);
4158
4159 /* GDBserver is skipping the extra traps from the wrapper program,
4160 so there is no need to do a step-over. */
4161 if (proc->tdesc == NULL)
4162 return 0;
4163
4164 /* LWPs which will not be resumed are not interesting, because we
4165 might not wait for them next time through linux_wait. */
4166
4167 if (!lwp->stopped)
4168 {
4169 if (debug_threads)
4170 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4171 lwpid_of (thread));
4172 return 0;
4173 }
4174
4175 if (thread->last_resume_kind == resume_stop)
4176 {
4177 if (debug_threads)
4178 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4179 " stopped\n",
4180 lwpid_of (thread));
4181 return 0;
4182 }
4183
4184 gdb_assert (lwp->suspended >= 0);
4185
4186 if (lwp->suspended)
4187 {
4188 if (debug_threads)
4189 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4190 lwpid_of (thread));
4191 return 0;
4192 }
4193
4194 if (!lwp->need_step_over)
4195 {
4196 if (debug_threads)
4197 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4198 }
4199
4200 if (lwp->status_pending_p)
4201 {
4202 if (debug_threads)
4203 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4204 " status.\n",
4205 lwpid_of (thread));
4206 return 0;
4207 }
4208
4209 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4210 or we have. */
4211 pc = get_pc (lwp);
4212
4213 /* If the PC has changed since we stopped, then don't do anything,
4214 and let the breakpoint/tracepoint be hit. This happens if, for
4215 instance, GDB handled the decr_pc_after_break subtraction itself,
4216 GDB is OOL stepping this thread, or the user has issued a "jump"
4217 command, or poked thread's registers herself. */
4218 if (pc != lwp->stop_pc)
4219 {
4220 if (debug_threads)
4221 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4222 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4223 lwpid_of (thread),
4224 paddress (lwp->stop_pc), paddress (pc));
4225
4226 lwp->need_step_over = 0;
4227 return 0;
4228 }
4229
4230 saved_thread = current_thread;
4231 current_thread = thread;
4232
4233 /* We can only step over breakpoints we know about. */
4234 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4235 {
4236 /* Don't step over a breakpoint that GDB expects to hit
4237 though. If the condition is being evaluated on the target's side
4238 and it evaluates to false, step over this breakpoint as well. */
4239 if (gdb_breakpoint_here (pc)
4240 && gdb_condition_true_at_breakpoint (pc)
4241 && gdb_no_commands_at_breakpoint (pc))
4242 {
4243 if (debug_threads)
4244 debug_printf ("Need step over [LWP %ld]? yes, but found"
4245 " GDB breakpoint at 0x%s; skipping step over\n",
4246 lwpid_of (thread), paddress (pc));
4247
4248 current_thread = saved_thread;
4249 return 0;
4250 }
4251 else
4252 {
4253 if (debug_threads)
4254 debug_printf ("Need step over [LWP %ld]? yes, "
4255 "found breakpoint at 0x%s\n",
4256 lwpid_of (thread), paddress (pc));
4257
4258 /* We've found an lwp that needs stepping over --- return 1 so
4259 that find_inferior stops looking. */
4260 current_thread = saved_thread;
4261
4262 /* If the step over is cancelled, this is set again. */
4263 lwp->need_step_over = 0;
4264 return 1;
4265 }
4266 }
4267
4268 current_thread = saved_thread;
4269
4270 if (debug_threads)
4271 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4272 " at 0x%s\n",
4273 lwpid_of (thread), paddress (pc));
4274
4275 return 0;
4276 }
4277
4278 /* Start a step-over operation on LWP. When LWP stopped at a
4279 breakpoint, to make progress, we need to get the breakpoint out
4280 of the way. If we let other threads run while we do that, they may
4281 pass by the breakpoint location and miss hitting it. To avoid
4282 that, a step-over momentarily stops all threads while LWP is
4283 single-stepped while the breakpoint is temporarily uninserted from
4284 the inferior. When the single-step finishes, we reinsert the
4285 breakpoint, and let all threads that are supposed to be running,
4286 run again.
4287
4288 On targets that don't support hardware single-step, we don't
4289 currently support full software single-stepping. Instead, we only
4290 support stepping over the thread event breakpoint, by asking the
4291 low target where to place a reinsert breakpoint. Since this
4292 routine assumes the breakpoint being stepped over is a thread event
4293 breakpoint, it usually assumes the return address of the current
4294 function is a good enough place to set the reinsert breakpoint. */
4295
4296 static int
4297 start_step_over (struct lwp_info *lwp)
4298 {
4299 struct thread_info *thread = get_lwp_thread (lwp);
4300 struct thread_info *saved_thread;
4301 CORE_ADDR pc;
4302 int step;
4303
4304 if (debug_threads)
4305 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4306 lwpid_of (thread));
4307
4308 stop_all_lwps (1, lwp);
4309
4310 if (lwp->suspended != 0)
4311 {
4312 internal_error (__FILE__, __LINE__,
4313 "LWP %ld suspended=%d\n", lwpid_of (thread),
4314 lwp->suspended);
4315 }
4316
4317 if (debug_threads)
4318 debug_printf ("Done stopping all threads for step-over.\n");
4319
4320 /* Note, we should always reach here with an already adjusted PC,
4321 either by GDB (if we're resuming due to GDB's request), or by our
4322 caller, if we just finished handling an internal breakpoint GDB
4323 shouldn't care about. */
4324 pc = get_pc (lwp);
4325
4326 saved_thread = current_thread;
4327 current_thread = thread;
4328
4329 lwp->bp_reinsert = pc;
4330 uninsert_breakpoints_at (pc);
4331 uninsert_fast_tracepoint_jumps_at (pc);
4332
4333 if (can_hardware_single_step ())
4334 {
4335 step = 1;
4336 }
4337 else
4338 {
4339 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4340 set_reinsert_breakpoint (raddr);
4341 step = 0;
4342 }
4343
4344 current_thread = saved_thread;
4345
4346 linux_resume_one_lwp (lwp, step, 0, NULL);
4347
4348 /* Require next event from this LWP. */
4349 step_over_bkpt = thread->entry.id;
4350 return 1;
4351 }
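/* After start_step_over returns 1, the caller is expected to wait for
   the next event from the LWP recorded in STEP_OVER_BKPT (all other
   LWPs are stopped, so it is the only one that can report an event),
   then call finish_step_over to reinsert the breakpoint, and finally
   set the other threads running again.  A condensed sketch of that
   sequence (the real driver in linux_wait_1 is more involved):

     if (start_step_over (lwp))
       {
         ... wait for the single-step event from step_over_bkpt ...
         finish_step_over (lwp);
         unstop_all_lwps (1, lwp);
       }
*/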
4352
4353 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4354 start_step_over, if still there, and delete any reinsert
4355 breakpoints we've set, on non hardware single-step targets. */
4356
4357 static int
4358 finish_step_over (struct lwp_info *lwp)
4359 {
4360 if (lwp->bp_reinsert != 0)
4361 {
4362 if (debug_threads)
4363 debug_printf ("Finished step over.\n");
4364
4365 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4366 may be no breakpoint to reinsert there by now. */
4367 reinsert_breakpoints_at (lwp->bp_reinsert);
4368 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4369
4370 lwp->bp_reinsert = 0;
4371
4372 /* Delete any software-single-step reinsert breakpoints. No
4373 longer needed. We don't have to worry about other threads
4374 hitting this trap, and later not being able to explain it,
4375 because we were stepping over a breakpoint, and we hold all
4376 threads but LWP stopped while doing that. */
4377 if (!can_hardware_single_step ())
4378 delete_reinsert_breakpoints ();
4379
4380 step_over_bkpt = null_ptid;
4381 return 1;
4382 }
4383 else
4384 return 0;
4385 }
4386
4387 /* If there's a step over in progress, wait until all threads stop
4388 (that is, until the stepping thread finishes its step), and
4389 unsuspend all lwps. The stepping thread ends with its status
4390 pending, which is processed later when we get back to processing
4391 events. */
4392
4393 static void
4394 complete_ongoing_step_over (void)
4395 {
4396 if (!ptid_equal (step_over_bkpt, null_ptid))
4397 {
4398 struct lwp_info *lwp;
4399 int wstat;
4400 int ret;
4401
4402 if (debug_threads)
4403 debug_printf ("detach: step over in progress, finish it first\n");
4404
4405 /* Passing NULL_PTID as filter indicates we want all events to
4406 be left pending. Eventually this returns when there are no
4407 unwaited-for children left. */
4408 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4409 &wstat, __WALL);
4410 gdb_assert (ret == -1);
4411
4412 lwp = find_lwp_pid (step_over_bkpt);
4413 if (lwp != NULL)
4414 finish_step_over (lwp);
4415 step_over_bkpt = null_ptid;
4416 unsuspend_all_lwps (lwp);
4417 }
4418 }
4419
4420 /* This function is called once per thread. We check the thread's resume
4421 request, which will tell us whether to resume, step, or leave the thread
4422 stopped; and what signal, if any, it should be sent.
4423
4424 For threads which we aren't explicitly told otherwise, we preserve
4425 the stepping flag; this is used for stepping over gdbserver-placed
4426 breakpoints.
4427
4428 If a pending status was set in any thread, we queue any needed
4429 signals, since we won't actually resume. We already have a pending
4430 event to report, so we don't need to preserve any step requests;
4431 they should be re-issued if necessary. */
4432
4433 static int
4434 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4435 {
4436 struct thread_info *thread = (struct thread_info *) entry;
4437 struct lwp_info *lwp = get_thread_lwp (thread);
4438 int step;
4439 int leave_all_stopped = * (int *) arg;
4440 int leave_pending;
4441
4442 if (lwp->resume == NULL)
4443 return 0;
4444
4445 if (lwp->resume->kind == resume_stop)
4446 {
4447 if (debug_threads)
4448 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4449
4450 if (!lwp->stopped)
4451 {
4452 if (debug_threads)
4453 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4454
4455 /* Stop the thread, and wait for the event asynchronously,
4456 through the event loop. */
4457 send_sigstop (lwp);
4458 }
4459 else
4460 {
4461 if (debug_threads)
4462 debug_printf ("already stopped LWP %ld\n",
4463 lwpid_of (thread));
4464
4465 /* The LWP may have been stopped in an internal event that
4466 was not meant to be notified back to GDB (e.g., gdbserver
4467 breakpoint), so we should be reporting a stop event in
4468 this case too. */
4469
4470 /* If the thread already has a pending SIGSTOP, this is a
4471 no-op. Otherwise, something later will presumably resume
4472 the thread and this will cause it to cancel any pending
4473 operation, due to last_resume_kind == resume_stop. If
4474 the thread already has a pending status to report, we
4475 will still report it the next time we wait - see
4476 status_pending_p_callback. */
4477
4478 /* If we already have a pending signal to report, then
4479 there's no need to queue a SIGSTOP, as this means we're
4480 midway through moving the LWP out of the jumppad, and we
4481 will report the pending signal as soon as that is
4482 finished. */
4483 if (lwp->pending_signals_to_report == NULL)
4484 send_sigstop (lwp);
4485 }
4486
4487 /* For stop requests, we're done. */
4488 lwp->resume = NULL;
4489 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4490 return 0;
4491 }
4492
4493 /* If this thread which is about to be resumed has a pending status,
4494 then don't resume it - we can just report the pending status.
4495 Likewise if it is suspended, because e.g., another thread is
4496 stepping past a breakpoint. Make sure to queue any signals that
4497 would otherwise be sent. In all-stop mode, we base this decision
4498 on whether *any* thread has a pending status. If there's a
4499 thread that needs the step-over-breakpoint dance, then don't
4500 resume any other thread but that particular one. */
4501 leave_pending = (lwp->suspended
4502 || lwp->status_pending_p
4503 || leave_all_stopped);
4504
4505 if (!leave_pending)
4506 {
4507 if (debug_threads)
4508 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4509
4510 step = (lwp->resume->kind == resume_step);
4511 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4512 }
4513 else
4514 {
4515 if (debug_threads)
4516 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4517
4518 /* If we have a new signal, enqueue the signal. */
4519 if (lwp->resume->sig != 0)
4520 {
4521 struct pending_signals *p_sig;
4522 p_sig = xmalloc (sizeof (*p_sig));
4523 p_sig->prev = lwp->pending_signals;
4524 p_sig->signal = lwp->resume->sig;
4525 memset (&p_sig->info, 0, sizeof (siginfo_t));
4526
4527 /* If this is the same signal we were previously stopped by,
4528 make sure to queue its siginfo. We can ignore the return
4529 value of ptrace; if it fails, we'll skip
4530 PTRACE_SETSIGINFO. */
4531 if (WIFSTOPPED (lwp->last_status)
4532 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4533 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4534 &p_sig->info);
4535
4536 lwp->pending_signals = p_sig;
4537 }
4538 }
4539
4540 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4541 lwp->resume = NULL;
4542 return 0;
4543 }
4544
4545 static void
4546 linux_resume (struct thread_resume *resume_info, size_t n)
4547 {
4548 struct thread_resume_array array = { resume_info, n };
4549 struct thread_info *need_step_over = NULL;
4550 int any_pending;
4551 int leave_all_stopped;
4552
4553 if (debug_threads)
4554 {
4555 debug_enter ();
4556 debug_printf ("linux_resume:\n");
4557 }
4558
4559 find_inferior (&all_threads, linux_set_resume_request, &array);
4560
4561 /* If there is a thread which would otherwise be resumed, which has
4562 a pending status, then don't resume any threads - we can just
4563 report the pending status. Make sure to queue any signals that
4564 would otherwise be sent. In non-stop mode, we'll apply this
4565 logic to each thread individually. We consume all pending events
4566 before considering to start a step-over (in all-stop). */
4567 any_pending = 0;
4568 if (!non_stop)
4569 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4570
4571 /* If there is a thread which would otherwise be resumed, which is
4572 stopped at a breakpoint that needs stepping over, then don't
4573 resume any threads - have it step over the breakpoint with all
4574 other threads stopped, then resume all threads again. Make sure
4575 to queue any signals that would otherwise be delivered or
4576 queued. */
4577 if (!any_pending && supports_breakpoints ())
4578 need_step_over
4579 = (struct thread_info *) find_inferior (&all_threads,
4580 need_step_over_p, NULL);
4581
4582 leave_all_stopped = (need_step_over != NULL || any_pending);
4583
4584 if (debug_threads)
4585 {
4586 if (need_step_over != NULL)
4587 debug_printf ("Not resuming all, need step over\n");
4588 else if (any_pending)
4589 debug_printf ("Not resuming, all-stop and found "
4590 "an LWP with pending status\n");
4591 else
4592 debug_printf ("Resuming, no pending status or step over needed\n");
4593 }
4594
4595 /* Even if we're leaving threads stopped, queue all signals we'd
4596 otherwise deliver. */
4597 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4598
4599 if (need_step_over)
4600 start_step_over (get_thread_lwp (need_step_over));
4601
4602 if (debug_threads)
4603 {
4604 debug_printf ("linux_resume done\n");
4605 debug_exit ();
4606 }
4607 }
4608
4609 /* This function is called once per thread. We check the thread's
4610 last resume request, which will tell us whether to resume, step, or
4611 leave the thread stopped. Any signal the client requested to be
4612 delivered has already been enqueued at this point.
4613
4614 If any thread that GDB wants running is stopped at an internal
4615 breakpoint that needs stepping over, we start a step-over operation
4616 on that particular thread, and leave all others stopped. */
4617
4618 static int
4619 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4620 {
4621 struct thread_info *thread = (struct thread_info *) entry;
4622 struct lwp_info *lwp = get_thread_lwp (thread);
4623 int step;
4624
4625 if (lwp == except)
4626 return 0;
4627
4628 if (debug_threads)
4629 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4630
4631 if (!lwp->stopped)
4632 {
4633 if (debug_threads)
4634 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4635 return 0;
4636 }
4637
4638 if (thread->last_resume_kind == resume_stop
4639 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4640 {
4641 if (debug_threads)
4642 debug_printf (" client wants LWP to remain %ld stopped\n",
4643 lwpid_of (thread));
4644 return 0;
4645 }
4646
4647 if (lwp->status_pending_p)
4648 {
4649 if (debug_threads)
4650 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4651 lwpid_of (thread));
4652 return 0;
4653 }
4654
4655 gdb_assert (lwp->suspended >= 0);
4656
4657 if (lwp->suspended)
4658 {
4659 if (debug_threads)
4660 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4661 return 0;
4662 }
4663
4664 if (thread->last_resume_kind == resume_stop
4665 && lwp->pending_signals_to_report == NULL
4666 && lwp->collecting_fast_tracepoint == 0)
4667 {
4668 /* We haven't reported this LWP as stopped yet (otherwise, the
4669 last_status.kind check above would catch it, and we wouldn't
4670 reach here). This LWP may have been momentarily paused by a
4671 stop_all_lwps call while handling, for example, another LWP's
4672 step-over. In that case, the pending expected SIGSTOP signal
4673 that was queued at vCont;t handling time will have already
4674 been consumed by wait_for_sigstop, and so we need to requeue
4675 another one here. Note that if the LWP already has a SIGSTOP
4676 pending, this is a no-op. */
4677
4678 if (debug_threads)
4679 debug_printf ("Client wants LWP %ld to stop. "
4680 "Making sure it has a SIGSTOP pending\n",
4681 lwpid_of (thread));
4682
4683 send_sigstop (lwp);
4684 }
4685
4686 if (thread->last_resume_kind == resume_step)
4687 {
4688 if (debug_threads)
4689 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4690 lwpid_of (thread));
4691 step = 1;
4692 }
4693 else if (lwp->bp_reinsert != 0)
4694 {
4695 if (debug_threads)
4696 debug_printf (" stepping LWP %ld, reinsert set\n",
4697 lwpid_of (thread));
4698 step = 1;
4699 }
4700 else
4701 step = 0;
4702
4703 linux_resume_one_lwp (lwp, step, 0, NULL);
4704 return 0;
4705 }
4706
4707 static int
4708 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4709 {
4710 struct thread_info *thread = (struct thread_info *) entry;
4711 struct lwp_info *lwp = get_thread_lwp (thread);
4712
4713 if (lwp == except)
4714 return 0;
4715
4716 lwp_suspended_decr (lwp);
4717
4718 return proceed_one_lwp (entry, except);
4719 }
4720
4721 /* When we finish a step-over, set threads running again. If there's
4722 another thread that may need a step-over, now's the time to start
4723 it. Eventually, we'll move all threads past their breakpoints. */
4724
4725 static void
4726 proceed_all_lwps (void)
4727 {
4728 struct thread_info *need_step_over;
4729
4730 /* If there is a thread which would otherwise be resumed, which is
4731 stopped at a breakpoint that needs stepping over, then don't
4732 resume any threads - have it step over the breakpoint with all
4733 other threads stopped, then resume all threads again. */
4734
4735 if (supports_breakpoints ())
4736 {
4737 need_step_over
4738 = (struct thread_info *) find_inferior (&all_threads,
4739 need_step_over_p, NULL);
4740
4741 if (need_step_over != NULL)
4742 {
4743 if (debug_threads)
4744 debug_printf ("proceed_all_lwps: found "
4745 "thread %ld needing a step-over\n",
4746 lwpid_of (need_step_over));
4747
4748 start_step_over (get_thread_lwp (need_step_over));
4749 return;
4750 }
4751 }
4752
4753 if (debug_threads)
4754 debug_printf ("Proceeding, no step-over needed\n");
4755
4756 find_inferior (&all_threads, proceed_one_lwp, NULL);
4757 }
4758
4759 /* Stopped LWPs that the client wanted to be running, that don't have
4760 pending statuses, are set to run again, except for EXCEPT, if not
4761 NULL. This undoes a stop_all_lwps call. */
4762
4763 static void
4764 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4765 {
4766 if (debug_threads)
4767 {
4768 debug_enter ();
4769 if (except)
4770 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4771 lwpid_of (get_lwp_thread (except)));
4772 else
4773 debug_printf ("unstopping all lwps\n");
4774 }
4775
4776 if (unsuspend)
4777 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4778 else
4779 find_inferior (&all_threads, proceed_one_lwp, except);
4780
4781 if (debug_threads)
4782 {
4783 debug_printf ("unstop_all_lwps done\n");
4784 debug_exit ();
4785 }
4786 }
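/* stop_all_lwps and unstop_all_lwps are used as a matched pair, with
   the same suspend choice on both sides.  An illustrative pairing, as
   done around a step-over (sketch only):

     stop_all_lwps (1, lwp);      stops and suspends everything but LWP
     ...single-step LWP past the breakpoint...
     unstop_all_lwps (1, lwp);    unsuspends the rest and resumes them

   Passing 0 for the first argument instead leaves the suspend counts
   untouched, for callers that only need the threads momentarily
   stopped.  */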
4787
4788
4789 #ifdef HAVE_LINUX_REGSETS
4790
4791 #define use_linux_regsets 1
4792
4793 /* Returns true if REGSET has been disabled. */
4794
4795 static int
4796 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4797 {
4798 return (info->disabled_regsets != NULL
4799 && info->disabled_regsets[regset - info->regsets]);
4800 }
4801
4802 /* Disable REGSET. */
4803
4804 static void
4805 disable_regset (struct regsets_info *info, struct regset_info *regset)
4806 {
4807 int dr_offset;
4808
4809 dr_offset = regset - info->regsets;
4810 if (info->disabled_regsets == NULL)
4811 info->disabled_regsets = xcalloc (1, info->num_regsets);
4812 info->disabled_regsets[dr_offset] = 1;
4813 }
4814
4815 static int
4816 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4817 struct regcache *regcache)
4818 {
4819 struct regset_info *regset;
4820 int saw_general_regs = 0;
4821 int pid;
4822 struct iovec iov;
4823
4824 pid = lwpid_of (current_thread);
4825 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4826 {
4827 void *buf, *data;
4828 int nt_type, res;
4829
4830 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4831 continue;
4832
4833 buf = xmalloc (regset->size);
4834
4835 nt_type = regset->nt_type;
4836 if (nt_type)
4837 {
4838 iov.iov_base = buf;
4839 iov.iov_len = regset->size;
4840 data = (void *) &iov;
4841 }
4842 else
4843 data = buf;
4844
4845 #ifndef __sparc__
4846 res = ptrace (regset->get_request, pid,
4847 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4848 #else
4849 res = ptrace (regset->get_request, pid, data, nt_type);
4850 #endif
4851 if (res < 0)
4852 {
4853 if (errno == EIO)
4854 {
4855 /* If we get EIO on a regset, do not try it again for
4856 this process mode. */
4857 disable_regset (regsets_info, regset);
4858 }
4859 else if (errno == ENODATA)
4860 {
4861 /* ENODATA may be returned if the regset is currently
4862 not "active". This can happen in normal operation,
4863 so suppress the warning in this case. */
4864 }
4865 else
4866 {
4867 char s[256];
4868 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4869 pid);
4870 perror (s);
4871 }
4872 }
4873 else
4874 {
4875 if (regset->type == GENERAL_REGS)
4876 saw_general_regs = 1;
4877 regset->store_function (regcache, buf);
4878 }
4879 free (buf);
4880 }
4881 if (saw_general_regs)
4882 return 0;
4883 else
4884 return 1;
4885 }
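/* When a regset has a non-zero nt_type, the transfer above goes
   through the PTRACE_GETREGSET / PTRACE_SETREGSET interface, which
   takes a note type and an iovec rather than a raw buffer.  A minimal
   stand-alone sketch of a fetch (assuming NT_PRSTATUS and
   struct user_regs_struct, which is how most architectures expose the
   general registers):

     #include <sys/ptrace.h>
     #include <sys/uio.h>
     #include <sys/user.h>
     #include <elf.h>

     struct user_regs_struct regs;
     struct iovec iov = { &regs, sizeof (regs) };
     if (ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov) == 0)
       ...success: iov.iov_len holds the byte count the kernel filled in...
*/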
4886
4887 static int
4888 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4889 struct regcache *regcache)
4890 {
4891 struct regset_info *regset;
4892 int saw_general_regs = 0;
4893 int pid;
4894 struct iovec iov;
4895
4896 pid = lwpid_of (current_thread);
4897 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4898 {
4899 void *buf, *data;
4900 int nt_type, res;
4901
4902 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4903 || regset->fill_function == NULL)
4904 continue;
4905
4906 buf = xmalloc (regset->size);
4907
4908 /* First fill the buffer with the current register set contents,
4909 in case there are any items in the kernel's regset that are
4910 not in gdbserver's regcache. */
4911
4912 nt_type = regset->nt_type;
4913 if (nt_type)
4914 {
4915 iov.iov_base = buf;
4916 iov.iov_len = regset->size;
4917 data = (void *) &iov;
4918 }
4919 else
4920 data = buf;
4921
4922 #ifndef __sparc__
4923 res = ptrace (regset->get_request, pid,
4924 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4925 #else
4926 res = ptrace (regset->get_request, pid, data, nt_type);
4927 #endif
4928
4929 if (res == 0)
4930 {
4931 /* Then overlay our cached registers on that. */
4932 regset->fill_function (regcache, buf);
4933
4934 /* Only now do we write the register set. */
4935 #ifndef __sparc__
4936 res = ptrace (regset->set_request, pid,
4937 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4938 #else
4939 res = ptrace (regset->set_request, pid, data, nt_type);
4940 #endif
4941 }
4942
4943 if (res < 0)
4944 {
4945 if (errno == EIO)
4946 {
4947 /* If we get EIO on a regset, do not try it again for
4948 this process mode. */
4949 disable_regset (regsets_info, regset);
4950 }
4951 else if (errno == ESRCH)
4952 {
4953 /* At this point, ESRCH should mean the process is
4954 already gone, in which case we simply ignore attempts
4955 to change its registers. See also the related
4956 comment in linux_resume_one_lwp. */
4957 free (buf);
4958 return 0;
4959 }
4960 else
4961 {
4962 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4963 }
4964 }
4965 else if (regset->type == GENERAL_REGS)
4966 saw_general_regs = 1;
4967 free (buf);
4968 }
4969 if (saw_general_regs)
4970 return 0;
4971 else
4972 return 1;
4973 }
4974
4975 #else /* !HAVE_LINUX_REGSETS */
4976
4977 #define use_linux_regsets 0
4978 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4979 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4980
4981 #endif
4982
4983 /* Return 1 if register REGNO is supported by one of the regset ptrace
4984 calls or 0 if it has to be transferred individually. */
4985
4986 static int
4987 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4988 {
4989 unsigned char mask = 1 << (regno % 8);
4990 size_t index = regno / 8;
4991
4992 return (use_linux_regsets
4993 && (regs_info->regset_bitmap == NULL
4994 || (regs_info->regset_bitmap[index] & mask) != 0));
4995 }
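/* Worked example of the bitmap test above: for regno == 10 the byte
   index is 10 / 8 == 1 and the mask is 1 << (10 % 8) == 0x04, so
   register 10 is handled by the regsets if and only if bit 2 of
   regset_bitmap[1] is set, or no bitmap was provided at all.  */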
4996
4997 #ifdef HAVE_LINUX_USRREGS
4998
4999 int
5000 register_addr (const struct usrregs_info *usrregs, int regnum)
5001 {
5002 int addr;
5003
5004 if (regnum < 0 || regnum >= usrregs->num_regs)
5005 error ("Invalid register number %d.", regnum);
5006
5007 addr = usrregs->regmap[regnum];
5008
5009 return addr;
5010 }
5011
5012 /* Fetch one register. */
5013 static void
5014 fetch_register (const struct usrregs_info *usrregs,
5015 struct regcache *regcache, int regno)
5016 {
5017 CORE_ADDR regaddr;
5018 int i, size;
5019 char *buf;
5020 int pid;
5021
5022 if (regno >= usrregs->num_regs)
5023 return;
5024 if ((*the_low_target.cannot_fetch_register) (regno))
5025 return;
5026
5027 regaddr = register_addr (usrregs, regno);
5028 if (regaddr == -1)
5029 return;
5030
5031 size = ((register_size (regcache->tdesc, regno)
5032 + sizeof (PTRACE_XFER_TYPE) - 1)
5033 & -sizeof (PTRACE_XFER_TYPE));
5034 buf = alloca (size);
5035
5036 pid = lwpid_of (current_thread);
5037 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5038 {
5039 errno = 0;
5040 *(PTRACE_XFER_TYPE *) (buf + i) =
5041 ptrace (PTRACE_PEEKUSER, pid,
5042 /* Coerce to a uintptr_t first to avoid potential gcc warning
5043 of coercing an 8 byte integer to a 4 byte pointer. */
5044 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5045 regaddr += sizeof (PTRACE_XFER_TYPE);
5046 if (errno != 0)
5047 error ("reading register %d: %s", regno, strerror (errno));
5048 }
5049
5050 if (the_low_target.supply_ptrace_register)
5051 the_low_target.supply_ptrace_register (regcache, regno, buf);
5052 else
5053 supply_register (regcache, regno, buf);
5054 }
5055
5056 /* Store one register. */
5057 static void
5058 store_register (const struct usrregs_info *usrregs,
5059 struct regcache *regcache, int regno)
5060 {
5061 CORE_ADDR regaddr;
5062 int i, size;
5063 char *buf;
5064 int pid;
5065
5066 if (regno >= usrregs->num_regs)
5067 return;
5068 if ((*the_low_target.cannot_store_register) (regno))
5069 return;
5070
5071 regaddr = register_addr (usrregs, regno);
5072 if (regaddr == -1)
5073 return;
5074
5075 size = ((register_size (regcache->tdesc, regno)
5076 + sizeof (PTRACE_XFER_TYPE) - 1)
5077 & -sizeof (PTRACE_XFER_TYPE));
5078 buf = alloca (size);
5079 memset (buf, 0, size);
5080
5081 if (the_low_target.collect_ptrace_register)
5082 the_low_target.collect_ptrace_register (regcache, regno, buf);
5083 else
5084 collect_register (regcache, regno, buf);
5085
5086 pid = lwpid_of (current_thread);
5087 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5088 {
5089 errno = 0;
5090 ptrace (PTRACE_POKEUSER, pid,
5091 /* Coerce to a uintptr_t first to avoid potential gcc warning
5092 about coercing an 8 byte integer to a 4 byte pointer. */
5093 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5094 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5095 if (errno != 0)
5096 {
5097 /* At this point, ESRCH should mean the process is
5098 already gone, in which case we simply ignore attempts
5099 to change its registers. See also the related
5100 comment in linux_resume_one_lwp. */
5101 if (errno == ESRCH)
5102 return;
5103
5104 if ((*the_low_target.cannot_store_register) (regno) == 0)
5105 error ("writing register %d: %s", regno, strerror (errno));
5106 }
5107 regaddr += sizeof (PTRACE_XFER_TYPE);
5108 }
5109 }
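/* The size rounding used in fetch_register and store_register above
   widens the transfer to a whole number of PTRACE_XFER_TYPE words.
   For example, with an 8-byte PTRACE_XFER_TYPE, a 10-byte register
   yields (10 + 8 - 1) & -8 == 16 bytes, i.e. two PTRACE_PEEKUSER or
   PTRACE_POKEUSER transfers.  */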
5110
5111 /* Fetch all registers, or just one, from the child process.
5112 If REGNO is -1, do this for all registers, skipping any that are
5113 assumed to have been retrieved by regsets_fetch_inferior_registers,
5114 unless ALL is non-zero.
5115 Otherwise, REGNO specifies which register (so we can save time). */
5116 static void
5117 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5118 struct regcache *regcache, int regno, int all)
5119 {
5120 struct usrregs_info *usr = regs_info->usrregs;
5121
5122 if (regno == -1)
5123 {
5124 for (regno = 0; regno < usr->num_regs; regno++)
5125 if (all || !linux_register_in_regsets (regs_info, regno))
5126 fetch_register (usr, regcache, regno);
5127 }
5128 else
5129 fetch_register (usr, regcache, regno);
5130 }
5131
5132 /* Store our register values back into the inferior.
5133 If REGNO is -1, do this for all registers, skipping any that are
5134 assumed to have been saved by regsets_store_inferior_registers,
5135 unless ALL is non-zero.
5136 Otherwise, REGNO specifies which register (so we can save time). */
5137 static void
5138 usr_store_inferior_registers (const struct regs_info *regs_info,
5139 struct regcache *regcache, int regno, int all)
5140 {
5141 struct usrregs_info *usr = regs_info->usrregs;
5142
5143 if (regno == -1)
5144 {
5145 for (regno = 0; regno < usr->num_regs; regno++)
5146 if (all || !linux_register_in_regsets (regs_info, regno))
5147 store_register (usr, regcache, regno);
5148 }
5149 else
5150 store_register (usr, regcache, regno);
5151 }
5152
5153 #else /* !HAVE_LINUX_USRREGS */
5154
5155 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5156 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5157
5158 #endif
5159
5160
5161 void
5162 linux_fetch_registers (struct regcache *regcache, int regno)
5163 {
5164 int use_regsets;
5165 int all = 0;
5166 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5167
5168 if (regno == -1)
5169 {
5170 if (the_low_target.fetch_register != NULL
5171 && regs_info->usrregs != NULL)
5172 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5173 (*the_low_target.fetch_register) (regcache, regno);
5174
5175 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5176 if (regs_info->usrregs != NULL)
5177 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5178 }
5179 else
5180 {
5181 if (the_low_target.fetch_register != NULL
5182 && (*the_low_target.fetch_register) (regcache, regno))
5183 return;
5184
5185 use_regsets = linux_register_in_regsets (regs_info, regno);
5186 if (use_regsets)
5187 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5188 regcache);
5189 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5190 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5191 }
5192 }
5193
5194 void
5195 linux_store_registers (struct regcache *regcache, int regno)
5196 {
5197 int use_regsets;
5198 int all = 0;
5199 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5200
5201 if (regno == -1)
5202 {
5203 all = regsets_store_inferior_registers (regs_info->regsets_info,
5204 regcache);
5205 if (regs_info->usrregs != NULL)
5206 usr_store_inferior_registers (regs_info, regcache, regno, all);
5207 }
5208 else
5209 {
5210 use_regsets = linux_register_in_regsets (regs_info, regno);
5211 if (use_regsets)
5212 all = regsets_store_inferior_registers (regs_info->regsets_info,
5213 regcache);
5214 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5215 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5216 }
5217 }
5218
5219
5220 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5221 to debugger memory starting at MYADDR. */
5222
5223 static int
5224 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5225 {
5226 int pid = lwpid_of (current_thread);
5227 register PTRACE_XFER_TYPE *buffer;
5228 register CORE_ADDR addr;
5229 register int count;
5230 char filename[64];
5231 register int i;
5232 int ret;
5233 int fd;
5234
5235 /* Try using /proc. Don't bother for one word. */
5236 if (len >= 3 * sizeof (long))
5237 {
5238 int bytes;
5239
5240 /* We could keep this file open and cache it - possibly one per
5241 thread. That requires some juggling, but is even faster. */
5242 sprintf (filename, "/proc/%d/mem", pid);
5243 fd = open (filename, O_RDONLY | O_LARGEFILE);
5244 if (fd == -1)
5245 goto no_proc;
5246
5247 /* If pread64 is available, use it. It's faster if the kernel
5248 supports it (only one syscall), and it's 64-bit safe even on
5249 32-bit platforms (for instance, SPARC debugging a SPARC64
5250 application). */
5251 #ifdef HAVE_PREAD64
5252 bytes = pread64 (fd, myaddr, len, memaddr);
5253 #else
5254 bytes = -1;
5255 if (lseek (fd, memaddr, SEEK_SET) != -1)
5256 bytes = read (fd, myaddr, len);
5257 #endif
5258
5259 close (fd);
5260 if (bytes == len)
5261 return 0;
5262
5263 /* Some data was read, we'll try to get the rest with ptrace. */
5264 if (bytes > 0)
5265 {
5266 memaddr += bytes;
5267 myaddr += bytes;
5268 len -= bytes;
5269 }
5270 }
5271
5272 no_proc:
5273 /* Round starting address down to longword boundary. */
5274 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5275 /* Round ending address up; get number of longwords that makes. */
5276 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5277 / sizeof (PTRACE_XFER_TYPE));
5278 /* Allocate buffer of that many longwords. */
5279 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5280
5281 /* Read all the longwords */
5282 errno = 0;
5283 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5284 {
5285 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5286 about coercing an 8 byte integer to a 4 byte pointer. */
5287 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5288 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5289 (PTRACE_TYPE_ARG4) 0);
5290 if (errno)
5291 break;
5292 }
5293 ret = errno;
5294
5295 /* Copy appropriate bytes out of the buffer. */
5296 if (i > 0)
5297 {
5298 i *= sizeof (PTRACE_XFER_TYPE);
5299 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5300 memcpy (myaddr,
5301 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5302 i < len ? i : len);
5303 }
5304
5305 return ret;
5306 }
5307
5308 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5309 memory at MEMADDR. On failure (cannot write to the inferior)
5310 returns the value of errno. Always succeeds if LEN is zero. */
5311
5312 static int
5313 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5314 {
5315 register int i;
5316 /* Round starting address down to longword boundary. */
5317 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5318 /* Round ending address up; get number of longwords that makes. */
5319 register int count
5320 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5321 / sizeof (PTRACE_XFER_TYPE);
5322
5323 /* Allocate buffer of that many longwords. */
5324 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5325 alloca (count * sizeof (PTRACE_XFER_TYPE));
5326
5327 int pid = lwpid_of (current_thread);
5328
5329 if (len == 0)
5330 {
5331 /* Zero length write always succeeds. */
5332 return 0;
5333 }
5334
5335 if (debug_threads)
5336 {
5337 /* Dump up to four bytes. */
5338 char str[4 * 2 + 1];
5339 char *p = str;
5340 int dump = len < 4 ? len : 4;
5341
5342 for (i = 0; i < dump; i++)
5343 {
5344 sprintf (p, "%02x", myaddr[i]);
5345 p += 2;
5346 }
5347 *p = '\0';
5348
5349 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5350 str, (long) memaddr, pid);
5351 }
5352
5353 /* Fill start and end extra bytes of buffer with existing memory data. */
5354
5355 errno = 0;
5356 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5357 about coercing an 8 byte integer to a 4 byte pointer. */
5358 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5359 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5360 (PTRACE_TYPE_ARG4) 0);
5361 if (errno)
5362 return errno;
5363
5364 if (count > 1)
5365 {
5366 errno = 0;
5367 buffer[count - 1]
5368 = ptrace (PTRACE_PEEKTEXT, pid,
5369 /* Coerce to a uintptr_t first to avoid potential gcc warning
5370 about coercing an 8 byte integer to a 4 byte pointer. */
5371 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5372 * sizeof (PTRACE_XFER_TYPE)),
5373 (PTRACE_TYPE_ARG4) 0);
5374 if (errno)
5375 return errno;
5376 }
5377
5378 /* Copy data to be written over corresponding part of buffer. */
5379
5380 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5381 myaddr, len);
5382
5383 /* Write the entire buffer. */
5384
5385 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5386 {
5387 errno = 0;
5388 ptrace (PTRACE_POKETEXT, pid,
5389 /* Coerce to a uintptr_t first to avoid potential gcc warning
5390 about coercing an 8 byte integer to a 4 byte pointer. */
5391 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5392 (PTRACE_TYPE_ARG4) buffer[i]);
5393 if (errno)
5394 return errno;
5395 }
5396
5397 return 0;
5398 }
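/* Worked example of the alignment handling above: writing LEN == 10
   bytes at MEMADDR == 0x1003 with an 8-byte PTRACE_XFER_TYPE gives
   ADDR == 0x1000 and COUNT == ((0x100d - 0x1000) + 7) / 8 == 2.  The
   two words at 0x1000 and 0x1008 are first peeked into the buffer,
   the 10 payload bytes are copied in at offset 3, and both words are
   poked back, so the bytes outside [0x1003, 0x100d) are preserved.  */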
5399
5400 static void
5401 linux_look_up_symbols (void)
5402 {
5403 #ifdef USE_THREAD_DB
5404 struct process_info *proc = current_process ();
5405
5406 if (proc->priv->thread_db != NULL)
5407 return;
5408
5409 /* If the kernel supports tracing clones, then we don't need to
5410 use the magic thread event breakpoint to learn about
5411 threads. */
5412 thread_db_init (!linux_supports_traceclone ());
5413 #endif
5414 }
5415
5416 static void
5417 linux_request_interrupt (void)
5418 {
5419 extern unsigned long signal_pid;
5420
5421 /* Send a SIGINT to the process group. This acts just like the user
5422 typed a ^C on the controlling terminal. */
5423 kill (-signal_pid, SIGINT);
5424 }
5425
5426 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5427 to debugger memory starting at MYADDR. */
5428
5429 static int
5430 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5431 {
5432 char filename[PATH_MAX];
5433 int fd, n;
5434 int pid = lwpid_of (current_thread);
5435
5436 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5437
5438 fd = open (filename, O_RDONLY);
5439 if (fd < 0)
5440 return -1;
5441
5442 if (offset != (CORE_ADDR) 0
5443 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5444 n = -1;
5445 else
5446 n = read (fd, myaddr, len);
5447
5448 close (fd);
5449
5450 return n;
5451 }
5452
5453 /* These breakpoint and watchpoint related wrapper functions simply
5454 pass on the function call if the target has registered a
5455 corresponding function. */
5456
5457 static int
5458 linux_supports_z_point_type (char z_type)
5459 {
5460 return (the_low_target.supports_z_point_type != NULL
5461 && the_low_target.supports_z_point_type (z_type));
5462 }
5463
5464 static int
5465 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5466 int size, struct raw_breakpoint *bp)
5467 {
5468 if (type == raw_bkpt_type_sw)
5469 return insert_memory_breakpoint (bp);
5470 else if (the_low_target.insert_point != NULL)
5471 return the_low_target.insert_point (type, addr, size, bp);
5472 else
5473 /* Unsupported (see target.h). */
5474 return 1;
5475 }
5476
5477 static int
5478 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5479 int size, struct raw_breakpoint *bp)
5480 {
5481 if (type == raw_bkpt_type_sw)
5482 return remove_memory_breakpoint (bp);
5483 else if (the_low_target.remove_point != NULL)
5484 return the_low_target.remove_point (type, addr, size, bp);
5485 else
5486 /* Unsupported (see target.h). */
5487 return 1;
5488 }
5489
5490 /* Implement the to_stopped_by_sw_breakpoint target_ops
5491 method. */
5492
5493 static int
5494 linux_stopped_by_sw_breakpoint (void)
5495 {
5496 struct lwp_info *lwp = get_thread_lwp (current_thread);
5497
5498 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5499 }
5500
5501 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5502 method. */
5503
5504 static int
5505 linux_supports_stopped_by_sw_breakpoint (void)
5506 {
5507 return USE_SIGTRAP_SIGINFO;
5508 }
5509
5510 /* Implement the to_stopped_by_hw_breakpoint target_ops
5511 method. */
5512
5513 static int
5514 linux_stopped_by_hw_breakpoint (void)
5515 {
5516 struct lwp_info *lwp = get_thread_lwp (current_thread);
5517
5518 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5519 }
5520
5521 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5522 method. */
5523
5524 static int
5525 linux_supports_stopped_by_hw_breakpoint (void)
5526 {
5527 return USE_SIGTRAP_SIGINFO;
5528 }
5529
5530 /* Implement the supports_conditional_breakpoints target_ops
5531 method. */
5532
5533 static int
5534 linux_supports_conditional_breakpoints (void)
5535 {
5536 /* GDBserver needs to step over the breakpoint if the condition is
5537 false. GDBserver software single step is too simple, so disable
5538 conditional breakpoints if the target doesn't have hardware single
5539 step. */
5540 return can_hardware_single_step ();
5541 }
5542
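/* Return non-zero if the current thread's LWP last stopped because a
   watchpoint triggered.  */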
5543 static int
5544 linux_stopped_by_watchpoint (void)
5545 {
5546 struct lwp_info *lwp = get_thread_lwp (current_thread);
5547
5548 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5549 }
5550
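/* Return the data address that caused the most recent watchpoint stop
   in the current thread's LWP.  */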
5551 static CORE_ADDR
5552 linux_stopped_data_address (void)
5553 {
5554 struct lwp_info *lwp = get_thread_lwp (current_thread);
5555
5556 return lwp->stopped_data_address;
5557 }
5558
5559 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5560 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5561 && defined(PT_TEXT_END_ADDR)
5562
5563 /* This is only used for targets that define PT_TEXT_ADDR,
5564 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5565 the target has different ways of acquiring this information, like
5566 loadmaps. */
5567
5568 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5569 to tell gdb about. */
5570
5571 static int
5572 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5573 {
5574 unsigned long text, text_end, data;
5575 int pid = lwpid_of (current_thread);
5576
5577 errno = 0;
5578
5579 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5580 (PTRACE_TYPE_ARG4) 0);
5581 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5582 (PTRACE_TYPE_ARG4) 0);
5583 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5584 (PTRACE_TYPE_ARG4) 0);
5585
5586 if (errno == 0)
5587 {
5588 /* Both text and data offsets produced at compile-time (and so
5589 used by gdb) are relative to the beginning of the program,
5590 with the data segment immediately following the text segment.
5591 However, the actual runtime layout in memory may put the data
5592 somewhere else, so when we send gdb a data base-address, we
5593 use the real data base address and subtract the compile-time
5594 data base-address from it (which is just the length of the
5595 text segment). BSS immediately follows data in both
5596 cases. */
5597 *text_p = text;
5598 *data_p = data - (text_end - text);
5599
5600 return 1;
5601 }
5602 return 0;
5603 }
5604 #endif
5605
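/* Handle qXfer:osdata:read requests by forwarding them to the common
   Linux osdata code.  */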
5606 static int
5607 linux_qxfer_osdata (const char *annex,
5608 unsigned char *readbuf, unsigned const char *writebuf,
5609 CORE_ADDR offset, int len)
5610 {
5611 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5612 }
5613
5614 /* Convert a native/host siginfo object into/from the siginfo in the
5615 layout of the inferior's architecture. */
5616
5617 static void
5618 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5619 {
5620 int done = 0;
5621
5622 if (the_low_target.siginfo_fixup != NULL)
5623 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5624
5625 /* If there was no callback, or the callback didn't do anything,
5626 then just do a straight memcpy. */
5627 if (!done)
5628 {
5629 if (direction == 1)
5630 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5631 else
5632 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5633 }
5634 }
5635
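/* Read or write the current LWP's siginfo via PTRACE_GETSIGINFO /
   PTRACE_SETSIGINFO, converting between the ptrace (host) layout and
   the inferior's layout as needed.  */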
5636 static int
5637 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5638 unsigned const char *writebuf, CORE_ADDR offset, int len)
5639 {
5640 int pid;
5641 siginfo_t siginfo;
5642 char inf_siginfo[sizeof (siginfo_t)];
5643
5644 if (current_thread == NULL)
5645 return -1;
5646
5647 pid = lwpid_of (current_thread);
5648
5649 if (debug_threads)
5650 debug_printf ("%s siginfo for lwp %d.\n",
5651 readbuf != NULL ? "Reading" : "Writing",
5652 pid);
5653
5654 if (offset >= sizeof (siginfo))
5655 return -1;
5656
5657 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5658 return -1;
5659
5660 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5661 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5662 inferior with a 64-bit GDBSERVER should look the same as debugging it
5663 with a 32-bit GDBSERVER, we need to convert it. */
5664 siginfo_fixup (&siginfo, inf_siginfo, 0);
5665
5666 if (offset + len > sizeof (siginfo))
5667 len = sizeof (siginfo) - offset;
5668
5669 if (readbuf != NULL)
5670 memcpy (readbuf, inf_siginfo + offset, len);
5671 else
5672 {
5673 memcpy (inf_siginfo + offset, writebuf, len);
5674
5675 /* Convert back to ptrace layout before flushing it out. */
5676 siginfo_fixup (&siginfo, inf_siginfo, 1);
5677
5678 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5679 return -1;
5680 }
5681
5682 return len;
5683 }
5684
5685 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5686 it lets us notice when children change state; and it acts as the
5687 handler for the sigsuspend in my_waitpid. */
5688
5689 static void
5690 sigchld_handler (int signo)
5691 {
5692 int old_errno = errno;
5693
5694 if (debug_threads)
5695 {
5696 do
5697 {
5698 /* fprintf is not async-signal-safe, so call write
5699 directly. */
5700 if (write (2, "sigchld_handler\n",
5701 sizeof ("sigchld_handler\n") - 1) < 0)
5702 break; /* just ignore */
5703 } while (0);
5704 }
5705
5706 if (target_is_async_p ())
5707 async_file_mark (); /* trigger a linux_wait */
5708
5709 errno = old_errno;
5710 }
5711
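/* Non-stop mode is always supported on GNU/Linux.  */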
5712 static int
5713 linux_supports_non_stop (void)
5714 {
5715 return 1;
5716 }
5717
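/* Enable or disable target async mode: create or tear down the event
   pipe used to wake the event loop from the SIGCHLD handler.  Returns
   the previous setting.  */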
5718 static int
5719 linux_async (int enable)
5720 {
5721 int previous = target_is_async_p ();
5722
5723 if (debug_threads)
5724 debug_printf ("linux_async (%d), previous=%d\n",
5725 enable, previous);
5726
5727 if (previous != enable)
5728 {
5729 sigset_t mask;
5730 sigemptyset (&mask);
5731 sigaddset (&mask, SIGCHLD);
5732
5733 sigprocmask (SIG_BLOCK, &mask, NULL);
5734
5735 if (enable)
5736 {
5737 if (pipe (linux_event_pipe) == -1)
5738 {
5739 linux_event_pipe[0] = -1;
5740 linux_event_pipe[1] = -1;
5741 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5742
5743 warning ("creating event pipe failed.");
5744 return previous;
5745 }
5746
5747 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5748 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5749
5750 /* Register the event loop handler. */
5751 add_file_handler (linux_event_pipe[0],
5752 handle_target_event, NULL);
5753
5754 /* Always trigger a linux_wait. */
5755 async_file_mark ();
5756 }
5757 else
5758 {
5759 delete_file_handler (linux_event_pipe[0]);
5760
5761 close (linux_event_pipe[0]);
5762 close (linux_event_pipe[1]);
5763 linux_event_pipe[0] = -1;
5764 linux_event_pipe[1] = -1;
5765 }
5766
5767 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5768 }
5769
5770 return previous;
5771 }
5772
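/* Put the target in or out of async mode as requested by NONSTOP;
   return 0 on success, -1 if the async state could not be changed.  */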
5773 static int
5774 linux_start_non_stop (int nonstop)
5775 {
5776 /* Register or unregister from event-loop accordingly. */
5777 linux_async (nonstop);
5778
5779 if (target_is_async_p () != (nonstop != 0))
5780 return -1;
5781
5782 return 0;
5783 }
5784
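/* Multi-process debugging is always supported on GNU/Linux.  */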
5785 static int
5786 linux_supports_multi_process (void)
5787 {
5788 return 1;
5789 }
5790
5791 /* Check if fork events are supported. */
5792
5793 static int
5794 linux_supports_fork_events (void)
5795 {
5796 return linux_supports_tracefork ();
5797 }
5798
5799 /* Check if vfork events are supported. */
5800
5801 static int
5802 linux_supports_vfork_events (void)
5803 {
5804 return linux_supports_tracefork ();
5805 }
5806
5807 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5808 options for the specified lwp. */
5809
5810 static int
5811 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5812 void *args)
5813 {
5814 struct thread_info *thread = (struct thread_info *) entry;
5815 struct lwp_info *lwp = get_thread_lwp (thread);
5816
5817 if (!lwp->stopped)
5818 {
5819 /* Stop the lwp so we can modify its ptrace options. */
5820 lwp->must_set_ptrace_flags = 1;
5821 linux_stop_lwp (lwp);
5822 }
5823 else
5824 {
5825 /* Already stopped; go ahead and set the ptrace options. */
5826 struct process_info *proc = find_process_pid (pid_of (thread));
5827 int options = linux_low_ptrace_options (proc->attached);
5828
5829 linux_enable_event_reporting (lwpid_of (thread), options);
5830 lwp->must_set_ptrace_flags = 0;
5831 }
5832
5833 return 0;
5834 }
5835
5836 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5837 ptrace flags for all inferiors. This is in case the new GDB connection
5838 doesn't support the same set of events that the previous one did. */
5839
5840 static void
5841 linux_handle_new_gdb_connection (void)
5842 {
5843 pid_t pid;
5844
5845 /* Request that all the lwps reset their ptrace options. */
5846 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5847 }
5848
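/* Disabling address space randomization is supported only when
   personality support was detected at configure time.  */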
5849 static int
5850 linux_supports_disable_randomization (void)
5851 {
5852 #ifdef HAVE_PERSONALITY
5853 return 1;
5854 #else
5855 return 0;
5856 #endif
5857 }
5858
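/* The in-process agent is supported.  */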
5859 static int
5860 linux_supports_agent (void)
5861 {
5862 return 1;
5863 }
5864
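/* Range stepping is supported only if the low target provides it.  */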
5865 static int
5866 linux_supports_range_stepping (void)
5867 {
5868 if (*the_low_target.supports_range_stepping == NULL)
5869 return 0;
5870
5871 return (*the_low_target.supports_range_stepping) ();
5872 }
5873
5874 /* Enumerate spufs IDs for process PID. */
5875 static int
5876 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5877 {
5878 int pos = 0;
5879 int written = 0;
5880 char path[128];
5881 DIR *dir;
5882 struct dirent *entry;
5883
5884 sprintf (path, "/proc/%ld/fd", pid);
5885 dir = opendir (path);
5886 if (!dir)
5887 return -1;
5888
5889 rewinddir (dir);
5890 while ((entry = readdir (dir)) != NULL)
5891 {
5892 struct stat st;
5893 struct statfs stfs;
5894 int fd;
5895
5896 fd = atoi (entry->d_name);
5897 if (!fd)
5898 continue;
5899
5900 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5901 if (stat (path, &st) != 0)
5902 continue;
5903 if (!S_ISDIR (st.st_mode))
5904 continue;
5905
5906 if (statfs (path, &stfs) != 0)
5907 continue;
5908 if (stfs.f_type != SPUFS_MAGIC)
5909 continue;
5910
5911 if (pos >= offset && pos + 4 <= offset + len)
5912 {
5913 *(unsigned int *)(buf + pos - offset) = fd;
5914 written += 4;
5915 }
5916 pos += 4;
5917 }
5918
5919 closedir (dir);
5920 return written;
5921 }
5922
5923 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5924 object type, using the /proc file system. */
5925 static int
5926 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5927 unsigned const char *writebuf,
5928 CORE_ADDR offset, int len)
5929 {
5930 long pid = lwpid_of (current_thread);
5931 char buf[128];
5932 int fd = 0;
5933 int ret = 0;
5934
5935 if (!writebuf && !readbuf)
5936 return -1;
5937
5938 if (!*annex)
5939 {
5940 if (!readbuf)
5941 return -1;
5942 else
5943 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5944 }
5945
5946 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5947 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5948 if (fd <= 0)
5949 return -1;
5950
5951 if (offset != 0
5952 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5953 {
5954 close (fd);
5955 return 0;
5956 }
5957
5958 if (writebuf)
5959 ret = write (fd, writebuf, (size_t) len);
5960 else
5961 ret = read (fd, readbuf, (size_t) len);
5962
5963 close (fd);
5964 return ret;
5965 }
5966
5967 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5968 struct target_loadseg
5969 {
5970 /* Core address to which the segment is mapped. */
5971 Elf32_Addr addr;
5972 /* VMA recorded in the program header. */
5973 Elf32_Addr p_vaddr;
5974 /* Size of this segment in memory. */
5975 Elf32_Word p_memsz;
5976 };
5977
5978 # if defined PT_GETDSBT
5979 struct target_loadmap
5980 {
5981 /* Protocol version number, must be zero. */
5982 Elf32_Word version;
5983 /* Pointer to the DSBT table, its size, and the DSBT index. */
5984 unsigned *dsbt_table;
5985 unsigned dsbt_size, dsbt_index;
5986 /* Number of segments in this map. */
5987 Elf32_Word nsegs;
5988 /* The actual memory map. */
5989 struct target_loadseg segs[/*nsegs*/];
5990 };
5991 # define LINUX_LOADMAP PT_GETDSBT
5992 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5993 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5994 # else
5995 struct target_loadmap
5996 {
5997 /* Protocol version number, must be zero. */
5998 Elf32_Half version;
5999 /* Number of segments in this map. */
6000 Elf32_Half nsegs;
6001 /* The actual memory map. */
6002 struct target_loadseg segs[/*nsegs*/];
6003 };
6004 # define LINUX_LOADMAP PTRACE_GETFDPIC
6005 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6006 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6007 # endif
6008
6009 static int
6010 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6011 unsigned char *myaddr, unsigned int len)
6012 {
6013 int pid = lwpid_of (current_thread);
6014 int addr = -1;
6015 struct target_loadmap *data = NULL;
6016 unsigned int actual_length, copy_length;
6017
6018 if (strcmp (annex, "exec") == 0)
6019 addr = (int) LINUX_LOADMAP_EXEC;
6020 else if (strcmp (annex, "interp") == 0)
6021 addr = (int) LINUX_LOADMAP_INTERP;
6022 else
6023 return -1;
6024
6025 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6026 return -1;
6027
6028 if (data == NULL)
6029 return -1;
6030
6031 actual_length = sizeof (struct target_loadmap)
6032 + sizeof (struct target_loadseg) * data->nsegs;
6033
6034 if (offset < 0 || offset > actual_length)
6035 return -1;
6036
6037 copy_length = actual_length - offset < len ? actual_length - offset : len;
6038 memcpy (myaddr, (char *) data + offset, copy_length);
6039 return copy_length;
6040 }
6041 #else
6042 # define linux_read_loadmap NULL
6043 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6044
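/* Forward a qSupported query to the low target, if it provides a
   handler for it.  */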
6045 static void
6046 linux_process_qsupported (const char *query)
6047 {
6048 if (the_low_target.process_qsupported != NULL)
6049 the_low_target.process_qsupported (query);
6050 }
6051
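/* Tracepoints are supported only if the low target says so.  */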
6052 static int
6053 linux_supports_tracepoints (void)
6054 {
6055 if (*the_low_target.supports_tracepoints == NULL)
6056 return 0;
6057
6058 return (*the_low_target.supports_tracepoints) ();
6059 }
6060
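/* Read the program counter from REGCACHE using the low target's
   get_pc hook, or return 0 if no hook is provided.  */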
6061 static CORE_ADDR
6062 linux_read_pc (struct regcache *regcache)
6063 {
6064 if (the_low_target.get_pc == NULL)
6065 return 0;
6066
6067 return (*the_low_target.get_pc) (regcache);
6068 }
6069
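/* Write PC into REGCACHE using the low target's set_pc hook, which
   must be provided.  */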
6070 static void
6071 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6072 {
6073 gdb_assert (the_low_target.set_pc != NULL);
6074
6075 (*the_low_target.set_pc) (regcache, pc);
6076 }
6077
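/* Return non-zero if THREAD's LWP is currently stopped.  */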
6078 static int
6079 linux_thread_stopped (struct thread_info *thread)
6080 {
6081 return get_thread_lwp (thread)->stopped;
6082 }
6083
6084 /* This exposes stop-all-threads functionality to other modules. */
6085
6086 static void
6087 linux_pause_all (int freeze)
6088 {
6089 stop_all_lwps (freeze, NULL);
6090 }
6091
6092 /* This exposes unstop-all-threads functionality to other gdbserver
6093 modules. */
6094
6095 static void
6096 linux_unpause_all (int unfreeze)
6097 {
6098 unstop_all_lwps (unfreeze, NULL);
6099 }
6100
6101 static int
6102 linux_prepare_to_access_memory (void)
6103 {
6104 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6105 running LWP. */
6106 if (non_stop)
6107 linux_pause_all (1);
6108 return 0;
6109 }
6110
6111 static void
6112 linux_done_accessing_memory (void)
6113 {
6114 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6115 running LWP. */
6116 if (non_stop)
6117 linux_unpause_all (1);
6118 }
6119
6120 static int
6121 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6122 CORE_ADDR collector,
6123 CORE_ADDR lockaddr,
6124 ULONGEST orig_size,
6125 CORE_ADDR *jump_entry,
6126 CORE_ADDR *trampoline,
6127 ULONGEST *trampoline_size,
6128 unsigned char *jjump_pad_insn,
6129 ULONGEST *jjump_pad_insn_size,
6130 CORE_ADDR *adjusted_insn_addr,
6131 CORE_ADDR *adjusted_insn_addr_end,
6132 char *err)
6133 {
6134 return (*the_low_target.install_fast_tracepoint_jump_pad)
6135 (tpoint, tpaddr, collector, lockaddr, orig_size,
6136 jump_entry, trampoline, trampoline_size,
6137 jjump_pad_insn, jjump_pad_insn_size,
6138 adjusted_insn_addr, adjusted_insn_addr_end,
6139 err);
6140 }
6141
6142 static struct emit_ops *
6143 linux_emit_ops (void)
6144 {
6145 if (the_low_target.emit_ops != NULL)
6146 return (*the_low_target.emit_ops) ();
6147 else
6148 return NULL;
6149 }
6150
6151 static int
6152 linux_get_min_fast_tracepoint_insn_len (void)
6153 {
6154 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6155 }
6156
6157 /* Extract &phdr and num_phdr from the inferior's auxv. Return 0 on success. */
6158
6159 static int
6160 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6161 CORE_ADDR *phdr_memaddr, int *num_phdr)
6162 {
6163 char filename[PATH_MAX];
6164 int fd;
6165 const int auxv_size = is_elf64
6166 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6167 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6168
6169 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6170
6171 fd = open (filename, O_RDONLY);
6172 if (fd < 0)
6173 return 1;
6174
6175 *phdr_memaddr = 0;
6176 *num_phdr = 0;
6177 while (read (fd, buf, auxv_size) == auxv_size
6178 && (*phdr_memaddr == 0 || *num_phdr == 0))
6179 {
6180 if (is_elf64)
6181 {
6182 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6183
6184 switch (aux->a_type)
6185 {
6186 case AT_PHDR:
6187 *phdr_memaddr = aux->a_un.a_val;
6188 break;
6189 case AT_PHNUM:
6190 *num_phdr = aux->a_un.a_val;
6191 break;
6192 }
6193 }
6194 else
6195 {
6196 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6197
6198 switch (aux->a_type)
6199 {
6200 case AT_PHDR:
6201 *phdr_memaddr = aux->a_un.a_val;
6202 break;
6203 case AT_PHNUM:
6204 *num_phdr = aux->a_un.a_val;
6205 break;
6206 }
6207 }
6208 }
6209
6210 close (fd);
6211
6212 if (*phdr_memaddr == 0 || *num_phdr == 0)
6213 {
6214 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6215 "phdr_memaddr = %ld, phdr_num = %d",
6216 (long) *phdr_memaddr, *num_phdr);
6217 return 2;
6218 }
6219
6220 return 0;
6221 }
6222
6223 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6224
6225 static CORE_ADDR
6226 get_dynamic (const int pid, const int is_elf64)
6227 {
6228 CORE_ADDR phdr_memaddr, relocation;
6229 int num_phdr, i;
6230 unsigned char *phdr_buf;
6231 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6232
6233 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6234 return 0;
6235
6236 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6237 phdr_buf = alloca (num_phdr * phdr_size);
6238
6239 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6240 return 0;
6241
6242 /* Compute relocation: it is expected to be 0 for "regular" executables,
6243 non-zero for PIE ones. */
6244 relocation = -1;
6245 for (i = 0; relocation == -1 && i < num_phdr; i++)
6246 if (is_elf64)
6247 {
6248 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6249
6250 if (p->p_type == PT_PHDR)
6251 relocation = phdr_memaddr - p->p_vaddr;
6252 }
6253 else
6254 {
6255 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6256
6257 if (p->p_type == PT_PHDR)
6258 relocation = phdr_memaddr - p->p_vaddr;
6259 }
6260
6261 if (relocation == -1)
6262 {
6263 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6264 real-world executables, including PIE executables, always have
6265 PT_PHDR present. PT_PHDR is missing from some shared libraries and
6266 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6267 provides DT_DEBUG anyway (fpc binaries are statically linked).
6268
6269 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6270
6271 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6272
6273 return 0;
6274 }
6275
6276 for (i = 0; i < num_phdr; i++)
6277 {
6278 if (is_elf64)
6279 {
6280 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6281
6282 if (p->p_type == PT_DYNAMIC)
6283 return p->p_vaddr + relocation;
6284 }
6285 else
6286 {
6287 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6288
6289 if (p->p_type == PT_DYNAMIC)
6290 return p->p_vaddr + relocation;
6291 }
6292 }
6293
6294 return 0;
6295 }
6296
6297 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6298 can be 0 if the inferior does not yet have the library list initialized.
6299 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6300 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6301
6302 static CORE_ADDR
6303 get_r_debug (const int pid, const int is_elf64)
6304 {
6305 CORE_ADDR dynamic_memaddr;
6306 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6307 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6308 CORE_ADDR map = -1;
6309
6310 dynamic_memaddr = get_dynamic (pid, is_elf64);
6311 if (dynamic_memaddr == 0)
6312 return map;
6313
6314 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6315 {
6316 if (is_elf64)
6317 {
6318 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6319 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6320 union
6321 {
6322 Elf64_Xword map;
6323 unsigned char buf[sizeof (Elf64_Xword)];
6324 }
6325 rld_map;
6326 #endif
6327 #ifdef DT_MIPS_RLD_MAP
6328 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6329 {
6330 if (linux_read_memory (dyn->d_un.d_val,
6331 rld_map.buf, sizeof (rld_map.buf)) == 0)
6332 return rld_map.map;
6333 else
6334 break;
6335 }
6336 #endif /* DT_MIPS_RLD_MAP */
6337 #ifdef DT_MIPS_RLD_MAP_REL
6338 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6339 {
6340 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6341 rld_map.buf, sizeof (rld_map.buf)) == 0)
6342 return rld_map.map;
6343 else
6344 break;
6345 }
6346 #endif /* DT_MIPS_RLD_MAP_REL */
6347
6348 if (dyn->d_tag == DT_DEBUG && map == -1)
6349 map = dyn->d_un.d_val;
6350
6351 if (dyn->d_tag == DT_NULL)
6352 break;
6353 }
6354 else
6355 {
6356 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6357 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6358 union
6359 {
6360 Elf32_Word map;
6361 unsigned char buf[sizeof (Elf32_Word)];
6362 }
6363 rld_map;
6364 #endif
6365 #ifdef DT_MIPS_RLD_MAP
6366 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6367 {
6368 if (linux_read_memory (dyn->d_un.d_val,
6369 rld_map.buf, sizeof (rld_map.buf)) == 0)
6370 return rld_map.map;
6371 else
6372 break;
6373 }
6374 #endif /* DT_MIPS_RLD_MAP */
6375 #ifdef DT_MIPS_RLD_MAP_REL
6376 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6377 {
6378 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6379 rld_map.buf, sizeof (rld_map.buf)) == 0)
6380 return rld_map.map;
6381 else
6382 break;
6383 }
6384 #endif /* DT_MIPS_RLD_MAP_REL */
6385
6386 if (dyn->d_tag == DT_DEBUG && map == -1)
6387 map = dyn->d_un.d_val;
6388
6389 if (dyn->d_tag == DT_NULL)
6390 break;
6391 }
6392
6393 dynamic_memaddr += dyn_size;
6394 }
6395
6396 return map;
6397 }
6398
6399 /* Read one pointer from MEMADDR in the inferior. */
6400
6401 static int
6402 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6403 {
6404 int ret;
6405
6406 /* Go through a union so this works on either big or little endian
6407 hosts, when the inferior's pointer size is smaller than the size
6408 of CORE_ADDR. It is assumed the inferior's endianness is the
6409 same as the superior's. */
6410 union
6411 {
6412 CORE_ADDR core_addr;
6413 unsigned int ui;
6414 unsigned char uc;
6415 } addr;
6416
6417 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6418 if (ret == 0)
6419 {
6420 if (ptr_size == sizeof (CORE_ADDR))
6421 *ptr = addr.core_addr;
6422 else if (ptr_size == sizeof (unsigned int))
6423 *ptr = addr.ui;
6424 else
6425 gdb_assert_not_reached ("unhandled pointer size");
6426 }
6427 return ret;
6428 }
6429
6430 struct link_map_offsets
6431 {
6432 /* Offset and size of r_debug.r_version. */
6433 int r_version_offset;
6434
6435 /* Offset and size of r_debug.r_map. */
6436 int r_map_offset;
6437
6438 /* Offset to l_addr field in struct link_map. */
6439 int l_addr_offset;
6440
6441 /* Offset to l_name field in struct link_map. */
6442 int l_name_offset;
6443
6444 /* Offset to l_ld field in struct link_map. */
6445 int l_ld_offset;
6446
6447 /* Offset to l_next field in struct link_map. */
6448 int l_next_offset;
6449
6450 /* Offset to l_prev field in struct link_map. */
6451 int l_prev_offset;
6452 };
6453
6454 /* Construct qXfer:libraries-svr4:read reply. */
6455
6456 static int
6457 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6458 unsigned const char *writebuf,
6459 CORE_ADDR offset, int len)
6460 {
6461 char *document;
6462 unsigned document_len;
6463 struct process_info_private *const priv = current_process ()->priv;
6464 char filename[PATH_MAX];
6465 int pid, is_elf64;
6466
6467 static const struct link_map_offsets lmo_32bit_offsets =
6468 {
6469 0, /* r_version offset. */
6470 4, /* r_debug.r_map offset. */
6471 0, /* l_addr offset in link_map. */
6472 4, /* l_name offset in link_map. */
6473 8, /* l_ld offset in link_map. */
6474 12, /* l_next offset in link_map. */
6475 16 /* l_prev offset in link_map. */
6476 };
6477
6478 static const struct link_map_offsets lmo_64bit_offsets =
6479 {
6480 0, /* r_version offset. */
6481 8, /* r_debug.r_map offset. */
6482 0, /* l_addr offset in link_map. */
6483 8, /* l_name offset in link_map. */
6484 16, /* l_ld offset in link_map. */
6485 24, /* l_next offset in link_map. */
6486 32 /* l_prev offset in link_map. */
6487 };
6488 const struct link_map_offsets *lmo;
6489 unsigned int machine;
6490 int ptr_size;
6491 CORE_ADDR lm_addr = 0, lm_prev = 0;
6492 int allocated = 1024;
6493 char *p;
6494 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6495 int header_done = 0;
6496
6497 if (writebuf != NULL)
6498 return -2;
6499 if (readbuf == NULL)
6500 return -1;
6501
6502 pid = lwpid_of (current_thread);
6503 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6504 is_elf64 = elf_64_file_p (filename, &machine);
6505 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6506 ptr_size = is_elf64 ? 8 : 4;
6507
6508 while (annex[0] != '\0')
6509 {
6510 const char *sep;
6511 CORE_ADDR *addrp;
6512 int len;
6513
6514 sep = strchr (annex, '=');
6515 if (sep == NULL)
6516 break;
6517
6518 len = sep - annex;
6519 if (len == 5 && startswith (annex, "start"))
6520 addrp = &lm_addr;
6521 else if (len == 4 && startswith (annex, "prev"))
6522 addrp = &lm_prev;
6523 else
6524 {
6525 annex = strchr (sep, ';');
6526 if (annex == NULL)
6527 break;
6528 annex++;
6529 continue;
6530 }
6531
6532 annex = decode_address_to_semicolon (addrp, sep + 1);
6533 }
6534
6535 if (lm_addr == 0)
6536 {
6537 int r_version = 0;
6538
6539 if (priv->r_debug == 0)
6540 priv->r_debug = get_r_debug (pid, is_elf64);
6541
6542 /* We failed to find DT_DEBUG. That situation will not change
6543 for this inferior, so do not retry it. Report it to GDB as
6544 E01; see solib-svr4.c on the GDB side for the reasons. */
6545 if (priv->r_debug == (CORE_ADDR) -1)
6546 return -1;
6547
6548 if (priv->r_debug != 0)
6549 {
6550 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6551 (unsigned char *) &r_version,
6552 sizeof (r_version)) != 0
6553 || r_version != 1)
6554 {
6555 warning ("unexpected r_debug version %d", r_version);
6556 }
6557 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6558 &lm_addr, ptr_size) != 0)
6559 {
6560 warning ("unable to read r_map from 0x%lx",
6561 (long) priv->r_debug + lmo->r_map_offset);
6562 }
6563 }
6564 }
6565
6566 document = xmalloc (allocated);
6567 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6568 p = document + strlen (document);
6569
6570 while (lm_addr
6571 && read_one_ptr (lm_addr + lmo->l_name_offset,
6572 &l_name, ptr_size) == 0
6573 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6574 &l_addr, ptr_size) == 0
6575 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6576 &l_ld, ptr_size) == 0
6577 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6578 &l_prev, ptr_size) == 0
6579 && read_one_ptr (lm_addr + lmo->l_next_offset,
6580 &l_next, ptr_size) == 0)
6581 {
6582 unsigned char libname[PATH_MAX];
6583
6584 if (lm_prev != l_prev)
6585 {
6586 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6587 (long) lm_prev, (long) l_prev);
6588 break;
6589 }
6590
6591 /* Ignore the first entry even if it has a valid name, as the first
6592 entry corresponds to the main executable. The first entry should
6593 not be skipped if the dynamic loader was loaded late by a static
6594 executable (see the solib-svr4.c parameter ignore_first), but in
6595 that case the main executable does not have PT_DYNAMIC present and
6596 this function has already exited above due to a failed get_r_debug. */
6597 if (lm_prev == 0)
6598 {
6599 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6600 p = p + strlen (p);
6601 }
6602 else
6603 {
6604 /* Not checking for error because reading may stop before
6605 we've got PATH_MAX worth of characters. */
6606 libname[0] = '\0';
6607 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6608 libname[sizeof (libname) - 1] = '\0';
6609 if (libname[0] != '\0')
6610 {
6611 /* 6x the size for xml_escape_text below. */
6612 size_t len = 6 * strlen ((char *) libname);
6613 char *name;
6614
6615 if (!header_done)
6616 {
6617 /* Terminate `<library-list-svr4'. */
6618 *p++ = '>';
6619 header_done = 1;
6620 }
6621
6622 while (allocated < p - document + len + 200)
6623 {
6624 /* Expand to guarantee sufficient storage. */
6625 uintptr_t document_len = p - document;
6626
6627 document = xrealloc (document, 2 * allocated);
6628 allocated *= 2;
6629 p = document + document_len;
6630 }
6631
6632 name = xml_escape_text ((char *) libname);
6633 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6634 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6635 name, (unsigned long) lm_addr,
6636 (unsigned long) l_addr, (unsigned long) l_ld);
6637 free (name);
6638 }
6639 }
6640
6641 lm_prev = lm_addr;
6642 lm_addr = l_next;
6643 }
6644
6645 if (!header_done)
6646 {
6647 /* Empty list; terminate `<library-list-svr4'. */
6648 strcpy (p, "/>");
6649 }
6650 else
6651 strcpy (p, "</library-list-svr4>");
6652
6653 document_len = strlen (document);
6654 if (offset < document_len)
6655 document_len -= offset;
6656 else
6657 document_len = 0;
6658 if (len > document_len)
6659 len = document_len;
6660
6661 memcpy (readbuf, document + offset, len);
6662 xfree (document);
6663
6664 return len;
6665 }
6666
6667 #ifdef HAVE_LINUX_BTRACE
6668
6669 /* See to_enable_btrace target method. */
6670
6671 static struct btrace_target_info *
6672 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6673 {
6674 struct btrace_target_info *tinfo;
6675
6676 tinfo = linux_enable_btrace (ptid, conf);
6677
6678 if (tinfo != NULL && tinfo->ptr_bits == 0)
6679 {
6680 struct thread_info *thread = find_thread_ptid (ptid);
6681 struct regcache *regcache = get_thread_regcache (thread, 0);
6682
6683 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6684 }
6685
6686 return tinfo;
6687 }
6688
6689 /* See to_disable_btrace target method. */
6690
6691 static int
6692 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6693 {
6694 enum btrace_error err;
6695
6696 err = linux_disable_btrace (tinfo);
6697 return (err == BTRACE_ERR_NONE ? 0 : -1);
6698 }
6699
6700 /* Encode an Intel(R) Processor Trace configuration. */
6701
6702 static void
6703 linux_low_encode_pt_config (struct buffer *buffer,
6704 const struct btrace_data_pt_config *config)
6705 {
6706 buffer_grow_str (buffer, "<pt-config>\n");
6707
6708 switch (config->cpu.vendor)
6709 {
6710 case CV_INTEL:
6711 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6712 "model=\"%u\" stepping=\"%u\"/>\n",
6713 config->cpu.family, config->cpu.model,
6714 config->cpu.stepping);
6715 break;
6716
6717 default:
6718 break;
6719 }
6720
6721 buffer_grow_str (buffer, "</pt-config>\n");
6722 }
6723
6724 /* Encode a raw buffer. */
6725
6726 static void
6727 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6728 unsigned int size)
6729 {
6730 if (size == 0)
6731 return;
6732
6733 /* We use hex encoding - see common/rsp-low.h. */
6734 buffer_grow_str (buffer, "<raw>\n");
6735
6736 while (size-- > 0)
6737 {
6738 char elem[2];
6739
6740 elem[0] = tohex ((*data >> 4) & 0xf);
6741 elem[1] = tohex (*data++ & 0xf);
6742
6743 buffer_grow (buffer, elem, 2);
6744 }
6745
6746 buffer_grow_str (buffer, "</raw>\n");
6747 }
6748
6749 /* See to_read_btrace target method. */
6750
6751 static int
6752 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6753 int type)
6754 {
6755 struct btrace_data btrace;
6756 struct btrace_block *block;
6757 enum btrace_error err;
6758 int i;
6759
6760 btrace_data_init (&btrace);
6761
6762 err = linux_read_btrace (&btrace, tinfo, type);
6763 if (err != BTRACE_ERR_NONE)
6764 {
6765 if (err == BTRACE_ERR_OVERFLOW)
6766 buffer_grow_str0 (buffer, "E.Overflow.");
6767 else
6768 buffer_grow_str0 (buffer, "E.Generic Error.");
6769
6770 goto err;
6771 }
6772
6773 switch (btrace.format)
6774 {
6775 case BTRACE_FORMAT_NONE:
6776 buffer_grow_str0 (buffer, "E.No Trace.");
6777 goto err;
6778
6779 case BTRACE_FORMAT_BTS:
6780 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6781 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6782
6783 for (i = 0;
6784 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6785 i++)
6786 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6787 paddress (block->begin), paddress (block->end));
6788
6789 buffer_grow_str0 (buffer, "</btrace>\n");
6790 break;
6791
6792 case BTRACE_FORMAT_PT:
6793 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6794 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6795 buffer_grow_str (buffer, "<pt>\n");
6796
6797 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6798
6799 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6800 btrace.variant.pt.size);
6801
6802 buffer_grow_str (buffer, "</pt>\n");
6803 buffer_grow_str0 (buffer, "</btrace>\n");
6804 break;
6805
6806 default:
6807 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6808 goto err;
6809 }
6810
6811 btrace_data_fini (&btrace);
6812 return 0;
6813
6814 err:
6815 btrace_data_fini (&btrace);
6816 return -1;
6817 }
6818
6819 /* See to_btrace_conf target method. */
6820
6821 static int
6822 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6823 struct buffer *buffer)
6824 {
6825 const struct btrace_config *conf;
6826
6827 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6828 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6829
6830 conf = linux_btrace_conf (tinfo);
6831 if (conf != NULL)
6832 {
6833 switch (conf->format)
6834 {
6835 case BTRACE_FORMAT_NONE:
6836 break;
6837
6838 case BTRACE_FORMAT_BTS:
6839 buffer_xml_printf (buffer, "<bts");
6840 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6841 buffer_xml_printf (buffer, " />\n");
6842 break;
6843
6844 case BTRACE_FORMAT_PT:
6845 buffer_xml_printf (buffer, "<pt");
6846 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6847 buffer_xml_printf (buffer, "/>\n");
6848 break;
6849 }
6850 }
6851
6852 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6853 return 0;
6854 }
6855 #endif /* HAVE_LINUX_BTRACE */
6856
6857 /* See nat/linux-nat.h. */
6858
6859 ptid_t
6860 current_lwp_ptid (void)
6861 {
6862 return ptid_of (current_thread);
6863 }
6864
6865 static struct target_ops linux_target_ops = {
6866 linux_create_inferior,
6867 linux_arch_setup,
6868 linux_attach,
6869 linux_kill,
6870 linux_detach,
6871 linux_mourn,
6872 linux_join,
6873 linux_thread_alive,
6874 linux_resume,
6875 linux_wait,
6876 linux_fetch_registers,
6877 linux_store_registers,
6878 linux_prepare_to_access_memory,
6879 linux_done_accessing_memory,
6880 linux_read_memory,
6881 linux_write_memory,
6882 linux_look_up_symbols,
6883 linux_request_interrupt,
6884 linux_read_auxv,
6885 linux_supports_z_point_type,
6886 linux_insert_point,
6887 linux_remove_point,
6888 linux_stopped_by_sw_breakpoint,
6889 linux_supports_stopped_by_sw_breakpoint,
6890 linux_stopped_by_hw_breakpoint,
6891 linux_supports_stopped_by_hw_breakpoint,
6892 linux_supports_conditional_breakpoints,
6893 linux_stopped_by_watchpoint,
6894 linux_stopped_data_address,
6895 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6896 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6897 && defined(PT_TEXT_END_ADDR)
6898 linux_read_offsets,
6899 #else
6900 NULL,
6901 #endif
6902 #ifdef USE_THREAD_DB
6903 thread_db_get_tls_address,
6904 #else
6905 NULL,
6906 #endif
6907 linux_qxfer_spu,
6908 hostio_last_error_from_errno,
6909 linux_qxfer_osdata,
6910 linux_xfer_siginfo,
6911 linux_supports_non_stop,
6912 linux_async,
6913 linux_start_non_stop,
6914 linux_supports_multi_process,
6915 linux_supports_fork_events,
6916 linux_supports_vfork_events,
6917 linux_handle_new_gdb_connection,
6918 #ifdef USE_THREAD_DB
6919 thread_db_handle_monitor_command,
6920 #else
6921 NULL,
6922 #endif
6923 linux_common_core_of_thread,
6924 linux_read_loadmap,
6925 linux_process_qsupported,
6926 linux_supports_tracepoints,
6927 linux_read_pc,
6928 linux_write_pc,
6929 linux_thread_stopped,
6930 NULL,
6931 linux_pause_all,
6932 linux_unpause_all,
6933 linux_stabilize_threads,
6934 linux_install_fast_tracepoint_jump_pad,
6935 linux_emit_ops,
6936 linux_supports_disable_randomization,
6937 linux_get_min_fast_tracepoint_insn_len,
6938 linux_qxfer_libraries_svr4,
6939 linux_supports_agent,
6940 #ifdef HAVE_LINUX_BTRACE
6941 linux_supports_btrace,
6942 linux_low_enable_btrace,
6943 linux_low_disable_btrace,
6944 linux_low_read_btrace,
6945 linux_low_btrace_conf,
6946 #else
6947 NULL,
6948 NULL,
6949 NULL,
6950 NULL,
6951 NULL,
6952 #endif
6953 linux_supports_range_stepping,
6954 linux_proc_pid_to_exec_file,
6955 linux_mntns_open_cloexec,
6956 linux_mntns_unlink,
6957 linux_mntns_readlink,
6958 };
6959
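/* Set up gdbserver's initial signal dispositions.  */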
6960 static void
6961 linux_init_signals ()
6962 {
6963 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6964 to find what the cancel signal actually is. */
6965 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6966 signal (__SIGRTMIN+1, SIG_IGN);
6967 #endif
6968 }
6969
6970 #ifdef HAVE_LINUX_REGSETS
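/* Count the number of regsets in INFO; the array is terminated by an
   entry with a negative size.  */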
6971 void
6972 initialize_regsets_info (struct regsets_info *info)
6973 {
6974 for (info->num_regsets = 0;
6975 info->regsets[info->num_regsets].size >= 0;
6976 info->num_regsets++)
6977 ;
6978 }
6979 #endif
6980
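/* One-time start-up initialization of the Linux low target: install
   the target vector and breakpoint data, set up signal handling, and
   initialize the architecture-specific pieces.  */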
6981 void
6982 initialize_low (void)
6983 {
6984 struct sigaction sigchld_action;
6985 memset (&sigchld_action, 0, sizeof (sigchld_action));
6986 set_target_ops (&linux_target_ops);
6987 set_breakpoint_data (the_low_target.breakpoint,
6988 the_low_target.breakpoint_len);
6989 linux_init_signals ();
6990 linux_ptrace_init_warnings ();
6991
6992 sigchld_action.sa_handler = sigchld_handler;
6993 sigemptyset (&sigchld_action.sa_mask);
6994 sigchld_action.sa_flags = SA_RESTART;
6995 sigaction (SIGCHLD, &sigchld_action, NULL);
6996
6997 initialize_low_arch ();
6998
6999 linux_check_ptrace_features ();
7000 }