/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57 #include "nat/linux-namespaces.h"
58
59 #ifndef SPUFS_MAGIC
60 #define SPUFS_MAGIC 0x23c9b64e
61 #endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN has defined these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
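
/* Illustrative sketch (not part of the original file): one way the
   auxv types above are consumed.  /proc/PID/auxv is a flat array of
   auxv entries terminated by an AT_NULL entry.  The helper name
   example_read_auxv_entry is hypothetical; the block is fenced off
   with "#if 0" so it does not affect the build.  */

#if 0
static int
example_read_auxv_entry (int pid, uint64_t type, uint64_t *value)
{
  char filename[64];
  Elf64_auxv_t entry;
  int fd;

  sprintf (filename, "/proc/%d/auxv", pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  /* Scan fixed-size entries until the requested tag is found.  */
  while (read (fd, &entry, sizeof (entry)) == sizeof (entry))
    if (entry.a_type == type)
      {
        *value = entry.a_un.a_val;
        close (fd);
        return 0;
      }

  close (fd);
  return -1;
}
#endif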

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
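
/* Illustrative sketch (hypothetical, not in the original source): how
   the two helpers above cooperate.  An early stop reported by waitpid
   for a not-yet-known pid is parked with add_to_pid_list, and the
   fork/clone event handler later claims it with pull_pid_from_list.
   Fenced off with "#if 0" so it does not affect the build.  */

#if 0
static void
example_park_and_claim (int pid, int wstat)
{
  int status;

  /* Park the early stop for PID.  */
  add_to_pid_list (&stopped_pids, pid, wstat);

  /* Later, claim it: returns 1 and fills STATUS on a hit, and
     unlinks and frees the list entry.  */
  if (pull_pid_from_list (&stopped_pids, pid, &status))
    {
      /* STATUS now holds the waitpid status recorded above.  */
    }
}
#endif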

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
				    siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if HEADER is not an ELF file at all.  Fill *MACHINE
   with the ELF machine type, or EM_NONE if HEADER is not ELF.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
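
/* Illustrative usage sketch (hypothetical): a caller deciding between
   32-bit and 64-bit register layouts based on the inferior's
   executable.  Fenced off with "#if 0" so it does not affect the
   build.  */

#if 0
static int
example_inferior_is_64bit (int pid)
{
  unsigned int machine;

  /* 1 means a 64-bit ELF; 0 means 32-bit; -1 means unreadable or not
     an ELF file.  MACHINE receives the ELF machine type.  */
  return linux_pid_exe_is_elf_64_file (pid, &machine) == 1;
}
#endif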

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list.  For
   a clone event we return 1 so the event is not reported to higher
   layers; fork and vfork events are reported (return 0).  If we see
   an exec event, we will modify ORIG_EVENT_LWP to point to a new LWP
   representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  tdesc = XNEW (struct target_desc);
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      VEC (int) *syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = proc->syscalls_to_catch;
      proc->syscalls_to_catch = NULL;

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = syscalls_to_catch;

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
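
/* Illustrative sketch (hypothetical, not part of the original file):
   the bare ptrace pattern that handle_extended_wait builds on.  With
   PTRACE_O_TRACECLONE set on the parent, a clone is reported as a
   SIGTRAP stop whose high status bits carry PTRACE_EVENT_CLONE, and
   the new thread's id is then fetched with PTRACE_GETEVENTMSG.
   Fenced off with "#if 0" so it does not affect the build.  */

#if 0
static long
example_get_new_lwpid (int parent_lwpid, int wstat)
{
  unsigned long new_pid = 0;

  /* For an extended stop, status == (event << 16) | (SIGTRAP << 8) | 0x7f,
     so the event code lives in bits 16 and up.  */
  if (WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) == SIGTRAP
      && (wstat >> 16) == PTRACE_EVENT_CLONE)
    ptrace (PTRACE_GETEVENTMSG, parent_lwpid, (PTRACE_TYPE_ARG3) 0,
            &new_pid);

  return (long) new_pid;
}
#endif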

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall number trapped.  Fill *SYSRET with the
   return code.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno, int *sysret)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
	 system call number and -ENOSYS return value.  */
      *sysno = UNKNOWN_SYSCALL;
      *sysret = -ENOSYS;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno, sysret);

  if (debug_threads)
    {
      debug_printf ("get_syscall_trapinfo sysno %d sysret %d\n",
		    *sysno, *sysret);
    }

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
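
/* Illustrative sketch (hypothetical): the minimal TRACEME pattern
   that linux_create_inferior wraps.  The child declares itself traced
   before exec'ing, so the kernel stops it at the first instruction of
   the new image, where the parent can collect it with waitpid.
   Fenced off with "#if 0" so it does not affect the build.  */

#if 0
static int
example_spawn_traced (char *program, char **argv)
{
  int pid = fork ();

  if (pid == 0)
    {
      /* Child: request tracing, then exec.  The exec delivers the
	 initial ptrace stop to the parent.  */
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0);
      execv (program, argv);
      _exit (0177);
    }

  return pid;
}
#endif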

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
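
/* Illustrative sketch (hypothetical): the attach dance described
   above, reduced to its core.  PTRACE_ATTACH queues a SIGSTOP; if the
   task was already group-stopped, re-sending SIGSTOP and resuming it
   nudges it into a reportable ptrace stop, much as linux_attach_lwp
   does (the real code uses kill_lwp, which prefers the tkill syscall
   so the signal targets the exact thread).  Fenced off with "#if 0"
   so it does not affect the build.  */

#if 0
static int
example_attach_one (int lwpid)
{
  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return errno;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      /* SIGSTOP is not a realtime signal, so it is only queued once;
	 resuming lets the pending stop be reported through waitpid.  */
      kill (lwpid, SIGSTOP);
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0);
    }

  return 0;
}
#endif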

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = (struct counter *) args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL only, so
     we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
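
/* Illustrative sketch (hypothetical): the status-draining loop above
   in isolation.  After SIGKILL, waitpid is repeated until something
   other than a stop comes out, falling back to __WCLONE for
   non-leader threads where plain waitpid can't see them.  Fenced off
   with "#if 0" so it does not affect the build.  */

#if 0
static void
example_kill_and_reap (int lwpid)
{
  int wstat, res;

  kill (lwpid, SIGKILL);

  do
    {
      res = waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));
}
#endif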

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = (struct process_info *) proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do
    {
      ret = my_waitpid (pid, &status, 0);
      if (WIFEXITED (status) || WIFSIGNALED (status))
	break;
    }
  while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */

static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
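
/* Illustrative usage sketch (hypothetical): counting the LWPs that
   match a filter ptid with iterate_over_lwps.  Returning 0 from the
   callback keeps the iteration going over all matches.  Fenced off
   with "#if 0" so it does not affect the build.  */

#if 0
static int
example_count_cb (struct lwp_info *lwp, void *data)
{
  (*(int *) data)++;
  return 0;	/* Keep iterating.  */
}

static int
example_count_lwps (ptid_t filter)
{
  int count = 0;

  iterate_over_lwps (filter, example_count_cb, &count);
  return count;
}
#endif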

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}
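
/* Illustrative sketch (hypothetical): what a zombie check like
   linux_proc_pid_is_zombie (from nat/linux-procfs.h) amounts to:
   reading the State: line of /proc/PID/status.  Fenced off with
   "#if 0" so it does not affect the build.  */

#if 0
static int
example_pid_is_zombie (pid_t pid)
{
  char filename[64];
  char buf[256];
  ssize_t n;
  int fd;

  sprintf (filename, "/proc/%d/status", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 0;

  n = read (fd, buf, sizeof (buf) - 1);
  close (fd);
  if (n <= 0)
    return 0;
  buf[n] = '\0';

  /* A zombie task reports "State:\tZ (zombie)".  */
  return strstr (buf, "State:\tZ") != NULL;
}
#endif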

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}
1890
1891 /* Increment LWP's suspend count. */
1892
1893 static void
1894 lwp_suspended_inc (struct lwp_info *lwp)
1895 {
1896 lwp->suspended++;
1897
1898 if (debug_threads && lwp->suspended > 4)
1899 {
1900 struct thread_info *thread = get_lwp_thread (lwp);
1901
1902 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1903 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1904 }
1905 }
1906
1907 /* Decrement LWP's suspend count. */
1908
1909 static void
1910 lwp_suspended_decr (struct lwp_info *lwp)
1911 {
1912 lwp->suspended--;
1913
1914 if (lwp->suspended < 0)
1915 {
1916 struct thread_info *thread = get_lwp_thread (lwp);
1917
1918 internal_error (__FILE__, __LINE__,
1919 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1920 lwp->suspended);
1921 }
1922 }
1923
1924 /* This function should only be called if the LWP got a SIGTRAP.
1925
1926 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1927 event was handled, 0 otherwise. */
1928
1929 static int
1930 handle_tracepoints (struct lwp_info *lwp)
1931 {
1932 struct thread_info *tinfo = get_lwp_thread (lwp);
1933 int tpoint_related_event = 0;
1934
1935 gdb_assert (lwp->suspended == 0);
1936
1937 /* If this tracepoint hit causes a tracing stop, we'll immediately
1938 uninsert tracepoints. To do this, we temporarily pause all
1939 threads, unpatch away, and then unpause threads. We need to make
1940 sure the unpausing doesn't resume LWP too. */
1941 lwp_suspended_inc (lwp);
1942
1943 /* And we need to be sure that any all-threads-stopping doesn't try
1944 to move threads out of the jump pads, as it could deadlock the
1945 inferior (LWP could be in the jump pad, maybe even holding the
1946 lock). */
1947
1948 /* Do any necessary step collect actions. */
1949 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1950
1951 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1952
1953 /* See if we just hit a tracepoint and do its main collect
1954 actions. */
1955 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1956
1957 lwp_suspended_decr (lwp);
1958
1959 gdb_assert (lwp->suspended == 0);
1960 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1961
1962 if (tpoint_related_event)
1963 {
1964 if (debug_threads)
1965 debug_printf ("got a tracepoint event\n");
1966 return 1;
1967 }
1968
1969 return 0;
1970 }
1971
1972 /* Convenience wrapper. Returns true if LWP is presently collecting a
1973 fast tracepoint. */
1974
1975 static int
1976 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1977 struct fast_tpoint_collect_status *status)
1978 {
1979 CORE_ADDR thread_area;
1980 struct thread_info *thread = get_lwp_thread (lwp);
1981
1982 if (the_low_target.get_thread_area == NULL)
1983 return 0;
1984
1985 /* Get the thread area address. This is used to recognize which
1986 thread is which when tracing with the in-process agent library.
1987 We don't read anything from the address, and treat it as opaque;
1988 it's the address itself that we assume is unique per-thread. */
1989 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1990 return 0;
1991
1992 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1993 }
1994
1995 /* We resume in the caller because we want to be able
1996 to pass lwp->status_pending as WSTAT, and we need to clear
1997 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1998 refuses to resume. */
1999
2000 static int
2001 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2002 {
2003 struct thread_info *saved_thread;
2004
2005 saved_thread = current_thread;
2006 current_thread = get_lwp_thread (lwp);
2007
2008 if ((wstat == NULL
2009 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2010 && supports_fast_tracepoints ()
2011 && agent_loaded_p ())
2012 {
2013 struct fast_tpoint_collect_status status;
2014 int r;
2015
2016 if (debug_threads)
2017 debug_printf ("Checking whether LWP %ld needs to move out of the "
2018 "jump pad.\n",
2019 lwpid_of (current_thread));
2020
2021 r = linux_fast_tracepoint_collecting (lwp, &status);
2022
2023 if (wstat == NULL
2024 || (WSTOPSIG (*wstat) != SIGILL
2025 && WSTOPSIG (*wstat) != SIGFPE
2026 && WSTOPSIG (*wstat) != SIGSEGV
2027 && WSTOPSIG (*wstat) != SIGBUS))
2028 {
2029 lwp->collecting_fast_tracepoint = r;
2030
2031 if (r != 0)
2032 {
2033 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2034 {
2035 /* Haven't executed the original instruction yet.
2036 Set breakpoint there, and wait till it's hit,
2037 then single-step until exiting the jump pad. */
2038 lwp->exit_jump_pad_bkpt
2039 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2040 }
2041
2042 if (debug_threads)
2043 debug_printf ("Checking whether LWP %ld needs to move out of "
2044 "the jump pad...it does\n",
2045 lwpid_of (current_thread));
2046 current_thread = saved_thread;
2047
2048 return 1;
2049 }
2050 }
2051 else
2052 {
2053 /* If we get a synchronous signal while collecting, *and*
2054 while executing the (relocated) original instruction,
2055 reset the PC to point at the tpoint address, before
2056 reporting to GDB. Otherwise, it's an IPA lib bug: just
2057 report the signal to GDB, and pray for the best. */
2058
2059 lwp->collecting_fast_tracepoint = 0;
2060
2061 if (r != 0
2062 && (status.adjusted_insn_addr <= lwp->stop_pc
2063 && lwp->stop_pc < status.adjusted_insn_addr_end))
2064 {
2065 siginfo_t info;
2066 struct regcache *regcache;
2067
2068 /* The si_addr on a few signals references the address
2069 of the faulting instruction. Adjust that as
2070 well. */
2071 if ((WSTOPSIG (*wstat) == SIGILL
2072 || WSTOPSIG (*wstat) == SIGFPE
2073 || WSTOPSIG (*wstat) == SIGBUS
2074 || WSTOPSIG (*wstat) == SIGSEGV)
2075 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2076 (PTRACE_TYPE_ARG3) 0, &info) == 0
2077 /* Final check just to make sure we don't clobber
2078 the siginfo of non-kernel-sent signals. */
2079 && (uintptr_t) info.si_addr == lwp->stop_pc)
2080 {
2081 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2082 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2083 (PTRACE_TYPE_ARG3) 0, &info);
2084 }
2085
2086 regcache = get_thread_regcache (current_thread, 1);
2087 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2088 lwp->stop_pc = status.tpoint_addr;
2089
2090 /* Cancel any fast tracepoint lock this thread was
2091 holding. */
2092 force_unlock_trace_buffer ();
2093 }
2094
2095 if (lwp->exit_jump_pad_bkpt != NULL)
2096 {
2097 if (debug_threads)
2098 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2099 "stopping all threads momentarily.\n");
2100
2101 stop_all_lwps (1, lwp);
2102
2103 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2104 lwp->exit_jump_pad_bkpt = NULL;
2105
2106 unstop_all_lwps (1, lwp);
2107
2108 gdb_assert (lwp->suspended >= 0);
2109 }
2110 }
2111 }
2112
2113 if (debug_threads)
2114 debug_printf ("Checking whether LWP %ld needs to move out of the "
2115 "jump pad...no\n",
2116 lwpid_of (current_thread));
2117
2118 current_thread = saved_thread;
2119 return 0;
2120 }
2121
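/* For illustration only: the WIFSTOPPED/WSTOPSIG checks above decode
   the same encoding that W_STOPCODE builds elsewhere in this file; a
   stopped wait status packs the signal number in the upper byte. */
#if 0
static void
example_wstat_encoding (void)
{
  int wstat = W_STOPCODE (SIGTRAP);

  gdb_assert (WIFSTOPPED (wstat));
  gdb_assert (WSTOPSIG (wstat) == SIGTRAP);
}
#endif
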
2122 /* Enqueue one signal in the "signals to report later when out of the
2123 jump pad" list. */
2124
2125 static void
2126 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2127 {
2128 struct pending_signals *p_sig;
2129 struct thread_info *thread = get_lwp_thread (lwp);
2130
2131 if (debug_threads)
2132 debug_printf ("Deferring signal %d for LWP %ld.\n",
2133 WSTOPSIG (*wstat), lwpid_of (thread));
2134
2135 if (debug_threads)
2136 {
2137 struct pending_signals *sig;
2138
2139 for (sig = lwp->pending_signals_to_report;
2140 sig != NULL;
2141 sig = sig->prev)
2142 debug_printf (" Already queued %d\n",
2143 sig->signal);
2144
2145 debug_printf (" (no more currently queued signals)\n");
2146 }
2147
2148 /* Don't enqueue non-RT signals if they are already in the deferred
2149 queue. (SIGSTOP being the easiest signal to see ending up here
2150 twice.) */
2151 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2152 {
2153 struct pending_signals *sig;
2154
2155 for (sig = lwp->pending_signals_to_report;
2156 sig != NULL;
2157 sig = sig->prev)
2158 {
2159 if (sig->signal == WSTOPSIG (*wstat))
2160 {
2161 if (debug_threads)
2162 debug_printf ("Not requeuing already queued non-RT signal %d"
2163 " for LWP %ld\n",
2164 sig->signal,
2165 lwpid_of (thread));
2166 return;
2167 }
2168 }
2169 }
2170
2171 p_sig = XCNEW (struct pending_signals);
2172 p_sig->prev = lwp->pending_signals_to_report;
2173 p_sig->signal = WSTOPSIG (*wstat);
2174
2175 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2176 &p_sig->info);
2177
2178 lwp->pending_signals_to_report = p_sig;
2179 }
2180
2181 /* Dequeue one signal from the "signals to report later when out of
2182 the jump pad" list. */
2183
2184 static int
2185 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2186 {
2187 struct thread_info *thread = get_lwp_thread (lwp);
2188
2189 if (lwp->pending_signals_to_report != NULL)
2190 {
2191 struct pending_signals **p_sig;
2192
2193 p_sig = &lwp->pending_signals_to_report;
2194 while ((*p_sig)->prev != NULL)
2195 p_sig = &(*p_sig)->prev;
2196
2197 *wstat = W_STOPCODE ((*p_sig)->signal);
2198 if ((*p_sig)->info.si_signo != 0)
2199 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2200 &(*p_sig)->info);
2201 free (*p_sig);
2202 *p_sig = NULL;
2203
2204 if (debug_threads)
2205 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2206 WSTOPSIG (*wstat), lwpid_of (thread));
2207
2208 if (debug_threads)
2209 {
2210 struct pending_signals *sig;
2211
2212 for (sig = lwp->pending_signals_to_report;
2213 sig != NULL;
2214 sig = sig->prev)
2215 debug_printf (" Still queued %d\n",
2216 sig->signal);
2217
2218 debug_printf (" (no more queued signals)\n");
2219 }
2220
2221 return 1;
2222 }
2223
2224 return 0;
2225 }
2226
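/* For illustration only: the pending_signals list is pushed LIFO
   (newest at the head, linked through PREV), so dequeuing the oldest
   entry means walking to the tail, as dequeue_one_deferred_signal
   does above. A generic sketch of that pattern, assuming the same
   struct shape: */
#if 0
static struct pending_signals *
example_pop_oldest (struct pending_signals **head)
{
  struct pending_signals **p = head;
  struct pending_signals *oldest;

  if (*p == NULL)
    return NULL;
  while ((*p)->prev != NULL)
    p = &(*p)->prev;   /* Walk to the oldest entry.  */
  oldest = *p;
  *p = NULL;           /* Unlink it from the list.  */
  return oldest;
}
#endif
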
2227 /* Fetch the possibly triggered data watchpoint info and store it in
2228 CHILD.
2229
2230 On some archs, like x86, that use debug registers to set
2231 watchpoints, it's possible that the way to know which watched
2232 address trapped is to check the register that is used to select
2233 which address to watch. Problem is, between setting the watchpoint
2234 and reading back which data address trapped, the user may change
2235 the set of watchpoints, and, as a consequence, GDB changes the
2236 debug registers in the inferior. To avoid reading back a stale
2237 stopped-data-address when that happens, we cache in CHILD the fact
2238 that a watchpoint trapped, and the corresponding data address, as
2239 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2240 registers meanwhile, we have the cached data we can rely on. */
2241
2242 static int
2243 check_stopped_by_watchpoint (struct lwp_info *child)
2244 {
2245 if (the_low_target.stopped_by_watchpoint != NULL)
2246 {
2247 struct thread_info *saved_thread;
2248
2249 saved_thread = current_thread;
2250 current_thread = get_lwp_thread (child);
2251
2252 if (the_low_target.stopped_by_watchpoint ())
2253 {
2254 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2255
2256 if (the_low_target.stopped_data_address != NULL)
2257 child->stopped_data_address
2258 = the_low_target.stopped_data_address ();
2259 else
2260 child->stopped_data_address = 0;
2261 }
2262
2263 current_thread = saved_thread;
2264 }
2265
2266 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2267 }
2268
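/* For illustration only: on x86, "check the register that is used to
   select which address to watch" means reading the DR6 status
   register to see which of DR0..DR3 fired, then reading that debug
   register back as the data address. A rough sketch, assuming a
   hypothetical dr_get accessor: */
#if 0
static CORE_ADDR
example_stopped_data_address (void)
{
  unsigned long dr6 = dr_get (6);      /* Debug status register.  */
  int i;

  for (i = 0; i < 4; i++)
    if (dr6 & (1UL << i))              /* B0..B3: which watchpoint hit.  */
      return (CORE_ADDR) dr_get (i);   /* Its address register.  */
  return 0;
}
#endif
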
2269 /* Return the ptrace options that we want to try to enable. */
2270
2271 static int
2272 linux_low_ptrace_options (int attached)
2273 {
2274 int options = 0;
2275
2276 if (!attached)
2277 options |= PTRACE_O_EXITKILL;
2278
2279 if (report_fork_events)
2280 options |= PTRACE_O_TRACEFORK;
2281
2282 if (report_vfork_events)
2283 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2284
2285 if (report_exec_events)
2286 options |= PTRACE_O_TRACEEXEC;
2287
2288 options |= PTRACE_O_TRACESYSGOOD;
2289
2290 return options;
2291 }
2292
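/* For illustration only: these option bits are ultimately applied
   with PTRACE_SETOPTIONS; the real path goes through
   linux_enable_event_reporting (see its use below), which may OR in
   further base options. A direct application would look roughly
   like: */
#if 0
static void
example_apply_ptrace_options (pid_t pid, int attached)
{
  int options = linux_low_ptrace_options (attached);

  ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
          (PTRACE_TYPE_ARG4) (uintptr_t) options);
}
#endif
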
2293 /* Do low-level handling of the event, and check if we should go on
2294 and pass it to caller code. Return the affected LWP if we should, or
2295 NULL otherwise. */
2296
2297 static struct lwp_info *
2298 linux_low_filter_event (int lwpid, int wstat)
2299 {
2300 struct lwp_info *child;
2301 struct thread_info *thread;
2302 int have_stop_pc = 0;
2303
2304 child = find_lwp_pid (pid_to_ptid (lwpid));
2305
2306 /* Check for stop events reported by a process we didn't already
2307 know about - anything not already in our LWP list.
2308
2309 If we're expecting to receive stopped processes after
2310 fork, vfork, and clone events, then we'll just add the
2311 new one to our list and go back to waiting for the event
2312 to be reported - the stopped process might be returned
2313 from waitpid before or after the event is.
2314
2315 But note the case of a non-leader thread exec'ing after the
2316 leader having exited, and gone from our lists (because
2317 check_zombie_leaders deleted it). The non-leader thread
2318 changes its tid to the tgid. */
2319
2320 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2321 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2322 {
2323 ptid_t child_ptid;
2324
2325 /* A multi-thread exec after we had seen the leader exiting. */
2326 if (debug_threads)
2327 {
2328 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2329 "after exec.\n", lwpid);
2330 }
2331
2332 child_ptid = ptid_build (lwpid, lwpid, 0);
2333 child = add_lwp (child_ptid);
2334 child->stopped = 1;
2335 current_thread = child->thread;
2336 }
2337
2338 /* If we didn't find a process, one of two things presumably happened:
2339 - A process we started and then detached from has exited. Ignore it.
2340 - A process we are controlling has forked and the new child's stop
2341 was reported to us by the kernel. Save its PID. */
2342 if (child == NULL && WIFSTOPPED (wstat))
2343 {
2344 add_to_pid_list (&stopped_pids, lwpid, wstat);
2345 return NULL;
2346 }
2347 else if (child == NULL)
2348 return NULL;
2349
2350 thread = get_lwp_thread (child);
2351
2352 child->stopped = 1;
2353
2354 child->last_status = wstat;
2355
2356 /* Check if the thread has exited. */
2357 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2358 {
2359 if (debug_threads)
2360 debug_printf ("LLFE: %d exited.\n", lwpid);
2361
2362 if (finish_step_over (child))
2363 {
2364 /* Unsuspend all other LWPs, and set them back running again. */
2365 unsuspend_all_lwps (child);
2366 }
2367
2368 /* If there is at least one more LWP, then the exit signal was
2369 not the end of the debugged application and should be
2370 ignored, unless GDB wants to hear about thread exits. */
2371 if (report_thread_events
2372 || last_thread_of_process_p (pid_of (thread)))
2373 {
2374 /* Since events are serialized to the GDB core, we can't
2375 report this one right now. Leave the status pending for
2376 the next time we're able to report it. */
2377 mark_lwp_dead (child, wstat);
2378 return child;
2379 }
2380 else
2381 {
2382 delete_lwp (child);
2383 return NULL;
2384 }
2385 }
2386
2387 gdb_assert (WIFSTOPPED (wstat));
2388
2389 if (WIFSTOPPED (wstat))
2390 {
2391 struct process_info *proc;
2392
2393 /* Architecture-specific setup after inferior is running. */
2394 proc = find_process_pid (pid_of (thread));
2395 if (proc->tdesc == NULL)
2396 {
2397 if (proc->attached)
2398 {
2399 /* This needs to happen after we have attached to the
2400 inferior and it is stopped for the first time, but
2401 before we access any inferior registers. */
2402 linux_arch_setup_thread (thread);
2403 }
2404 else
2405 {
2406 /* The process is started, but GDBserver will do
2407 architecture-specific setup after the program stops at
2408 the first instruction. */
2409 child->status_pending_p = 1;
2410 child->status_pending = wstat;
2411 return child;
2412 }
2413 }
2414 }
2415
2416 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2417 {
2418 struct process_info *proc = find_process_pid (pid_of (thread));
2419 int options = linux_low_ptrace_options (proc->attached);
2420
2421 linux_enable_event_reporting (lwpid, options);
2422 child->must_set_ptrace_flags = 0;
2423 }
2424
2425 /* Always update syscall_state, even if it will be filtered later. */
2426 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2427 {
2428 child->syscall_state
2429 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2430 ? TARGET_WAITKIND_SYSCALL_RETURN
2431 : TARGET_WAITKIND_SYSCALL_ENTRY);
2432 }
2433 else
2434 {
2435 /* Almost all other ptrace-stops are known to be outside of system
2436 calls, with further exceptions in handle_extended_wait. */
2437 child->syscall_state = TARGET_WAITKIND_IGNORE;
2438 }
2439
2440 /* Be careful to not overwrite stop_pc until save_stop_reason is
2441 called. */
2442 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2443 && linux_is_extended_waitstatus (wstat))
2444 {
2445 child->stop_pc = get_pc (child);
2446 if (handle_extended_wait (&child, wstat))
2447 {
2448 /* The event has been handled, so just return without
2449 reporting it. */
2450 return NULL;
2451 }
2452 }
2453
2454 if (linux_wstatus_maybe_breakpoint (wstat))
2455 {
2456 if (save_stop_reason (child))
2457 have_stop_pc = 1;
2458 }
2459
2460 if (!have_stop_pc)
2461 child->stop_pc = get_pc (child);
2462
2463 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2464 && child->stop_expected)
2465 {
2466 if (debug_threads)
2467 debug_printf ("Expected stop.\n");
2468 child->stop_expected = 0;
2469
2470 if (thread->last_resume_kind == resume_stop)
2471 {
2472 /* We want to report the stop to the core. Treat the
2473 SIGSTOP as a normal event. */
2474 if (debug_threads)
2475 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2476 target_pid_to_str (ptid_of (thread)));
2477 }
2478 else if (stopping_threads != NOT_STOPPING_THREADS)
2479 {
2480 /* Stopping threads. We don't want this SIGSTOP to end up
2481 pending. */
2482 if (debug_threads)
2483 debug_printf ("LLW: SIGSTOP caught for %s "
2484 "while stopping threads.\n",
2485 target_pid_to_str (ptid_of (thread)));
2486 return NULL;
2487 }
2488 else
2489 {
2490 /* This is a delayed SIGSTOP. Filter out the event. */
2491 if (debug_threads)
2492 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2493 child->stepping ? "step" : "continue",
2494 target_pid_to_str (ptid_of (thread)));
2495
2496 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2497 return NULL;
2498 }
2499 }
2500
2501 child->status_pending_p = 1;
2502 child->status_pending = wstat;
2503 return child;
2504 }
2505
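/* For illustration only: extended ptrace events (fork, exec, ...)
   arrive as a SIGTRAP stop with the event code packed above the
   regular status bits; linux_ptrace_get_extended_event, used above,
   extracts it roughly like this: */
#if 0
static int
example_extended_event (int wstat)
{
  return wstat >> 16;   /* PTRACE_EVENT_FORK, PTRACE_EVENT_EXEC, etc.,
                           or 0 for a plain stop.  */
}
#endif
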
2506 /* Return 1 if THREAD is doing a hardware single step, 0 if software. */
2507
2508 static int
2509 maybe_hw_step (struct thread_info *thread)
2510 {
2511 if (can_hardware_single_step ())
2512 return 1;
2513 else
2514 {
2515 struct process_info *proc = get_thread_process (thread);
2516
2517 /* GDBserver must insert a reinsert breakpoint for software
2518 single step. */
2519 gdb_assert (has_reinsert_breakpoints (proc));
2520 return 0;
2521 }
2522 }
2523
2524 /* Resume LWPs that are currently stopped without any pending status
2525 to report, but are resumed from the core's perspective. */
2526
2527 static void
2528 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2529 {
2530 struct thread_info *thread = (struct thread_info *) entry;
2531 struct lwp_info *lp = get_thread_lwp (thread);
2532
2533 if (lp->stopped
2534 && !lp->suspended
2535 && !lp->status_pending_p
2536 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2537 {
2538 int step = thread->last_resume_kind == resume_step;
2539
2540 if (debug_threads)
2541 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2542 target_pid_to_str (ptid_of (thread)),
2543 paddress (lp->stop_pc),
2544 step);
2545
2546 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2547 }
2548 }
2549
2550 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2551 match FILTER_PTID (leaving others pending). The PTIDs can be:
2552 minus_one_ptid, to specify any child; a pid PTID, specifying all
2553 lwps of a thread group; or a PTID representing a single lwp. Store
2554 the stop status through the status pointer WSTAT. OPTIONS is
2555 passed to the waitpid call. Return 0 if no event was found and
2556 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2557 were found. Return the PID of the stopped child otherwise. */
2558
2559 static int
2560 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2561 int *wstatp, int options)
2562 {
2563 struct thread_info *event_thread;
2564 struct lwp_info *event_child, *requested_child;
2565 sigset_t block_mask, prev_mask;
2566
2567 retry:
2568 /* N.B. event_thread points to the thread_info struct that contains
2569 event_child. Keep them in sync. */
2570 event_thread = NULL;
2571 event_child = NULL;
2572 requested_child = NULL;
2573
2574 /* Check for a lwp with a pending status. */
2575
2576 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2577 {
2578 event_thread = (struct thread_info *)
2579 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2580 if (event_thread != NULL)
2581 event_child = get_thread_lwp (event_thread);
2582 if (debug_threads && event_thread)
2583 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2584 }
2585 else if (!ptid_equal (filter_ptid, null_ptid))
2586 {
2587 requested_child = find_lwp_pid (filter_ptid);
2588
2589 if (stopping_threads == NOT_STOPPING_THREADS
2590 && requested_child->status_pending_p
2591 && requested_child->collecting_fast_tracepoint)
2592 {
2593 enqueue_one_deferred_signal (requested_child,
2594 &requested_child->status_pending);
2595 requested_child->status_pending_p = 0;
2596 requested_child->status_pending = 0;
2597 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2598 }
2599
2600 if (requested_child->suspended
2601 && requested_child->status_pending_p)
2602 {
2603 internal_error (__FILE__, __LINE__,
2604 "requesting an event out of a"
2605 " suspended child?");
2606 }
2607
2608 if (requested_child->status_pending_p)
2609 {
2610 event_child = requested_child;
2611 event_thread = get_lwp_thread (event_child);
2612 }
2613 }
2614
2615 if (event_child != NULL)
2616 {
2617 if (debug_threads)
2618 debug_printf ("Got an event from pending child %ld (%04x)\n",
2619 lwpid_of (event_thread), event_child->status_pending);
2620 *wstatp = event_child->status_pending;
2621 event_child->status_pending_p = 0;
2622 event_child->status_pending = 0;
2623 current_thread = event_thread;
2624 return lwpid_of (event_thread);
2625 }
2626
2627 /* But if we don't find a pending event, we'll have to wait.
2628
2629 We only enter this loop if no process has a pending wait status.
2630 Thus any action taken in response to a wait status inside this
2631 loop is responding as soon as we detect the status, not after any
2632 pending events. */
2633
2634 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2635 all signals while here. */
2636 sigfillset (&block_mask);
2637 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2638
2639 /* Always pull all events out of the kernel. We'll randomly select
2640 an event LWP out of all that have events, to prevent
2641 starvation. */
2642 while (event_child == NULL)
2643 {
2644 pid_t ret = 0;
2645
2646 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2647 quirks:
2648
2649 - If the thread group leader exits while other threads in the
2650 thread group still exist, waitpid(TGID, ...) hangs. That
2651 waitpid won't return an exit status until the other threads
2652 in the group are reaped.
2653
2654 - When a non-leader thread execs, that thread just vanishes
2655 without reporting an exit (so we'd hang if we waited for it
2656 explicitly in that case). The exec event is reported to
2657 the TGID pid. */
2658 errno = 0;
2659 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2660
2661 if (debug_threads)
2662 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2663 ret, errno ? strerror (errno) : "ERRNO-OK");
2664
2665 if (ret > 0)
2666 {
2667 if (debug_threads)
2668 {
2669 debug_printf ("LLW: waitpid %ld received %s\n",
2670 (long) ret, status_to_str (*wstatp));
2671 }
2672
2673 /* Filter all events. IOW, leave all events pending. We'll
2674 randomly select an event LWP out of all that have events
2675 below. */
2676 linux_low_filter_event (ret, *wstatp);
2677 /* Retry until nothing comes out of waitpid. A single
2678 SIGCHLD can indicate more than one child stopped. */
2679 continue;
2680 }
2681
2682 /* Now that we've pulled all events out of the kernel, resume
2683 LWPs that don't have an interesting event to report. */
2684 if (stopping_threads == NOT_STOPPING_THREADS)
2685 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2686
2687 /* ... and find an LWP with a status to report to the core, if
2688 any. */
2689 event_thread = (struct thread_info *)
2690 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2691 if (event_thread != NULL)
2692 {
2693 event_child = get_thread_lwp (event_thread);
2694 *wstatp = event_child->status_pending;
2695 event_child->status_pending_p = 0;
2696 event_child->status_pending = 0;
2697 break;
2698 }
2699
2700 /* Check for zombie thread group leaders. Those can't be reaped
2701 until all other threads in the thread group are. */
2702 check_zombie_leaders ();
2703
2704 /* If there are no resumed children left in the set of LWPs we
2705 want to wait for, bail. We can't just block in
2706 waitpid/sigsuspend, because lwps might have been left stopped
2707 in trace-stop state, and we'd be stuck forever waiting for
2708 their status to change (which would only happen if we resumed
2709 them). Even if WNOHANG is set, this return code is preferred
2710 over 0 (below), as it is more detailed. */
2711 if ((find_inferior (&all_threads,
2712 not_stopped_callback,
2713 &wait_ptid) == NULL))
2714 {
2715 if (debug_threads)
2716 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2717 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2718 return -1;
2719 }
2720
2721 /* No interesting event to report to the caller. */
2722 if ((options & WNOHANG))
2723 {
2724 if (debug_threads)
2725 debug_printf ("WNOHANG set, no event found\n");
2726
2727 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2728 return 0;
2729 }
2730
2731 /* Block until we get an event reported with SIGCHLD. */
2732 if (debug_threads)
2733 debug_printf ("sigsuspend'ing\n");
2734
2735 sigsuspend (&prev_mask);
2736 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2737 goto retry;
2738 }
2739
2740 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2741
2742 current_thread = event_thread;
2743
2744 return lwpid_of (event_thread);
2745 }
2746
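/* For illustration only: the loop above reduced to its skeleton.
   Drain every pending wait status with waitpid(-1, ..., WNOHANG),
   and only when nothing is left block in sigsuspend with SIGCHLD
   unblocked, then drain again: */
#if 0
static void
example_drain_then_block (sigset_t *prev_mask)
{
  int wstat;

  for (;;)
    {
      while (waitpid (-1, &wstat, __WALL | WNOHANG) > 0)
        ;   /* Record/filter each event here.  */
      if (0 /* some recorded event is interesting */)
        break;
      sigsuspend (prev_mask);   /* Wait for the next SIGCHLD.  */
    }
}
#endif
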
2747 /* Wait for an event from child(ren) PTID. PTIDs can be:
2748 minus_one_ptid, to specify any child; a pid PTID, specifying all
2749 lwps of a thread group; or a PTID representing a single lwp. Store
2750 the stop status through the status pointer WSTAT. OPTIONS is
2751 passed to the waitpid call. Return 0 if no event was found and
2752 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2753 were found. Return the PID of the stopped child otherwise. */
2754
2755 static int
2756 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2757 {
2758 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2759 }
2760
2761 /* Count the LWPs that have had events. */
2762
2763 static int
2764 count_events_callback (struct inferior_list_entry *entry, void *data)
2765 {
2766 struct thread_info *thread = (struct thread_info *) entry;
2767 struct lwp_info *lp = get_thread_lwp (thread);
2768 int *count = (int *) data;
2769
2770 gdb_assert (count != NULL);
2771
2772 /* Count only resumed LWPs that have an event pending. */
2773 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2774 && lp->status_pending_p)
2775 (*count)++;
2776
2777 return 0;
2778 }
2779
2780 /* Select the LWP (if any) that is currently being single-stepped. */
2781
2782 static int
2783 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2784 {
2785 struct thread_info *thread = (struct thread_info *) entry;
2786 struct lwp_info *lp = get_thread_lwp (thread);
2787
2788 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2789 && thread->last_resume_kind == resume_step
2790 && lp->status_pending_p)
2791 return 1;
2792 else
2793 return 0;
2794 }
2795
2796 /* Select the Nth LWP that has had an event. */
2797
2798 static int
2799 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2800 {
2801 struct thread_info *thread = (struct thread_info *) entry;
2802 struct lwp_info *lp = get_thread_lwp (thread);
2803 int *selector = (int *) data;
2804
2805 gdb_assert (selector != NULL);
2806
2807 /* Select only resumed LWPs that have an event pending. */
2808 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2809 && lp->status_pending_p)
2810 if ((*selector)-- == 0)
2811 return 1;
2812
2813 return 0;
2814 }
2815
2816 /* Select one LWP out of those that have events pending. */
2817
2818 static void
2819 select_event_lwp (struct lwp_info **orig_lp)
2820 {
2821 int num_events = 0;
2822 int random_selector;
2823 struct thread_info *event_thread = NULL;
2824
2825 /* In all-stop, give preference to the LWP that is being
2826 single-stepped. There will be at most one, and it's the LWP that
2827 the core is most interested in. If we didn't do this, then we'd
2828 have to handle pending step SIGTRAPs somehow in case the core
2829 later continues the previously-stepped thread, otherwise we'd
2830 report the pending SIGTRAP, and the core, not having stepped the
2831 thread, wouldn't understand what the trap was for, and therefore
2832 would report it to the user as a random signal. */
2833 if (!non_stop)
2834 {
2835 event_thread
2836 = (struct thread_info *) find_inferior (&all_threads,
2837 select_singlestep_lwp_callback,
2838 NULL);
2839 if (event_thread != NULL)
2840 {
2841 if (debug_threads)
2842 debug_printf ("SEL: Select single-step %s\n",
2843 target_pid_to_str (ptid_of (event_thread)));
2844 }
2845 }
2846 if (event_thread == NULL)
2847 {
2848 /* No single-stepping LWP. Select one at random, out of those
2849 which have had events. */
2850
2851 /* First see how many events we have. */
2852 find_inferior (&all_threads, count_events_callback, &num_events);
2853 gdb_assert (num_events > 0);
2854
2855 /* Now randomly pick a LWP out of those that have had
2856 events. */
2857 random_selector = (int)
2858 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2859
2860 if (debug_threads && num_events > 1)
2861 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2862 num_events, random_selector);
2863
2864 event_thread
2865 = (struct thread_info *) find_inferior (&all_threads,
2866 select_event_lwp_callback,
2867 &random_selector);
2868 }
2869
2870 if (event_thread != NULL)
2871 {
2872 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2873
2874 /* Switch the event LWP. */
2875 *orig_lp = event_lp;
2876 }
2877 }
2878
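/* For illustration only: the selection expression in select_event_lwp
   maps rand () uniformly onto 0 .. num_events-1 without modulo bias,
   since rand () / (RAND_MAX + 1.0) is uniform on [0, 1): */
#if 0
static int
example_uniform_pick (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif
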
2879 /* Decrement the suspend count of an LWP. */
2880
2881 static int
2882 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2883 {
2884 struct thread_info *thread = (struct thread_info *) entry;
2885 struct lwp_info *lwp = get_thread_lwp (thread);
2886
2887 /* Ignore EXCEPT. */
2888 if (lwp == except)
2889 return 0;
2890
2891 lwp_suspended_decr (lwp);
2892 return 0;
2893 }
2894
2895 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2896 non-NULL. */
2897
2898 static void
2899 unsuspend_all_lwps (struct lwp_info *except)
2900 {
2901 find_inferior (&all_threads, unsuspend_one_lwp, except);
2902 }
2903
2904 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2905 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2906 void *data);
2907 static int lwp_running (struct inferior_list_entry *entry, void *data);
2908 static ptid_t linux_wait_1 (ptid_t ptid,
2909 struct target_waitstatus *ourstatus,
2910 int target_options);
2911
2912 /* Stabilize threads (move out of jump pads).
2913
2914 If a thread is midway through collecting a fast tracepoint, we need to
2915 finish the collection and move it out of the jump pad before
2916 reporting the signal.
2917
2918 This avoids recursion while collecting (when a signal arrives
2919 midway, and the signal handler itself collects), which would trash
2920 the trace buffer. In case the user set a breakpoint in a signal
2921 handler, this avoids the backtrace showing the jump pad, etc.
2922 Most importantly, there are certain things we can't do safely if
2923 threads are stopped in a jump pad (or in its callees). For
2924 example:
2925
2926 - starting a new trace run. A thread still collecting the
2927 previous run could trash the trace buffer when resumed. The trace
2928 buffer control structures would have been reset but the thread had
2929 no way to tell. The thread could even be midway through memcpy'ing
2930 to the buffer, which would mean that when resumed, it would clobber the
2931 trace buffer that had been set for a new run.
2932
2933 - we can't rewrite/reuse the jump pads for new tracepoints
2934 safely. Say you do tstart while a thread is stopped midway through
2935 collecting. When the thread is later resumed, it finishes the
2936 collection, and returns to the jump pad, to execute the original
2937 instruction that was under the tracepoint jump at the time the
2938 older run had been started. If the jump pad had been rewritten
2939 in the meantime for something else in the new run, the thread would now
2940 execute the wrong / random instructions. */
2941
2942 static void
2943 linux_stabilize_threads (void)
2944 {
2945 struct thread_info *saved_thread;
2946 struct thread_info *thread_stuck;
2947
2948 thread_stuck
2949 = (struct thread_info *) find_inferior (&all_threads,
2950 stuck_in_jump_pad_callback,
2951 NULL);
2952 if (thread_stuck != NULL)
2953 {
2954 if (debug_threads)
2955 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2956 lwpid_of (thread_stuck));
2957 return;
2958 }
2959
2960 saved_thread = current_thread;
2961
2962 stabilizing_threads = 1;
2963
2964 /* Kick 'em all. */
2965 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2966
2967 /* Loop until all are stopped out of the jump pads. */
2968 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2969 {
2970 struct target_waitstatus ourstatus;
2971 struct lwp_info *lwp;
2972 int wstat;
2973
2974 /* Note that we go through the full event wait loop. While
2975 moving threads out of the jump pad, we need to be able to step
2976 over internal breakpoints and such. */
2977 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2978
2979 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2980 {
2981 lwp = get_thread_lwp (current_thread);
2982
2983 /* Lock it. */
2984 lwp_suspended_inc (lwp);
2985
2986 if (ourstatus.value.sig != GDB_SIGNAL_0
2987 || current_thread->last_resume_kind == resume_stop)
2988 {
2989 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2990 enqueue_one_deferred_signal (lwp, &wstat);
2991 }
2992 }
2993 }
2994
2995 unsuspend_all_lwps (NULL);
2996
2997 stabilizing_threads = 0;
2998
2999 current_thread = saved_thread;
3000
3001 if (debug_threads)
3002 {
3003 thread_stuck
3004 = (struct thread_info *) find_inferior (&all_threads,
3005 stuck_in_jump_pad_callback,
3006 NULL);
3007 if (thread_stuck != NULL)
3008 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3009 lwpid_of (thread_stuck));
3010 }
3011 }
3012
3013 /* Convenience function that is called when the kernel reports an
3014 event that is not passed out to GDB. */
3015
3016 static ptid_t
3017 ignore_event (struct target_waitstatus *ourstatus)
3018 {
3019 /* If we got an event, there may still be others, as a single
3020 SIGCHLD can indicate more than one child stopped. This forces
3021 another target_wait call. */
3022 async_file_mark ();
3023
3024 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3025 return null_ptid;
3026 }
3027
3028 /* Convenience function that is called when the kernel reports an exit
3029 event. This decides whether to report the event to GDB as a
3030 process exit event, a thread exit event, or to suppress the
3031 event. */
3032
3033 static ptid_t
3034 filter_exit_event (struct lwp_info *event_child,
3035 struct target_waitstatus *ourstatus)
3036 {
3037 struct thread_info *thread = get_lwp_thread (event_child);
3038 ptid_t ptid = ptid_of (thread);
3039
3040 if (!last_thread_of_process_p (pid_of (thread)))
3041 {
3042 if (report_thread_events)
3043 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3044 else
3045 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3046
3047 delete_lwp (event_child);
3048 }
3049 return ptid;
3050 }
3051
3052 /* Returns 1 if GDB is interested in any event_child syscalls. */
3053
3054 static int
3055 gdb_catching_syscalls_p (struct lwp_info *event_child)
3056 {
3057 struct thread_info *thread = get_lwp_thread (event_child);
3058 struct process_info *proc = get_thread_process (thread);
3059
3060 return !VEC_empty (int, proc->syscalls_to_catch);
3061 }
3062
3063 /* Returns 1 if GDB is interested in the event_child syscall.
3064 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
3065
3066 static int
3067 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3068 {
3069 int i, iter;
3070 int sysno, sysret;
3071 struct thread_info *thread = get_lwp_thread (event_child);
3072 struct process_info *proc = get_thread_process (thread);
3073
3074 if (VEC_empty (int, proc->syscalls_to_catch))
3075 return 0;
3076
3077 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3078 return 1;
3079
3080 get_syscall_trapinfo (event_child, &sysno, &sysret);
3081 for (i = 0;
3082 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3083 i++)
3084 if (iter == sysno)
3085 return 1;
3086
3087 return 0;
3088 }
3089
3090 /* Wait for a process event, and return its status. */
3091
3092 static ptid_t
3093 linux_wait_1 (ptid_t ptid,
3094 struct target_waitstatus *ourstatus, int target_options)
3095 {
3096 int w;
3097 struct lwp_info *event_child;
3098 int options;
3099 int pid;
3100 int step_over_finished;
3101 int bp_explains_trap;
3102 int maybe_internal_trap;
3103 int report_to_gdb;
3104 int trace_event;
3105 int in_step_range;
3106 int any_resumed;
3107
3108 if (debug_threads)
3109 {
3110 debug_enter ();
3111 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3112 }
3113
3114 /* Translate generic target options into linux options. */
3115 options = __WALL;
3116 if (target_options & TARGET_WNOHANG)
3117 options |= WNOHANG;
3118
3119 bp_explains_trap = 0;
3120 trace_event = 0;
3121 in_step_range = 0;
3122 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3123
3124 /* Find a resumed LWP, if any. */
3125 if (find_inferior (&all_threads,
3126 status_pending_p_callback,
3127 &minus_one_ptid) != NULL)
3128 any_resumed = 1;
3129 else if ((find_inferior (&all_threads,
3130 not_stopped_callback,
3131 &minus_one_ptid) != NULL))
3132 any_resumed = 1;
3133 else
3134 any_resumed = 0;
3135
3136 if (ptid_equal (step_over_bkpt, null_ptid))
3137 pid = linux_wait_for_event (ptid, &w, options);
3138 else
3139 {
3140 if (debug_threads)
3141 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3142 target_pid_to_str (step_over_bkpt));
3143 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3144 }
3145
3146 if (pid == 0 || (pid == -1 && !any_resumed))
3147 {
3148 gdb_assert (target_options & TARGET_WNOHANG);
3149
3150 if (debug_threads)
3151 {
3152 debug_printf ("linux_wait_1 ret = null_ptid, "
3153 "TARGET_WAITKIND_IGNORE\n");
3154 debug_exit ();
3155 }
3156
3157 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3158 return null_ptid;
3159 }
3160 else if (pid == -1)
3161 {
3162 if (debug_threads)
3163 {
3164 debug_printf ("linux_wait_1 ret = null_ptid, "
3165 "TARGET_WAITKIND_NO_RESUMED\n");
3166 debug_exit ();
3167 }
3168
3169 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3170 return null_ptid;
3171 }
3172
3173 event_child = get_thread_lwp (current_thread);
3174
3175 /* linux_wait_for_event only returns an exit status for the last
3176 child of a process. Report it. */
3177 if (WIFEXITED (w) || WIFSIGNALED (w))
3178 {
3179 if (WIFEXITED (w))
3180 {
3181 ourstatus->kind = TARGET_WAITKIND_EXITED;
3182 ourstatus->value.integer = WEXITSTATUS (w);
3183
3184 if (debug_threads)
3185 {
3186 debug_printf ("linux_wait_1 ret = %s, exited with "
3187 "retcode %d\n",
3188 target_pid_to_str (ptid_of (current_thread)),
3189 WEXITSTATUS (w));
3190 debug_exit ();
3191 }
3192 }
3193 else
3194 {
3195 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3196 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3197
3198 if (debug_threads)
3199 {
3200 debug_printf ("linux_wait_1 ret = %s, terminated with "
3201 "signal %d\n",
3202 target_pid_to_str (ptid_of (current_thread)),
3203 WTERMSIG (w));
3204 debug_exit ();
3205 }
3206 }
3207
3208 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3209 return filter_exit_event (event_child, ourstatus);
3210
3211 return ptid_of (current_thread);
3212 }
3213
3214 /* If a step-over executed a breakpoint instruction, then in the case of a
3215 hardware single step it means a gdb/gdbserver breakpoint had been
3216 planted on top of a permanent breakpoint; in the case of a software
3217 single step it may just mean that gdbserver hit the reinsert breakpoint.
3218 The PC has been adjusted by save_stop_reason to point at
3219 the breakpoint address.
3220 So, in the case of a hardware single step, advance the PC manually
3221 past the breakpoint, and in the case of a software single step, advance
3222 only if it's not the reinsert breakpoint we are hitting.
3223 This avoids the program trapping on the permanent breakpoint
3224 forever. */
3225 if (!ptid_equal (step_over_bkpt, null_ptid)
3226 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3227 && (event_child->stepping
3228 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3229 {
3230 int increment_pc = 0;
3231 int breakpoint_kind = 0;
3232 CORE_ADDR stop_pc = event_child->stop_pc;
3233
3234 breakpoint_kind =
3235 the_target->breakpoint_kind_from_current_state (&stop_pc);
3236 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3237
3238 if (debug_threads)
3239 {
3240 debug_printf ("step-over for %s executed software breakpoint\n",
3241 target_pid_to_str (ptid_of (current_thread)));
3242 }
3243
3244 if (increment_pc != 0)
3245 {
3246 struct regcache *regcache
3247 = get_thread_regcache (current_thread, 1);
3248
3249 event_child->stop_pc += increment_pc;
3250 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3251
3252 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3253 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3254 }
3255 }
3256
3257 /* If this event was not handled before, and is not a SIGTRAP, we
3258 report it. SIGILL and SIGSEGV are also treated as traps in case
3259 a breakpoint is inserted at the current PC. If this target does
3260 not support internal breakpoints at all, we also report the
3261 SIGTRAP without further processing; it's of no concern to us. */
3262 maybe_internal_trap
3263 = (supports_breakpoints ()
3264 && (WSTOPSIG (w) == SIGTRAP
3265 || ((WSTOPSIG (w) == SIGILL
3266 || WSTOPSIG (w) == SIGSEGV)
3267 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3268
3269 if (maybe_internal_trap)
3270 {
3271 /* Handle anything that requires bookkeeping before deciding to
3272 report the event or continue waiting. */
3273
3274 /* First check if we can explain the SIGTRAP with an internal
3275 breakpoint, or if we should possibly report the event to GDB.
3276 Do this before anything that may remove or insert a
3277 breakpoint. */
3278 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3279
3280 /* We have a SIGTRAP, possibly a step-over dance has just
3281 finished. If so, tweak the state machine accordingly,
3282 reinsert breakpoints and delete any reinsert (software
3283 single-step) breakpoints. */
3284 step_over_finished = finish_step_over (event_child);
3285
3286 /* Now invoke the callbacks of any internal breakpoints there. */
3287 check_breakpoints (event_child->stop_pc);
3288
3289 /* Handle tracepoint data collecting. This may overflow the
3290 trace buffer, and cause a tracing stop, removing
3291 breakpoints. */
3292 trace_event = handle_tracepoints (event_child);
3293
3294 if (bp_explains_trap)
3295 {
3296 if (debug_threads)
3297 debug_printf ("Hit a gdbserver breakpoint.\n");
3298 }
3299 }
3300 else
3301 {
3302 /* We have some other signal, possibly a step-over dance was in
3303 progress, and it should be cancelled too. */
3304 step_over_finished = finish_step_over (event_child);
3305 }
3306
3307 /* We have all the data we need. Either report the event to GDB, or
3308 resume threads and keep waiting for more. */
3309
3310 /* If we're collecting a fast tracepoint, finish the collection and
3311 move out of the jump pad before delivering a signal. See
3312 linux_stabilize_threads. */
3313
3314 if (WIFSTOPPED (w)
3315 && WSTOPSIG (w) != SIGTRAP
3316 && supports_fast_tracepoints ()
3317 && agent_loaded_p ())
3318 {
3319 if (debug_threads)
3320 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3321 "to defer or adjust it.\n",
3322 WSTOPSIG (w), lwpid_of (current_thread));
3323
3324 /* Allow debugging the jump pad itself. */
3325 if (current_thread->last_resume_kind != resume_step
3326 && maybe_move_out_of_jump_pad (event_child, &w))
3327 {
3328 enqueue_one_deferred_signal (event_child, &w);
3329
3330 if (debug_threads)
3331 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3332 WSTOPSIG (w), lwpid_of (current_thread));
3333
3334 linux_resume_one_lwp (event_child, 0, 0, NULL);
3335
3336 return ignore_event (ourstatus);
3337 }
3338 }
3339
3340 if (event_child->collecting_fast_tracepoint)
3341 {
3342 if (debug_threads)
3343 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3344 "Check if we're already there.\n",
3345 lwpid_of (current_thread),
3346 event_child->collecting_fast_tracepoint);
3347
3348 trace_event = 1;
3349
3350 event_child->collecting_fast_tracepoint
3351 = linux_fast_tracepoint_collecting (event_child, NULL);
3352
3353 if (event_child->collecting_fast_tracepoint != 1)
3354 {
3355 /* No longer need this breakpoint. */
3356 if (event_child->exit_jump_pad_bkpt != NULL)
3357 {
3358 if (debug_threads)
3359 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3360 "stopping all threads momentarily.\n");
3361
3362 /* Other running threads could hit this breakpoint.
3363 We don't handle moribund locations like GDB does,
3364 instead we always pause all threads when removing
3365 breakpoints, so that any step-over or
3366 decr_pc_after_break adjustment is always taken
3367 care of while the breakpoint is still
3368 inserted. */
3369 stop_all_lwps (1, event_child);
3370
3371 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3372 event_child->exit_jump_pad_bkpt = NULL;
3373
3374 unstop_all_lwps (1, event_child);
3375
3376 gdb_assert (event_child->suspended >= 0);
3377 }
3378 }
3379
3380 if (event_child->collecting_fast_tracepoint == 0)
3381 {
3382 if (debug_threads)
3383 debug_printf ("fast tracepoint finished "
3384 "collecting successfully.\n");
3385
3386 /* We may have a deferred signal to report. */
3387 if (dequeue_one_deferred_signal (event_child, &w))
3388 {
3389 if (debug_threads)
3390 debug_printf ("dequeued one signal.\n");
3391 }
3392 else
3393 {
3394 if (debug_threads)
3395 debug_printf ("no deferred signals.\n");
3396
3397 if (stabilizing_threads)
3398 {
3399 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3400 ourstatus->value.sig = GDB_SIGNAL_0;
3401
3402 if (debug_threads)
3403 {
3404 debug_printf ("linux_wait_1 ret = %s, stopped "
3405 "while stabilizing threads\n",
3406 target_pid_to_str (ptid_of (current_thread)));
3407 debug_exit ();
3408 }
3409
3410 return ptid_of (current_thread);
3411 }
3412 }
3413 }
3414 }
3415
3416 /* Check whether GDB would be interested in this event. */
3417
3418 /* Check if GDB is interested in this syscall. */
3419 if (WIFSTOPPED (w)
3420 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3421 && !gdb_catch_this_syscall_p (event_child))
3422 {
3423 if (debug_threads)
3424 {
3425 debug_printf ("Ignored syscall for LWP %ld.\n",
3426 lwpid_of (current_thread));
3427 }
3428
3429 linux_resume_one_lwp (event_child, event_child->stepping,
3430 0, NULL);
3431 return ignore_event (ourstatus);
3432 }
3433
3434 /* If GDB is not interested in this signal, don't stop other
3435 threads, and don't report it to GDB. Just resume the inferior
3436 right away. We do this for threading-related signals as well as
3437 any that GDB specifically requested we ignore. But never ignore
3438 SIGSTOP if we sent it ourselves, and do not ignore signals when
3439 stepping - they may require special handling to skip the signal
3440 handler. Also never ignore signals that could be caused by a
3441 breakpoint. */
3442 if (WIFSTOPPED (w)
3443 && current_thread->last_resume_kind != resume_step
3444 && (
3445 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3446 (current_process ()->priv->thread_db != NULL
3447 && (WSTOPSIG (w) == __SIGRTMIN
3448 || WSTOPSIG (w) == __SIGRTMIN + 1))
3449 ||
3450 #endif
3451 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3452 && !(WSTOPSIG (w) == SIGSTOP
3453 && current_thread->last_resume_kind == resume_stop)
3454 && !linux_wstatus_maybe_breakpoint (w))))
3455 {
3456 siginfo_t info, *info_p;
3457
3458 if (debug_threads)
3459 debug_printf ("Ignored signal %d for LWP %ld.\n",
3460 WSTOPSIG (w), lwpid_of (current_thread));
3461
3462 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3463 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3464 info_p = &info;
3465 else
3466 info_p = NULL;
3467
3468 if (step_over_finished)
3469 {
3470 /* We cancelled this thread's step-over above. We still
3471 need to unsuspend all other LWPs, and set them back
3472 running again while the signal handler runs. */
3473 unsuspend_all_lwps (event_child);
3474
3475 /* Enqueue the pending signal info so that proceed_all_lwps
3476 doesn't lose it. */
3477 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3478
3479 proceed_all_lwps ();
3480 }
3481 else
3482 {
3483 linux_resume_one_lwp (event_child, event_child->stepping,
3484 WSTOPSIG (w), info_p);
3485 }
3486 return ignore_event (ourstatus);
3487 }
3488
3489 /* Note that all addresses are always "out of the step range" when
3490 there's no range to begin with. */
3491 in_step_range = lwp_in_step_range (event_child);
3492
3493 /* If GDB wanted this thread to single step, and the thread is out
3494 of the step range, we always want to report the SIGTRAP, and let
3495 GDB handle it. Watchpoints should always be reported. So should
3496 signals we can't explain. A SIGTRAP we can't explain could be a
3497 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3498 do, we're able to handle GDB breakpoints on top of internal
3499 breakpoints, by handling the internal breakpoint and still
3500 reporting the event to GDB. If we don't, we're out of luck, GDB
3501 won't see the breakpoint hit. If we see a single-step event but
3502 the thread should be continuing, don't pass the trap to gdb.
3503 That indicates that we had previously finished a single-step but
3504 left the single-step pending -- see
3505 complete_ongoing_step_over. */
3506 report_to_gdb = (!maybe_internal_trap
3507 || (current_thread->last_resume_kind == resume_step
3508 && !in_step_range)
3509 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3510 || (!in_step_range
3511 && !bp_explains_trap
3512 && !trace_event
3513 && !step_over_finished
3514 && !(current_thread->last_resume_kind == resume_continue
3515 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3516 || (gdb_breakpoint_here (event_child->stop_pc)
3517 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3518 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3519 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3520
3521 run_breakpoint_commands (event_child->stop_pc);
3522
3523 /* We found no reason GDB would want us to stop. We either hit one
3524 of our own breakpoints, or finished an internal step GDB
3525 shouldn't know about. */
3526 if (!report_to_gdb)
3527 {
3528 if (debug_threads)
3529 {
3530 if (bp_explains_trap)
3531 debug_printf ("Hit a gdbserver breakpoint.\n");
3532 if (step_over_finished)
3533 debug_printf ("Step-over finished.\n");
3534 if (trace_event)
3535 debug_printf ("Tracepoint event.\n");
3536 if (lwp_in_step_range (event_child))
3537 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3538 paddress (event_child->stop_pc),
3539 paddress (event_child->step_range_start),
3540 paddress (event_child->step_range_end));
3541 }
3542
3543 /* We're not reporting this breakpoint to GDB, so apply the
3544 decr_pc_after_break adjustment to the inferior's regcache
3545 ourselves. */
3546
3547 if (the_low_target.set_pc != NULL)
3548 {
3549 struct regcache *regcache
3550 = get_thread_regcache (current_thread, 1);
3551 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3552 }
3553
3554 /* We may have finished stepping over a breakpoint. If so,
3555 we've stopped and suspended all LWPs momentarily except the
3556 stepping one. This is where we resume them all again. We're
3557 going to keep waiting, so use proceed, which handles stepping
3558 over the next breakpoint. */
3559 if (debug_threads)
3560 debug_printf ("proceeding all threads.\n");
3561
3562 if (step_over_finished)
3563 unsuspend_all_lwps (event_child);
3564
3565 proceed_all_lwps ();
3566 return ignore_event (ourstatus);
3567 }
3568
3569 if (debug_threads)
3570 {
3571 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3572 {
3573 char *str;
3574
3575 str = target_waitstatus_to_string (&event_child->waitstatus);
3576 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3577 lwpid_of (get_lwp_thread (event_child)), str);
3578 xfree (str);
3579 }
3580 if (current_thread->last_resume_kind == resume_step)
3581 {
3582 if (event_child->step_range_start == event_child->step_range_end)
3583 debug_printf ("GDB wanted to single-step, reporting event.\n");
3584 else if (!lwp_in_step_range (event_child))
3585 debug_printf ("Out of step range, reporting event.\n");
3586 }
3587 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3588 debug_printf ("Stopped by watchpoint.\n");
3589 else if (gdb_breakpoint_here (event_child->stop_pc))
3590 debug_printf ("Stopped by GDB breakpoint.\n");
3591 debug_printf ("Hit a non-gdbserver trap event.\n");
3593 }
3594
3595 /* Alright, we're going to report a stop. */
3596
3597 if (!stabilizing_threads)
3598 {
3599 /* In all-stop, stop all threads. */
3600 if (!non_stop)
3601 stop_all_lwps (0, NULL);
3602
3603 /* If we're not waiting for a specific LWP, choose an event LWP
3604 from among those that have had events. Giving equal priority
3605 to all LWPs that have had events helps prevent
3606 starvation. */
3607 if (ptid_equal (ptid, minus_one_ptid))
3608 {
3609 event_child->status_pending_p = 1;
3610 event_child->status_pending = w;
3611
3612 select_event_lwp (&event_child);
3613
3614 /* current_thread and event_child must stay in sync. */
3615 current_thread = get_lwp_thread (event_child);
3616
3617 event_child->status_pending_p = 0;
3618 w = event_child->status_pending;
3619 }
3620
3621 if (step_over_finished)
3622 {
3623 if (!non_stop)
3624 {
3625 /* If we were doing a step-over, all other threads but
3626 the stepping one had been paused in start_step_over,
3627 with their suspend counts incremented. We don't want
3628 to do a full unstop/unpause, because we're in
3629 all-stop mode (so we want threads stopped), but we
3630 still need to unsuspend the other threads, to
3631 decrement their `suspended' count back. */
3632 unsuspend_all_lwps (event_child);
3633 }
3634 else
3635 {
3636 /* If we just finished a step-over, then all threads had
3637 been momentarily paused. In all-stop, that's fine,
3638 we want threads stopped by now anyway. In non-stop,
3639 we need to re-resume threads that GDB wanted to be
3640 running. */
3641 unstop_all_lwps (1, event_child);
3642 }
3643 }
3644
3645 /* Stabilize threads (move out of jump pads). */
3646 if (!non_stop)
3647 stabilize_threads ();
3648 }
3649 else
3650 {
3651 /* If we just finished a step-over, then all threads had been
3652 momentarily paused. In all-stop, that's fine, we want
3653 threads stopped by now anyway. In non-stop, we need to
3654 re-resume threads that GDB wanted to be running. */
3655 if (step_over_finished)
3656 unstop_all_lwps (1, event_child);
3657 }
3658
3659 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3660 {
3661 /* If the reported event is an exit, fork, vfork or exec, let
3662 GDB know. */
3663 *ourstatus = event_child->waitstatus;
3664 /* Clear the event lwp's waitstatus since we handled it already. */
3665 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3666 }
3667 else
3668 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3669
3670 /* Now that we've selected our final event LWP, un-adjust its PC if
3671 it was a software breakpoint, and the client doesn't know we can
3672 adjust the breakpoint ourselves. */
3673 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3674 && !swbreak_feature)
3675 {
3676 int decr_pc = the_low_target.decr_pc_after_break;
3677
3678 if (decr_pc != 0)
3679 {
3680 struct regcache *regcache
3681 = get_thread_regcache (current_thread, 1);
3682 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3683 }
3684 }
3685
3686 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3687 {
3688 int sysret;
3689
3690 get_syscall_trapinfo (event_child,
3691 &ourstatus->value.syscall_number, &sysret);
3692 ourstatus->kind = event_child->syscall_state;
3693 }
3694 else if (current_thread->last_resume_kind == resume_stop
3695 && WSTOPSIG (w) == SIGSTOP)
3696 {
3697 /* GDB requested this thread to stop with vCont;t, and it
3698 stopped cleanly, so report it as SIG0. The use of SIGSTOP
3699 is an implementation detail. */
3700 ourstatus->value.sig = GDB_SIGNAL_0;
3701 }
3702 else if (current_thread->last_resume_kind == resume_stop
3703 && WSTOPSIG (w) != SIGSTOP)
3704 {
3705 /* GDB requested this thread to stop with vCont;t, but it
3706 stopped for some other reason. */
3707 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3708 }
3709 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3710 {
3711 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3712 }
3713
3714 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3715
3716 if (debug_threads)
3717 {
3718 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3719 target_pid_to_str (ptid_of (current_thread)),
3720 ourstatus->kind, ourstatus->value.sig);
3721 debug_exit ();
3722 }
3723
3724 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3725 return filter_exit_event (event_child, ourstatus);
3726
3727 return ptid_of (current_thread);
3728 }
3729
3730 /* Get rid of any pending event in the pipe. */
3731 static void
3732 async_file_flush (void)
3733 {
3734 int ret;
3735 char buf;
3736
3737 do
3738 ret = read (linux_event_pipe[0], &buf, 1);
3739 while (ret >= 0 || (ret == -1 && errno == EINTR));
3740 }
3741
3742 /* Put something in the pipe, so the event loop wakes up. */
3743 static void
3744 async_file_mark (void)
3745 {
3746 int ret;
3747
3748 async_file_flush ();
3749
3750 do
3751 ret = write (linux_event_pipe[1], "+", 1);
3752 while (ret == 0 || (ret == -1 && errno == EINTR));
3753
3754 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3755 be awakened anyway. */
3756 }
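
/* For reference, a minimal sketch of the self-pipe pattern the two
   helpers above rely on.  Illustrative only -- the real
   linux_event_pipe is set up elsewhere in this file -- and it assumes
   both ends are made non-blocking, so that async_file_flush can drain
   until read fails with EAGAIN and async_file_mark can safely ignore
   a full pipe:

     int fds[2];

     if (pipe (fds) == 0)
       {
         fcntl (fds[0], F_SETFL, O_NONBLOCK);
         fcntl (fds[1], F_SETFL, O_NONBLOCK);
       }

   The read end is registered with the event loop; writing one byte
   wakes the loop up, and draining the pipe clears the wakeup.  */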
3757
3758 static ptid_t
3759 linux_wait (ptid_t ptid,
3760 struct target_waitstatus *ourstatus, int target_options)
3761 {
3762 ptid_t event_ptid;
3763
3764 /* Flush the async file first. */
3765 if (target_is_async_p ())
3766 async_file_flush ();
3767
3768 do
3769 {
3770 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3771 }
3772 while ((target_options & TARGET_WNOHANG) == 0
3773 && ptid_equal (event_ptid, null_ptid)
3774 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3775
3776 /* If at least one stop was reported, there may be more. A single
3777 SIGCHLD can signal more than one child stop. */
3778 if (target_is_async_p ()
3779 && (target_options & TARGET_WNOHANG) != 0
3780 && !ptid_equal (event_ptid, null_ptid))
3781 async_file_mark ();
3782
3783 return event_ptid;
3784 }
3785
3786 /* Send a signal to an LWP. */
3787
3788 static int
3789 kill_lwp (unsigned long lwpid, int signo)
3790 {
3791 int ret;
3792
3793 errno = 0;
3794 ret = syscall (__NR_tkill, lwpid, signo);
3795 if (errno == ENOSYS)
3796 {
3797 /* If tkill fails, then we are not using nptl threads, a
3798 configuration we no longer support. */
3799 perror_with_name (("tkill"));
3800 }
3801 return ret;
3802 }
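
/* Usage sketch for kill_lwp (illustrative, not a verbatim excerpt):
   the first argument is a kernel LWP id, so the signal reaches one
   specific thread rather than the whole thread group:

     kill_lwp (lwpid_of (get_lwp_thread (lwp)), SIGSTOP);

   Where available, the tgkill syscall additionally guards against
   LWP id reuse by checking the thread group id too; this file relies
   on plain tkill.  */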
3803
3804 void
3805 linux_stop_lwp (struct lwp_info *lwp)
3806 {
3807 send_sigstop (lwp);
3808 }
3809
3810 static void
3811 send_sigstop (struct lwp_info *lwp)
3812 {
3813 int pid;
3814
3815 pid = lwpid_of (get_lwp_thread (lwp));
3816
3817 /* If we already have a pending stop signal for this LWP, don't
3818 send another. */
3819 if (lwp->stop_expected)
3820 {
3821 if (debug_threads)
3822 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3823
3824 return;
3825 }
3826
3827 if (debug_threads)
3828 debug_printf ("Sending sigstop to lwp %d\n", pid);
3829
3830 lwp->stop_expected = 1;
3831 kill_lwp (pid, SIGSTOP);
3832 }
3833
3834 static int
3835 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3836 {
3837 struct thread_info *thread = (struct thread_info *) entry;
3838 struct lwp_info *lwp = get_thread_lwp (thread);
3839
3840 /* Ignore EXCEPT. */
3841 if (lwp == except)
3842 return 0;
3843
3844 if (lwp->stopped)
3845 return 0;
3846
3847 send_sigstop (lwp);
3848 return 0;
3849 }
3850
3851 /* Increment the suspend count of an LWP, and stop it, if not stopped
3852 yet. */
3853 static int
3854 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3855 void *except)
3856 {
3857 struct thread_info *thread = (struct thread_info *) entry;
3858 struct lwp_info *lwp = get_thread_lwp (thread);
3859
3860 /* Ignore EXCEPT. */
3861 if (lwp == except)
3862 return 0;
3863
3864 lwp_suspended_inc (lwp);
3865
3866 return send_sigstop_callback (entry, except);
3867 }
3868
3869 static void
3870 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3871 {
3872 /* Store the exit status for later. */
3873 lwp->status_pending_p = 1;
3874 lwp->status_pending = wstat;
3875
3876 /* Store in waitstatus as well, as there's nothing else to process
3877 for this event. */
3878 if (WIFEXITED (wstat))
3879 {
3880 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3881 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3882 }
3883 else if (WIFSIGNALED (wstat))
3884 {
3885 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3886 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3887 }
3888
3889 /* Prevent trying to stop it. */
3890 lwp->stopped = 1;
3891
3892 /* No further stops are expected from a dead lwp. */
3893 lwp->stop_expected = 0;
3894 }
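
/* Worked example of the decoding above, using the usual Linux wait
   status encoding: a raw status of 0x0100 has WIFEXITED true and
   WEXITSTATUS 1 (clean exit with code 1), while a raw status of
   0x0009 has WIFSIGNALED true and WTERMSIG 9, i.e. killed by
   SIGKILL.  */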
3895
3896 /* Return true if LWP has exited already, and has a pending exit event
3897 to report to GDB. */
3898
3899 static int
3900 lwp_is_marked_dead (struct lwp_info *lwp)
3901 {
3902 return (lwp->status_pending_p
3903 && (WIFEXITED (lwp->status_pending)
3904 || WIFSIGNALED (lwp->status_pending)));
3905 }
3906
3907 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3908
3909 static void
3910 wait_for_sigstop (void)
3911 {
3912 struct thread_info *saved_thread;
3913 ptid_t saved_tid;
3914 int wstat;
3915 int ret;
3916
3917 saved_thread = current_thread;
3918 if (saved_thread != NULL)
3919 saved_tid = saved_thread->entry.id;
3920 else
3921 saved_tid = null_ptid; /* avoid bogus unused warning */
3922
3923 if (debug_threads)
3924 debug_printf ("wait_for_sigstop: pulling events\n");
3925
3926 /* Passing NULL_PTID as filter indicates we want all events to be
3927 left pending. Eventually this returns when there are no
3928 unwaited-for children left. */
3929 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3930 &wstat, __WALL);
3931 gdb_assert (ret == -1);
3932
3933 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3934 current_thread = saved_thread;
3935 else
3936 {
3937 if (debug_threads)
3938 debug_printf ("Previously current thread died.\n");
3939
3940 /* We can't change the current inferior behind GDB's back,
3941 otherwise, a subsequent command may apply to the wrong
3942 process. */
3943 current_thread = NULL;
3944 }
3945 }
3946
3947 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3948 move it out, because we need to report the stop event to GDB. For
3949 example, if the user puts a breakpoint in the jump pad, it's
3950 because she wants to debug it. */
3951
3952 static int
3953 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3954 {
3955 struct thread_info *thread = (struct thread_info *) entry;
3956 struct lwp_info *lwp = get_thread_lwp (thread);
3957
3958 if (lwp->suspended != 0)
3959 {
3960 internal_error (__FILE__, __LINE__,
3961 "LWP %ld is suspended, suspended=%d\n",
3962 lwpid_of (thread), lwp->suspended);
3963 }
3964 gdb_assert (lwp->stopped);
3965
3966 /* Allow debugging the jump pad, gdb_collect, etc. */
3967 return (supports_fast_tracepoints ()
3968 && agent_loaded_p ()
3969 && (gdb_breakpoint_here (lwp->stop_pc)
3970 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3971 || thread->last_resume_kind == resume_step)
3972 && linux_fast_tracepoint_collecting (lwp, NULL));
3973 }
3974
3975 static void
3976 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3977 {
3978 struct thread_info *thread = (struct thread_info *) entry;
3979 struct thread_info *saved_thread;
3980 struct lwp_info *lwp = get_thread_lwp (thread);
3981 int *wstat;
3982
3983 if (lwp->suspended != 0)
3984 {
3985 internal_error (__FILE__, __LINE__,
3986 "LWP %ld is suspended, suspended=%d\n",
3987 lwpid_of (thread), lwp->suspended);
3988 }
3989 gdb_assert (lwp->stopped);
3990
3991 /* For gdb_breakpoint_here. */
3992 saved_thread = current_thread;
3993 current_thread = thread;
3994
3995 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3996
3997 /* Allow debugging the jump pad, gdb_collect, etc. */
3998 if (!gdb_breakpoint_here (lwp->stop_pc)
3999 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4000 && thread->last_resume_kind != resume_step
4001 && maybe_move_out_of_jump_pad (lwp, wstat))
4002 {
4003 if (debug_threads)
4004 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4005 lwpid_of (thread));
4006
4007 if (wstat)
4008 {
4009 lwp->status_pending_p = 0;
4010 enqueue_one_deferred_signal (lwp, wstat);
4011
4012 if (debug_threads)
4013 debug_printf ("Signal %d for LWP %ld deferred "
4014 "(in jump pad)\n",
4015 WSTOPSIG (*wstat), lwpid_of (thread));
4016 }
4017
4018 linux_resume_one_lwp (lwp, 0, 0, NULL);
4019 }
4020 else
4021 lwp_suspended_inc (lwp);
4022
4023 current_thread = saved_thread;
4024 }
4025
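/* find_inferior callback.  Return true if the LWP is alive (not
   marked dead) and not stopped, i.e. actually running.  DATA is
   unused.  */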
4026 static int
4027 lwp_running (struct inferior_list_entry *entry, void *data)
4028 {
4029 struct thread_info *thread = (struct thread_info *) entry;
4030 struct lwp_info *lwp = get_thread_lwp (thread);
4031
4032 if (lwp_is_marked_dead (lwp))
4033 return 0;
4034 if (lwp->stopped)
4035 return 0;
4036 return 1;
4037 }
4038
4039 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4040 If SUSPEND, then also increase the suspend count of every LWP,
4041 except EXCEPT. */
4042
4043 static void
4044 stop_all_lwps (int suspend, struct lwp_info *except)
4045 {
4046 /* Should not be called recursively. */
4047 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4048
4049 if (debug_threads)
4050 {
4051 debug_enter ();
4052 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4053 suspend ? "stop-and-suspend" : "stop",
4054 except != NULL
4055 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4056 : "none");
4057 }
4058
4059 stopping_threads = (suspend
4060 ? STOPPING_AND_SUSPENDING_THREADS
4061 : STOPPING_THREADS);
4062
4063 if (suspend)
4064 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4065 else
4066 find_inferior (&all_threads, send_sigstop_callback, except);
4067 wait_for_sigstop ();
4068 stopping_threads = NOT_STOPPING_THREADS;
4069
4070 if (debug_threads)
4071 {
4072 debug_printf ("stop_all_lwps done, setting stopping_threads "
4073 "back to !stopping\n");
4074 debug_exit ();
4075 }
4076 }
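
/* Typical pairing (a sketch, not a verbatim excerpt): a suspending
   stop must eventually be undone by dropping the suspend counts
   again, e.g.:

     stop_all_lwps (1, lwp);
     ... step LWP past its breakpoint ...
     unsuspend_all_lwps (lwp);

   while a plain stop_all_lwps (0, NULL) is paired with
   unstop_all_lwps.  */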
4077
4078 /* Enqueue one signal in the chain of signals which need to be
4079 delivered to this process on next resume. */
4080
4081 static void
4082 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4083 {
4084 struct pending_signals *p_sig = XNEW (struct pending_signals);
4085
4086 p_sig->prev = lwp->pending_signals;
4087 p_sig->signal = signal;
4088 if (info == NULL)
4089 memset (&p_sig->info, 0, sizeof (siginfo_t));
4090 else
4091 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4092 lwp->pending_signals = p_sig;
4093 }
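
/* Note on the chain above: signals are pushed at the head (newest
   first, linked through PREV), and the consumer walks to the tail
   before dequeueing, so delivery order is FIFO.  A sketch of that
   walk, matching linux_resume_one_lwp_throw below:

     struct pending_signals **p_sig = &lwp->pending_signals;

     while ((*p_sig)->prev != NULL)
       p_sig = &(*p_sig)->prev;
     ... deliver (*p_sig)->signal, then free and unlink *p_sig ...  */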
4094
4095 /* Install breakpoints for software single stepping. */
4096
4097 static void
4098 install_software_single_step_breakpoints (struct lwp_info *lwp)
4099 {
4100 int i;
4101 CORE_ADDR pc;
4102 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4103 VEC (CORE_ADDR) *next_pcs = NULL;
4104 struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4105
4106 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4107
4108 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4109 set_reinsert_breakpoint (pc);
4110
4111 do_cleanups (old_chain);
4112 }
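
/* For illustration only, a hypothetical get_next_pcs hook for a
   fixed-length, straight-line-only subset of an ISA might look
   roughly like this (real implementations must also decode branches
   and return every possible successor PC):

     static VEC (CORE_ADDR) *
     example_get_next_pcs (struct regcache *regcache)
     {
       VEC (CORE_ADDR) *next_pcs = NULL;
       CORE_ADDR pc = regcache_read_pc (regcache);

       VEC_safe_push (CORE_ADDR, next_pcs, pc + 4);
       return next_pcs;
     }
*/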
4113
4114 /* Single step via hardware or software single step.
4115 Return 1 if hardware single-stepping is used, 0 if software
4116 single-stepping is used or single-stepping is not supported. */
4117
4118 static int
4119 single_step (struct lwp_info* lwp)
4120 {
4121 int step = 0;
4122
4123 if (can_hardware_single_step ())
4124 {
4125 step = 1;
4126 }
4127 else if (can_software_single_step ())
4128 {
4129 install_software_single_step_breakpoints (lwp);
4130 step = 0;
4131 }
4132 else
4133 {
4134 if (debug_threads)
4135 debug_printf ("stepping is not implemented on this target\n");
4136 }
4137
4138 return step;
4139 }
4140
4141 /* A signal can be delivered to the inferior if we are not trying to
4142 finish a fast tracepoint collect. Since a signal can be delivered
4143 during a step-over, the program may enter the signal handler and
4144 trap again after returning from it. We can live with the spurious
4145 double traps. */
4146
4147 static int
4148 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4149 {
4150 return !lwp->collecting_fast_tracepoint;
4151 }
4152
4153 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4154 SIGNAL is nonzero, give it that signal. */
4155
4156 static void
4157 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4158 int step, int signal, siginfo_t *info)
4159 {
4160 struct thread_info *thread = get_lwp_thread (lwp);
4161 struct thread_info *saved_thread;
4162 int fast_tp_collecting;
4163 int ptrace_request;
4164 struct process_info *proc = get_thread_process (thread);
4165
4166 /* Note that the target description may not be initialised
4167 (proc->tdesc == NULL) at this point, because the program hasn't
4168 stopped at its first instruction yet (GDBserver is still skipping
4169 the extra traps from the wrapper program; see option --wrapper).
4170 Code in this function that requires register access should
4171 therefore be guarded by a proc->tdesc == NULL check or similar. */
4172
4173 if (lwp->stopped == 0)
4174 return;
4175
4176 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4177
4178 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4179
4180 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4181
4182 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4183 user used the "jump" command, or "set $pc = foo"). */
4184 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4185 {
4186 /* Collecting 'while-stepping' actions doesn't make sense
4187 anymore. */
4188 release_while_stepping_state_list (thread);
4189 }
4190
4191 /* If we have pending signals or status, and a new signal, enqueue the
4192 signal. Also enqueue the signal if it can't be delivered to the
4193 inferior right now. */
4194 if (signal != 0
4195 && (lwp->status_pending_p
4196 || lwp->pending_signals != NULL
4197 || !lwp_signal_can_be_delivered (lwp)))
4198 {
4199 enqueue_pending_signal (lwp, signal, info);
4200
4201 /* Postpone any pending signal. It was enqueued above. */
4202 signal = 0;
4203 }
4204
4205 if (lwp->status_pending_p)
4206 {
4207 if (debug_threads)
4208 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4209 " has pending status\n",
4210 lwpid_of (thread), step ? "step" : "continue",
4211 lwp->stop_expected ? "expected" : "not expected");
4212 return;
4213 }
4214
4215 saved_thread = current_thread;
4216 current_thread = thread;
4217
4218 /* This bit needs some thinking about. If we get a signal that
4219 we must report while a single-step reinsert is still pending,
4220 we often end up resuming the thread. It might be better to
4221 (ew) allow a stack of pending events; then we could be sure that
4222 the reinsert happened right away and not lose any signals.
4223
4224 Making this stack would also shrink the window in which breakpoints are
4225 uninserted (see comment in linux_wait_for_lwp) but not enough for
4226 complete correctness, so it won't solve that problem. It may be
4227 worthwhile just to solve this one, however. */
4228 if (lwp->bp_reinsert != 0)
4229 {
4230 if (debug_threads)
4231 debug_printf (" pending reinsert at 0x%s\n",
4232 paddress (lwp->bp_reinsert));
4233
4234 if (can_hardware_single_step ())
4235 {
4236 if (fast_tp_collecting == 0)
4237 {
4238 if (step == 0)
4239 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4240 if (lwp->suspended)
4241 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4242 lwp->suspended);
4243 }
4244 }
4245
4246 step = maybe_hw_step (thread);
4247 }
4248 else
4249 {
4250 /* If the thread isn't doing step-over, there shouldn't be any
4251 reinsert breakpoints. */
4252 gdb_assert (!has_reinsert_breakpoints (proc));
4253 }
4254
4255 if (fast_tp_collecting == 1)
4256 {
4257 if (debug_threads)
4258 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4259 " (exit-jump-pad-bkpt)\n",
4260 lwpid_of (thread));
4261 }
4262 else if (fast_tp_collecting == 2)
4263 {
4264 if (debug_threads)
4265 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4266 " single-stepping\n",
4267 lwpid_of (thread));
4268
4269 if (can_hardware_single_step ())
4270 step = 1;
4271 else
4272 {
4273 internal_error (__FILE__, __LINE__,
4274 "moving out of jump pad single-stepping"
4275 " not implemented on this target");
4276 }
4277 }
4278
4279 /* If we have while-stepping actions in this thread, set it stepping.
4280 If we have a signal to deliver, it may or may not be set to
4281 SIG_IGN, we don't know. Assume so, and allow collecting
4282 while-stepping into a signal handler. A possible smart thing to
4283 do would be to set an internal breakpoint at the signal return
4284 address, continue, and carry on catching this while-stepping
4285 action only when that breakpoint is hit. A future
4286 enhancement. */
4287 if (thread->while_stepping != NULL)
4288 {
4289 if (debug_threads)
4290 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4291 lwpid_of (thread));
4292
4293 step = single_step (lwp);
4294 }
4295
4296 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4297 {
4298 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4299
4300 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4301
4302 if (debug_threads)
4303 {
4304 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4305 (long) lwp->stop_pc);
4306 }
4307 }
4308
4309 /* If we have pending signals, consume one if it can be delivered to
4310 the inferior. */
4311 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4312 {
4313 struct pending_signals **p_sig;
4314
4315 p_sig = &lwp->pending_signals;
4316 while ((*p_sig)->prev != NULL)
4317 p_sig = &(*p_sig)->prev;
4318
4319 signal = (*p_sig)->signal;
4320 if ((*p_sig)->info.si_signo != 0)
4321 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4322 &(*p_sig)->info);
4323
4324 free (*p_sig);
4325 *p_sig = NULL;
4326 }
4327
4328 if (debug_threads)
4329 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4330 lwpid_of (thread), step ? "step" : "continue", signal,
4331 lwp->stop_expected ? "expected" : "not expected");
4332
4333 if (the_low_target.prepare_to_resume != NULL)
4334 the_low_target.prepare_to_resume (lwp);
4335
4336 regcache_invalidate_thread (thread);
4337 errno = 0;
4338 lwp->stepping = step;
4339 if (step)
4340 ptrace_request = PTRACE_SINGLESTEP;
4341 else if (gdb_catching_syscalls_p (lwp))
4342 ptrace_request = PTRACE_SYSCALL;
4343 else
4344 ptrace_request = PTRACE_CONT;
4345 ptrace (ptrace_request,
4346 lwpid_of (thread),
4347 (PTRACE_TYPE_ARG3) 0,
4348 /* Coerce to a uintptr_t first to avoid potential gcc warning
4349 of coercing an 8 byte integer to a 4 byte pointer. */
4350 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4351
4352 current_thread = saved_thread;
4353 if (errno)
4354 perror_with_name ("resuming thread");
4355
4356 /* Successfully resumed. Clear state that no longer makes sense,
4357 and mark the LWP as running. Must not do this before resuming
4358 otherwise if that fails other code will be confused. E.g., we'd
4359 later try to stop the LWP and hang forever waiting for a stop
4360 status. Note that we must not throw after this is cleared,
4361 otherwise handle_zombie_lwp_error would get confused. */
4362 lwp->stopped = 0;
4363 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4364 }
4365
4366 /* Called when we try to resume a stopped LWP and that errors out. If
4367 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4368 or about to become), discard the error, clear any pending status
4369 the LWP may have, and return true (we'll collect the exit status
4370 soon enough). Otherwise, return false. */
4371
4372 static int
4373 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4374 {
4375 struct thread_info *thread = get_lwp_thread (lp);
4376
4377 /* If we get an error after resuming the LWP successfully, we'd
4378 confuse !T state for the LWP being gone. */
4379 gdb_assert (lp->stopped);
4380
4381 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4382 because even if ptrace failed with ESRCH, the tracee may be "not
4383 yet fully dead", but already refusing ptrace requests. In that
4384 case the tracee has 'R (Running)' state for a little bit
4385 (observed in Linux 3.18). See also the note on ESRCH in the
4386 ptrace(2) man page. Instead, check whether the LWP has any state
4387 other than ptrace-stopped. */
4388
4389 /* Don't assume anything if /proc/PID/status can't be read. */
4390 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4391 {
4392 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4393 lp->status_pending_p = 0;
4394 return 1;
4395 }
4396 return 0;
4397 }
4398
4399 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4400 disappears while we try to resume it. */
4401
4402 static void
4403 linux_resume_one_lwp (struct lwp_info *lwp,
4404 int step, int signal, siginfo_t *info)
4405 {
4406 TRY
4407 {
4408 linux_resume_one_lwp_throw (lwp, step, signal, info);
4409 }
4410 CATCH (ex, RETURN_MASK_ERROR)
4411 {
4412 if (!check_ptrace_stopped_lwp_gone (lwp))
4413 throw_exception (ex);
4414 }
4415 END_CATCH
4416 }
4417
4418 struct thread_resume_array
4419 {
4420 struct thread_resume *resume;
4421 size_t n;
4422 };
4423
4424 /* This function is called once per thread via find_inferior.
4425 ARG is a pointer to a thread_resume_array struct.
4426 We look up the thread specified by ENTRY in ARG, and mark the thread
4427 with a pointer to the appropriate resume request.
4428
4429 This algorithm is O(threads * resume elements), but the number of
4430 resume elements is small (and will remain small at least until
4431 GDB supports thread suspension). */
4432
4433 static int
4434 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4435 {
4436 struct thread_info *thread = (struct thread_info *) entry;
4437 struct lwp_info *lwp = get_thread_lwp (thread);
4438 int ndx;
4439 struct thread_resume_array *r;
4440
4441 r = (struct thread_resume_array *) arg;
4442
4443 for (ndx = 0; ndx < r->n; ndx++)
4444 {
4445 ptid_t ptid = r->resume[ndx].thread;
4446 if (ptid_equal (ptid, minus_one_ptid)
4447 || ptid_equal (ptid, entry->id)
4448 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4449 of PID'. */
4450 || (ptid_get_pid (ptid) == pid_of (thread)
4451 && (ptid_is_pid (ptid)
4452 || ptid_get_lwp (ptid) == -1)))
4453 {
4454 if (r->resume[ndx].kind == resume_stop
4455 && thread->last_resume_kind == resume_stop)
4456 {
4457 if (debug_threads)
4458 debug_printf ("already %s LWP %ld at GDB's request\n",
4459 (thread->last_status.kind
4460 == TARGET_WAITKIND_STOPPED)
4461 ? "stopped"
4462 : "stopping",
4463 lwpid_of (thread));
4464
4465 continue;
4466 }
4467
4468 lwp->resume = &r->resume[ndx];
4469 thread->last_resume_kind = lwp->resume->kind;
4470
4471 lwp->step_range_start = lwp->resume->step_range_start;
4472 lwp->step_range_end = lwp->resume->step_range_end;
4473
4474 /* If we had a deferred signal to report, dequeue one now.
4475 This can happen if LWP gets more than one signal while
4476 trying to get out of a jump pad. */
4477 if (lwp->stopped
4478 && !lwp->status_pending_p
4479 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4480 {
4481 lwp->status_pending_p = 1;
4482
4483 if (debug_threads)
4484 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4485 "leaving status pending.\n",
4486 WSTOPSIG (lwp->status_pending),
4487 lwpid_of (thread));
4488 }
4489
4490 return 0;
4491 }
4492 }
4493
4494 /* No resume action for this thread. */
4495 lwp->resume = NULL;
4496
4497 return 0;
4498 }
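
/* Illustration (not from the sources): for a GDB packet such as
   "vCont;s:p5.5;c", the vCont handler builds a resume array roughly
   like

     resume[0] = { ptid (5, 5), resume_step, ... };
     resume[1] = { minus_one_ptid, resume_continue, ... };

   so in the loop above, thread p5.5 matches the specific step entry
   first, and every other thread falls through to the wildcard
   continue entry.  */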
4499
4500 /* find_inferior callback for linux_resume.
4501 Set *FLAG_P if this lwp has an interesting status pending. */
4502
4503 static int
4504 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4505 {
4506 struct thread_info *thread = (struct thread_info *) entry;
4507 struct lwp_info *lwp = get_thread_lwp (thread);
4508
4509 /* LWPs which will not be resumed are not interesting, because
4510 we might not wait for them next time through linux_wait. */
4511 if (lwp->resume == NULL)
4512 return 0;
4513
4514 if (thread_still_has_status_pending_p (thread))
4515 * (int *) flag_p = 1;
4516
4517 return 0;
4518 }
4519
4520 /* Return 1 if this lwp that GDB wants running is stopped at an
4521 internal breakpoint that we need to step over. It assumes that any
4522 required STOP_PC adjustment has already been propagated to the
4523 inferior's regcache. */
4524
4525 static int
4526 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4527 {
4528 struct thread_info *thread = (struct thread_info *) entry;
4529 struct lwp_info *lwp = get_thread_lwp (thread);
4530 struct thread_info *saved_thread;
4531 CORE_ADDR pc;
4532 struct process_info *proc = get_thread_process (thread);
4533
4534 /* GDBserver is still skipping the extra traps from the wrapper
4535 program, so no step-over is needed. */
4536 if (proc->tdesc == NULL)
4537 return 0;
4538
4539 /* LWPs which will not be resumed are not interesting, because we
4540 might not wait for them next time through linux_wait. */
4541
4542 if (!lwp->stopped)
4543 {
4544 if (debug_threads)
4545 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4546 lwpid_of (thread));
4547 return 0;
4548 }
4549
4550 if (thread->last_resume_kind == resume_stop)
4551 {
4552 if (debug_threads)
4553 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4554 " stopped\n",
4555 lwpid_of (thread));
4556 return 0;
4557 }
4558
4559 gdb_assert (lwp->suspended >= 0);
4560
4561 if (lwp->suspended)
4562 {
4563 if (debug_threads)
4564 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4565 lwpid_of (thread));
4566 return 0;
4567 }
4568
4569 if (lwp->status_pending_p)
4570 {
4571 if (debug_threads)
4572 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4573 " status.\n",
4574 lwpid_of (thread));
4575 return 0;
4576 }
4577
4578 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4579 or we have. */
4580 pc = get_pc (lwp);
4581
4582 /* If the PC has changed since we stopped, then don't do anything,
4583 and let the breakpoint/tracepoint be hit. This happens if, for
4584 instance, GDB handled the decr_pc_after_break subtraction itself,
4585 GDB is OOL stepping this thread, or the user has issued a "jump"
4586 command, or poked thread's registers herself. */
4587 if (pc != lwp->stop_pc)
4588 {
4589 if (debug_threads)
4590 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4591 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4592 lwpid_of (thread),
4593 paddress (lwp->stop_pc), paddress (pc));
4594 return 0;
4595 }
4596
4597 /* On a software single-step target, resume the inferior with the
4598 signal rather than stepping over. */
4599 if (can_software_single_step ()
4600 && lwp->pending_signals != NULL
4601 && lwp_signal_can_be_delivered (lwp))
4602 {
4603 if (debug_threads)
4604 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4605 " signals.\n",
4606 lwpid_of (thread));
4607
4608 return 0;
4609 }
4610
4611 saved_thread = current_thread;
4612 current_thread = thread;
4613
4614 /* We can only step over breakpoints we know about. */
4615 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4616 {
4617 /* Don't step over a breakpoint that GDB expects to hit
4618 though. If the condition is being evaluated on the target's side
4619 and it evaluates to false, step over this breakpoint as well. */
4620 if (gdb_breakpoint_here (pc)
4621 && gdb_condition_true_at_breakpoint (pc)
4622 && gdb_no_commands_at_breakpoint (pc))
4623 {
4624 if (debug_threads)
4625 debug_printf ("Need step over [LWP %ld]? yes, but found"
4626 " GDB breakpoint at 0x%s; skipping step over\n",
4627 lwpid_of (thread), paddress (pc));
4628
4629 current_thread = saved_thread;
4630 return 0;
4631 }
4632 else
4633 {
4634 if (debug_threads)
4635 debug_printf ("Need step over [LWP %ld]? yes, "
4636 "found breakpoint at 0x%s\n",
4637 lwpid_of (thread), paddress (pc));
4638
4639 /* We've found an lwp that needs stepping over --- return 1 so
4640 that find_inferior stops looking. */
4641 current_thread = saved_thread;
4642
4643 return 1;
4644 }
4645 }
4646
4647 current_thread = saved_thread;
4648
4649 if (debug_threads)
4650 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4651 " at 0x%s\n",
4652 lwpid_of (thread), paddress (pc));
4653
4654 return 0;
4655 }
4656
4657 /* Start a step-over operation on LWP. When LWP is stopped at a
4658 breakpoint, to make progress we need to get the breakpoint out
4659 of the way. If we let other threads run while we do that, they may
4660 pass by the breakpoint location and miss hitting it. To avoid
4661 that, a step-over momentarily stops all threads while LWP is
4662 single-stepped by either hardware or software while the breakpoint
4663 is temporarily uninserted from the inferior. When the single-step
4664 finishes, we reinsert the breakpoint, and let all threads that are
4665 supposed to be running, run again. */
4666
4667 static int
4668 start_step_over (struct lwp_info *lwp)
4669 {
4670 struct thread_info *thread = get_lwp_thread (lwp);
4671 struct thread_info *saved_thread;
4672 CORE_ADDR pc;
4673 int step;
4674
4675 if (debug_threads)
4676 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4677 lwpid_of (thread));
4678
4679 stop_all_lwps (1, lwp);
4680
4681 if (lwp->suspended != 0)
4682 {
4683 internal_error (__FILE__, __LINE__,
4684 "LWP %ld suspended=%d\n", lwpid_of (thread),
4685 lwp->suspended);
4686 }
4687
4688 if (debug_threads)
4689 debug_printf ("Done stopping all threads for step-over.\n");
4690
4691 /* Note, we should always reach here with an already adjusted PC,
4692 either by GDB (if we're resuming due to GDB's request), or by our
4693 caller, if we just finished handling an internal breakpoint GDB
4694 shouldn't care about. */
4695 pc = get_pc (lwp);
4696
4697 saved_thread = current_thread;
4698 current_thread = thread;
4699
4700 lwp->bp_reinsert = pc;
4701 uninsert_breakpoints_at (pc);
4702 uninsert_fast_tracepoint_jumps_at (pc);
4703
4704 step = single_step (lwp);
4705
4706 current_thread = saved_thread;
4707
4708 linux_resume_one_lwp (lwp, step, 0, NULL);
4709
4710 /* Require next event from this LWP. */
4711 step_over_bkpt = thread->entry.id;
4712 return 1;
4713 }
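
/* Overall shape of a step-over, tying the helpers together
   (descriptive summary): stop_all_lwps (1, LWP) suspends everyone
   else; the breakpoint at PC is uninserted; the LWP is resumed with
   a hardware or software single-step; linux_wait_1 filters on
   step_over_bkpt until this LWP traps; finish_step_over reinserts
   the breakpoint(s); and the callers then unsuspend and proceed the
   remaining LWPs.  */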
4714
4715 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4716 start_step_over, if still there, and delete any reinsert
4717 breakpoints we've set, on non-hardware single-step targets. */
4718
4719 static int
4720 finish_step_over (struct lwp_info *lwp)
4721 {
4722 if (lwp->bp_reinsert != 0)
4723 {
4724 struct thread_info *saved_thread = current_thread;
4725
4726 if (debug_threads)
4727 debug_printf ("Finished step over.\n");
4728
4729 current_thread = get_lwp_thread (lwp);
4730
4731 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4732 may be no breakpoint to reinsert there by now. */
4733 reinsert_breakpoints_at (lwp->bp_reinsert);
4734 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4735
4736 lwp->bp_reinsert = 0;
4737
4738 /* Delete any software-single-step reinsert breakpoints. No
4739 longer needed. We don't have to worry about other threads
4740 hitting this trap, and later not being able to explain it,
4741 because we were stepping over a breakpoint, and we hold all
4742 threads but LWP stopped while doing that. */
4743 if (!can_hardware_single_step ())
4744 {
4745 gdb_assert (has_reinsert_breakpoints (current_process ()));
4746 delete_reinsert_breakpoints ();
4747 }
4748
4749 step_over_bkpt = null_ptid;
4750 current_thread = saved_thread;
4751 return 1;
4752 }
4753 else
4754 return 0;
4755 }
4756
4757 /* If there's a step over in progress, wait until all threads stop
4758 (that is, until the stepping thread finishes its step), and
4759 unsuspend all lwps. The stepping thread ends with its status
4760 pending, which is processed later when we get back to processing
4761 events. */
4762
4763 static void
4764 complete_ongoing_step_over (void)
4765 {
4766 if (!ptid_equal (step_over_bkpt, null_ptid))
4767 {
4768 struct lwp_info *lwp;
4769 int wstat;
4770 int ret;
4771
4772 if (debug_threads)
4773 debug_printf ("detach: step over in progress, finish it first\n");
4774
4775 /* Passing NULL_PTID as filter indicates we want all events to
4776 be left pending. Eventually this returns when there are no
4777 unwaited-for children left. */
4778 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4779 &wstat, __WALL);
4780 gdb_assert (ret == -1);
4781
4782 lwp = find_lwp_pid (step_over_bkpt);
4783 if (lwp != NULL)
4784 finish_step_over (lwp);
4785 step_over_bkpt = null_ptid;
4786 unsuspend_all_lwps (lwp);
4787 }
4788 }
4789
4790 /* This function is called once per thread. We check the thread's resume
4791 request, which will tell us whether to resume, step, or leave the thread
4792 stopped; and what signal, if any, it should be sent.
4793
4794 For threads which we aren't explicitly told otherwise, we preserve
4795 the stepping flag; this is used for stepping over gdbserver-placed
4796 breakpoints.
4797
4798 If pending_flags was set in any thread, we queue any needed
4799 signals, since we won't actually resume. We already have a pending
4800 event to report, so we don't need to preserve any step requests;
4801 they should be re-issued if necessary. */
4802
4803 static int
4804 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4805 {
4806 struct thread_info *thread = (struct thread_info *) entry;
4807 struct lwp_info *lwp = get_thread_lwp (thread);
4808 int step;
4809 int leave_all_stopped = * (int *) arg;
4810 int leave_pending;
4811
4812 if (lwp->resume == NULL)
4813 return 0;
4814
4815 if (lwp->resume->kind == resume_stop)
4816 {
4817 if (debug_threads)
4818 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4819
4820 if (!lwp->stopped)
4821 {
4822 if (debug_threads)
4823 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4824
4825 /* Stop the thread, and wait for the event asynchronously,
4826 through the event loop. */
4827 send_sigstop (lwp);
4828 }
4829 else
4830 {
4831 if (debug_threads)
4832 debug_printf ("already stopped LWP %ld\n",
4833 lwpid_of (thread));
4834
4835 /* The LWP may have been stopped in an internal event that
4836 was not meant to be notified back to GDB (e.g., gdbserver
4837 breakpoint), so we should be reporting a stop event in
4838 this case too. */
4839
4840 /* If the thread already has a pending SIGSTOP, this is a
4841 no-op. Otherwise, something later will presumably resume
4842 the thread and this will cause it to cancel any pending
4843 operation, due to last_resume_kind == resume_stop. If
4844 the thread already has a pending status to report, we
4845 will still report it the next time we wait - see
4846 status_pending_p_callback. */
4847
4848 /* If we already have a pending signal to report, then
4849 there's no need to queue a SIGSTOP, as this means we're
4850 midway through moving the LWP out of the jumppad, and we
4851 will report the pending signal as soon as that is
4852 finished. */
4853 if (lwp->pending_signals_to_report == NULL)
4854 send_sigstop (lwp);
4855 }
4856
4857 /* For stop requests, we're done. */
4858 lwp->resume = NULL;
4859 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4860 return 0;
4861 }
4862
4863 /* If this thread which is about to be resumed has a pending status,
4864 then don't resume it - we can just report the pending status.
4865 Likewise if it is suspended, because e.g., another thread is
4866 stepping past a breakpoint. Make sure to queue any signals that
4867 would otherwise be sent. In all-stop mode, we do this decision
4868 based on if *any* thread has a pending status. If there's a
4869 thread that needs the step-over-breakpoint dance, then don't
4870 resume any other thread but that particular one. */
4871 leave_pending = (lwp->suspended
4872 || lwp->status_pending_p
4873 || leave_all_stopped);
4874
4875 if (!leave_pending)
4876 {
4877 if (debug_threads)
4878 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4879
4880 step = (lwp->resume->kind == resume_step);
4881 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4882 }
4883 else
4884 {
4885 if (debug_threads)
4886 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4887
4888 /* If we have a new signal, enqueue the signal. */
4889 if (lwp->resume->sig != 0)
4890 {
4891 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4892
4893 p_sig->prev = lwp->pending_signals;
4894 p_sig->signal = lwp->resume->sig;
4895
4896 /* If this is the same signal we were previously stopped by,
4897 make sure to queue its siginfo. We can ignore the return
4898 value of ptrace; if it fails, we'll skip
4899 PTRACE_SETSIGINFO. */
4900 if (WIFSTOPPED (lwp->last_status)
4901 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4902 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4903 &p_sig->info);
4904
4905 lwp->pending_signals = p_sig;
4906 }
4907 }
4908
4909 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4910 lwp->resume = NULL;
4911 return 0;
4912 }
4913
4914 static void
4915 linux_resume (struct thread_resume *resume_info, size_t n)
4916 {
4917 struct thread_resume_array array = { resume_info, n };
4918 struct thread_info *need_step_over = NULL;
4919 int any_pending;
4920 int leave_all_stopped;
4921
4922 if (debug_threads)
4923 {
4924 debug_enter ();
4925 debug_printf ("linux_resume:\n");
4926 }
4927
4928 find_inferior (&all_threads, linux_set_resume_request, &array);
4929
4930 /* If there is a thread which would otherwise be resumed, which has
4931 a pending status, then don't resume any threads - we can just
4932 report the pending status. Make sure to queue any signals that
4933 would otherwise be sent. In non-stop mode, we'll apply this
4934 logic to each thread individually. We consume all pending events
4935 before considering whether to start a step-over (in all-stop). */
4936 any_pending = 0;
4937 if (!non_stop)
4938 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4939
4940 /* If there is a thread which would otherwise be resumed, which is
4941 stopped at a breakpoint that needs stepping over, then don't
4942 resume any threads - have it step over the breakpoint with all
4943 other threads stopped, then resume all threads again. Make sure
4944 to queue any signals that would otherwise be delivered or
4945 queued. */
4946 if (!any_pending && supports_breakpoints ())
4947 need_step_over
4948 = (struct thread_info *) find_inferior (&all_threads,
4949 need_step_over_p, NULL);
4950
4951 leave_all_stopped = (need_step_over != NULL || any_pending);
4952
4953 if (debug_threads)
4954 {
4955 if (need_step_over != NULL)
4956 debug_printf ("Not resuming all, need step over\n");
4957 else if (any_pending)
4958 debug_printf ("Not resuming, all-stop and found "
4959 "an LWP with pending status\n");
4960 else
4961 debug_printf ("Resuming, no pending status or step over needed\n");
4962 }
4963
4964 /* Even if we're leaving threads stopped, queue all signals we'd
4965 otherwise deliver. */
4966 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4967
4968 if (need_step_over)
4969 start_step_over (get_thread_lwp (need_step_over));
4970
4971 if (debug_threads)
4972 {
4973 debug_printf ("linux_resume done\n");
4974 debug_exit ();
4975 }
4976
4977 /* We may have events that were pending that can/should be sent to
4978 the client now. Trigger a linux_wait call. */
4979 if (target_is_async_p ())
4980 async_file_mark ();
4981 }
4982
4983 /* This function is called once per thread. We check the thread's
4984 last resume request, which will tell us whether to resume, step, or
4985 leave the thread stopped. Any signal the client requested to be
4986 delivered has already been enqueued at this point.
4987
4988 If any thread that GDB wants running is stopped at an internal
4989 breakpoint that needs stepping over, we start a step-over operation
4990 on that particular thread, and leave all others stopped. */
4991
4992 static int
4993 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4994 {
4995 struct thread_info *thread = (struct thread_info *) entry;
4996 struct lwp_info *lwp = get_thread_lwp (thread);
4997 int step;
4998
4999 if (lwp == except)
5000 return 0;
5001
5002 if (debug_threads)
5003 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5004
5005 if (!lwp->stopped)
5006 {
5007 if (debug_threads)
5008 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5009 return 0;
5010 }
5011
5012 if (thread->last_resume_kind == resume_stop
5013 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5014 {
5015 if (debug_threads)
5016 debug_printf ("   client wants LWP %ld to remain stopped\n",
5017 lwpid_of (thread));
5018 return 0;
5019 }
5020
5021 if (lwp->status_pending_p)
5022 {
5023 if (debug_threads)
5024 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5025 lwpid_of (thread));
5026 return 0;
5027 }
5028
5029 gdb_assert (lwp->suspended >= 0);
5030
5031 if (lwp->suspended)
5032 {
5033 if (debug_threads)
5034 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5035 return 0;
5036 }
5037
5038 if (thread->last_resume_kind == resume_stop
5039 && lwp->pending_signals_to_report == NULL
5040 && lwp->collecting_fast_tracepoint == 0)
5041 {
5042 /* We haven't reported this LWP as stopped yet (otherwise, the
5043 last_status.kind check above would catch it, and we wouldn't
5044 reach here). This LWP may have been momentarily paused by a
5045 stop_all_lwps call while handling, for example, another LWP's
5046 step-over. In that case, the pending expected SIGSTOP signal
5047 that was queued at vCont;t handling time will have already
5048 been consumed by wait_for_sigstop, and so we need to requeue
5049 another one here. Note that if the LWP already has a SIGSTOP
5050 pending, this is a no-op. */
5051
5052 if (debug_threads)
5053 debug_printf ("Client wants LWP %ld to stop. "
5054 "Making sure it has a SIGSTOP pending\n",
5055 lwpid_of (thread));
5056
5057 send_sigstop (lwp);
5058 }
5059
5060 if (thread->last_resume_kind == resume_step)
5061 {
5062 if (debug_threads)
5063 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5064 lwpid_of (thread));
5065 step = 1;
5066 }
5067 else if (lwp->bp_reinsert != 0)
5068 {
5069 if (debug_threads)
5070 debug_printf (" stepping LWP %ld, reinsert set\n",
5071 lwpid_of (thread));
5072
5073 step = maybe_hw_step (thread);
5074 }
5075 else
5076 step = 0;
5077
5078 linux_resume_one_lwp (lwp, step, 0, NULL);
5079 return 0;
5080 }
5081
5082 static int
5083 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5084 {
5085 struct thread_info *thread = (struct thread_info *) entry;
5086 struct lwp_info *lwp = get_thread_lwp (thread);
5087
5088 if (lwp == except)
5089 return 0;
5090
5091 lwp_suspended_decr (lwp);
5092
5093 return proceed_one_lwp (entry, except);
5094 }
5095
5096 /* When we finish a step-over, set threads running again. If there's
5097 another thread that may need a step-over, now's the time to start
5098 it. Eventually, we'll move all threads past their breakpoints. */
5099
5100 static void
5101 proceed_all_lwps (void)
5102 {
5103 struct thread_info *need_step_over;
5104
5105 /* If there is a thread which would otherwise be resumed, which is
5106 stopped at a breakpoint that needs stepping over, then don't
5107 resume any threads - have it step over the breakpoint with all
5108 other threads stopped, then resume all threads again. */
5109
5110 if (supports_breakpoints ())
5111 {
5112 need_step_over
5113 = (struct thread_info *) find_inferior (&all_threads,
5114 need_step_over_p, NULL);
5115
5116 if (need_step_over != NULL)
5117 {
5118 if (debug_threads)
5119 debug_printf ("proceed_all_lwps: found "
5120 "thread %ld needing a step-over\n",
5121 lwpid_of (need_step_over));
5122
5123 start_step_over (get_thread_lwp (need_step_over));
5124 return;
5125 }
5126 }
5127
5128 if (debug_threads)
5129 debug_printf ("Proceeding, no step-over needed\n");
5130
5131 find_inferior (&all_threads, proceed_one_lwp, NULL);
5132 }
5133
5134 /* Stopped LWPs that the client wanted to be running, that don't have
5135 pending statuses, are set to run again, except for EXCEPT, if not
5136 NULL. This undoes a stop_all_lwps call. */
5137
5138 static void
5139 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5140 {
5141 if (debug_threads)
5142 {
5143 debug_enter ();
5144 if (except)
5145 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5146 lwpid_of (get_lwp_thread (except)));
5147 else
5148 debug_printf ("unstopping all lwps\n");
5149 }
5150
5151 if (unsuspend)
5152 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5153 else
5154 find_inferior (&all_threads, proceed_one_lwp, except);
5155
5156 if (debug_threads)
5157 {
5158 debug_printf ("unstop_all_lwps done\n");
5159 debug_exit ();
5160 }
5161 }
5162
5163
5164 #ifdef HAVE_LINUX_REGSETS
5165
5166 #define use_linux_regsets 1
5167
5168 /* Returns true if REGSET has been disabled. */
5169
5170 static int
5171 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5172 {
5173 return (info->disabled_regsets != NULL
5174 && info->disabled_regsets[regset - info->regsets]);
5175 }
5176
5177 /* Disable REGSET. */
5178
5179 static void
5180 disable_regset (struct regsets_info *info, struct regset_info *regset)
5181 {
5182 int dr_offset;
5183
5184 dr_offset = regset - info->regsets;
5185 if (info->disabled_regsets == NULL)
5186 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5187 info->disabled_regsets[dr_offset] = 1;
5188 }
5189
5190 static int
5191 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5192 struct regcache *regcache)
5193 {
5194 struct regset_info *regset;
5195 int saw_general_regs = 0;
5196 int pid;
5197 struct iovec iov;
5198
5199 pid = lwpid_of (current_thread);
5200 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5201 {
5202 void *buf, *data;
5203 int nt_type, res;
5204
5205 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5206 continue;
5207
5208 buf = xmalloc (regset->size);
5209
5210 nt_type = regset->nt_type;
5211 if (nt_type)
5212 {
5213 iov.iov_base = buf;
5214 iov.iov_len = regset->size;
5215 data = (void *) &iov;
5216 }
5217 else
5218 data = buf;
5219
5220 #ifndef __sparc__
5221 res = ptrace (regset->get_request, pid,
5222 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5223 #else
5224 res = ptrace (regset->get_request, pid, data, nt_type);
5225 #endif
5226 if (res < 0)
5227 {
5228 if (errno == EIO)
5229 {
5230 /* If we get EIO on a regset, do not try it again for
5231 this process mode. */
5232 disable_regset (regsets_info, regset);
5233 }
5234 else if (errno == ENODATA)
5235 {
5236 /* ENODATA may be returned if the regset is currently
5237 not "active". This can happen in normal operation,
5238 so suppress the warning in this case. */
5239 }
5240 else
5241 {
5242 char s[256];
5243 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5244 pid);
5245 perror (s);
5246 }
5247 }
5248 else
5249 {
5250 if (regset->type == GENERAL_REGS)
5251 saw_general_regs = 1;
5252 regset->store_function (regcache, buf);
5253 }
5254 free (buf);
5255 }
5256 if (saw_general_regs)
5257 return 0;
5258 else
5259 return 1;
5260 }
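
/* For reference, the table iterated above is a target-supplied array
   of regset_info entries terminated by an entry with size -1.  A
   typical pair of entries looks roughly like this (the fill/store
   function names are hypothetical placeholders):

     { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
       GENERAL_REGS, arch_fill_gregset, arch_store_gregset },
     { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
       sizeof (elf_fpregset_t), FP_REGS,
       arch_fill_fpregset, arch_store_fpregset },

   A non-zero nt_type selects the iovec-based PTRACE_GETREGSET path
   above; an nt_type of 0 means a fixed-layout request such as
   PTRACE_GETREGS.  */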
5261
5262 static int
5263 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5264 struct regcache *regcache)
5265 {
5266 struct regset_info *regset;
5267 int saw_general_regs = 0;
5268 int pid;
5269 struct iovec iov;
5270
5271 pid = lwpid_of (current_thread);
5272 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5273 {
5274 void *buf, *data;
5275 int nt_type, res;
5276
5277 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5278 || regset->fill_function == NULL)
5279 continue;
5280
5281 buf = xmalloc (regset->size);
5282
5283 /* First fill the buffer with the current register set contents,
5284 in case there are any items in the kernel's regset that are
5285 not in gdbserver's regcache. */
5286
5287 nt_type = regset->nt_type;
5288 if (nt_type)
5289 {
5290 iov.iov_base = buf;
5291 iov.iov_len = regset->size;
5292 data = (void *) &iov;
5293 }
5294 else
5295 data = buf;
5296
5297 #ifndef __sparc__
5298 res = ptrace (regset->get_request, pid,
5299 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5300 #else
5301 res = ptrace (regset->get_request, pid, data, nt_type);
5302 #endif
5303
5304 if (res == 0)
5305 {
5306 /* Then overlay our cached registers on that. */
5307 regset->fill_function (regcache, buf);
5308
5309 /* Only now do we write the register set. */
5310 #ifndef __sparc__
5311 res = ptrace (regset->set_request, pid,
5312 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5313 #else
5314 res = ptrace (regset->set_request, pid, data, nt_type);
5315 #endif
5316 }
5317
5318 if (res < 0)
5319 {
5320 if (errno == EIO)
5321 {
5322 /* If we get EIO on a regset, do not try it again for
5323 this process mode. */
5324 disable_regset (regsets_info, regset);
5325 }
5326 else if (errno == ESRCH)
5327 {
5328 /* At this point, ESRCH should mean the process is
5329 already gone, in which case we simply ignore attempts
5330 to change its registers. See also the related
5331 comment in linux_resume_one_lwp. */
5332 free (buf);
5333 return 0;
5334 }
5335 else
5336 {
5337 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5338 }
5339 }
5340 else if (regset->type == GENERAL_REGS)
5341 saw_general_regs = 1;
5342 free (buf);
5343 }
5344 if (saw_general_regs)
5345 return 0;
5346 else
5347 return 1;
5348 }
5349
5350 #else /* !HAVE_LINUX_REGSETS */
5351
5352 #define use_linux_regsets 0
5353 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5354 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5355
5356 #endif
5357
5358 /* Return 1 if register REGNO is supported by one of the regset ptrace
5359 calls or 0 if it has to be transferred individually. */
5360
5361 static int
5362 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5363 {
5364 unsigned char mask = 1 << (regno % 8);
5365 size_t index = regno / 8;
5366
5367 return (use_linux_regsets
5368 && (regs_info->regset_bitmap == NULL
5369 || (regs_info->regset_bitmap[index] & mask) != 0));
5370 }
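
/* Worked example: for REGNO 10 the byte index is 10 / 8 = 1 and the
   mask is 1 << (10 % 8) = 0x04, so bit 2 of regset_bitmap[1]
   determines whether register 10 travels via a regset or has to be
   transferred individually.  */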
5371
5372 #ifdef HAVE_LINUX_USRREGS
5373
5374 static int
5375 register_addr (const struct usrregs_info *usrregs, int regnum)
5376 {
5377 int addr;
5378
5379 if (regnum < 0 || regnum >= usrregs->num_regs)
5380 error ("Invalid register number %d.", regnum);
5381
5382 addr = usrregs->regmap[regnum];
5383
5384 return addr;
5385 }
5386
5387 /* Fetch one register. */
5388 static void
5389 fetch_register (const struct usrregs_info *usrregs,
5390 struct regcache *regcache, int regno)
5391 {
5392 CORE_ADDR regaddr;
5393 int i, size;
5394 char *buf;
5395 int pid;
5396
5397 if (regno >= usrregs->num_regs)
5398 return;
5399 if ((*the_low_target.cannot_fetch_register) (regno))
5400 return;
5401
5402 regaddr = register_addr (usrregs, regno);
5403 if (regaddr == -1)
5404 return;
5405
5406 size = ((register_size (regcache->tdesc, regno)
5407 + sizeof (PTRACE_XFER_TYPE) - 1)
5408 & -sizeof (PTRACE_XFER_TYPE));
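  /* The expression above rounds the register size up to a whole
     number of ptrace transfer words: e.g. with a 5-byte register and
     a 4-byte PTRACE_XFER_TYPE, (5 + 3) & -4 == 8, i.e. two
     transfers.  */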
5409 buf = (char *) alloca (size);
5410
5411 pid = lwpid_of (current_thread);
5412 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5413 {
5414 errno = 0;
5415 *(PTRACE_XFER_TYPE *) (buf + i) =
5416 ptrace (PTRACE_PEEKUSER, pid,
5417 /* Coerce to a uintptr_t first to avoid potential gcc warning
5418 of coercing an 8 byte integer to a 4 byte pointer. */
5419 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5420 regaddr += sizeof (PTRACE_XFER_TYPE);
5421 if (errno != 0)
5422 error ("reading register %d: %s", regno, strerror (errno));
5423 }
5424
5425 if (the_low_target.supply_ptrace_register)
5426 the_low_target.supply_ptrace_register (regcache, regno, buf);
5427 else
5428 supply_register (regcache, regno, buf);
5429 }
5430
5431 /* Store one register. */
5432 static void
5433 store_register (const struct usrregs_info *usrregs,
5434 struct regcache *regcache, int regno)
5435 {
5436 CORE_ADDR regaddr;
5437 int i, size;
5438 char *buf;
5439 int pid;
5440
5441 if (regno >= usrregs->num_regs)
5442 return;
5443 if ((*the_low_target.cannot_store_register) (regno))
5444 return;
5445
5446 regaddr = register_addr (usrregs, regno);
5447 if (regaddr == -1)
5448 return;
5449
5450 size = ((register_size (regcache->tdesc, regno)
5451 + sizeof (PTRACE_XFER_TYPE) - 1)
5452 & -sizeof (PTRACE_XFER_TYPE));
5453 buf = (char *) alloca (size);
5454 memset (buf, 0, size);
5455
5456 if (the_low_target.collect_ptrace_register)
5457 the_low_target.collect_ptrace_register (regcache, regno, buf);
5458 else
5459 collect_register (regcache, regno, buf);
5460
5461 pid = lwpid_of (current_thread);
5462 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5463 {
5464 errno = 0;
5465 ptrace (PTRACE_POKEUSER, pid,
5466 /* Coerce to a uintptr_t first to avoid potential gcc warning
5467 about coercing an 8 byte integer to a 4 byte pointer. */
5468 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5469 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5470 if (errno != 0)
5471 {
5472 /* At this point, ESRCH should mean the process is
5473 already gone, in which case we simply ignore attempts
5474 to change its registers. See also the related
5475 comment in linux_resume_one_lwp. */
5476 if (errno == ESRCH)
5477 return;
5478
5479 if ((*the_low_target.cannot_store_register) (regno) == 0)
5480 error ("writing register %d: %s", regno, strerror (errno));
5481 }
5482 regaddr += sizeof (PTRACE_XFER_TYPE);
5483 }
5484 }
5485
5486 /* Fetch all registers, or just one, from the child process.
5487 If REGNO is -1, do this for all registers, skipping any that are
5488 assumed to have been retrieved by regsets_fetch_inferior_registers,
5489 unless ALL is non-zero.
5490 Otherwise, REGNO specifies which register (so we can save time). */
5491 static void
5492 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5493 struct regcache *regcache, int regno, int all)
5494 {
5495 struct usrregs_info *usr = regs_info->usrregs;
5496
5497 if (regno == -1)
5498 {
5499 for (regno = 0; regno < usr->num_regs; regno++)
5500 if (all || !linux_register_in_regsets (regs_info, regno))
5501 fetch_register (usr, regcache, regno);
5502 }
5503 else
5504 fetch_register (usr, regcache, regno);
5505 }
5506
5507 /* Store our register values back into the inferior.
5508 If REGNO is -1, do this for all registers, skipping any that are
5509 assumed to have been saved by regsets_store_inferior_registers,
5510 unless ALL is non-zero.
5511 Otherwise, REGNO specifies which register (so we can save time). */
5512 static void
5513 usr_store_inferior_registers (const struct regs_info *regs_info,
5514 struct regcache *regcache, int regno, int all)
5515 {
5516 struct usrregs_info *usr = regs_info->usrregs;
5517
5518 if (regno == -1)
5519 {
5520 for (regno = 0; regno < usr->num_regs; regno++)
5521 if (all || !linux_register_in_regsets (regs_info, regno))
5522 store_register (usr, regcache, regno);
5523 }
5524 else
5525 store_register (usr, regcache, regno);
5526 }
5527
5528 #else /* !HAVE_LINUX_USRREGS */
5529
5530 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5531 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5532
5533 #endif
5534
5535
5536 static void
5537 linux_fetch_registers (struct regcache *regcache, int regno)
5538 {
5539 int use_regsets;
5540 int all = 0;
5541 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5542
5543 if (regno == -1)
5544 {
5545 if (the_low_target.fetch_register != NULL
5546 && regs_info->usrregs != NULL)
5547 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5548 (*the_low_target.fetch_register) (regcache, regno);
5549
5550 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5551 if (regs_info->usrregs != NULL)
5552 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5553 }
5554 else
5555 {
5556 if (the_low_target.fetch_register != NULL
5557 && (*the_low_target.fetch_register) (regcache, regno))
5558 return;
5559
5560 use_regsets = linux_register_in_regsets (regs_info, regno);
5561 if (use_regsets)
5562 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5563 regcache);
5564 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5565 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5566 }
5567 }
5568
5569 static void
5570 linux_store_registers (struct regcache *regcache, int regno)
5571 {
5572 int use_regsets;
5573 int all = 0;
5574 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5575
5576 if (regno == -1)
5577 {
5578 all = regsets_store_inferior_registers (regs_info->regsets_info,
5579 regcache);
5580 if (regs_info->usrregs != NULL)
5581 usr_store_inferior_registers (regs_info, regcache, regno, all);
5582 }
5583 else
5584 {
5585 use_regsets = linux_register_in_regsets (regs_info, regno);
5586 if (use_regsets)
5587 all = regsets_store_inferior_registers (regs_info->regsets_info,
5588 regcache);
5589 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5590 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5591 }
5592 }
5593
5594
5595 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5596 to debugger memory starting at MYADDR. */
5597
5598 static int
5599 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5600 {
5601 int pid = lwpid_of (current_thread);
5602 register PTRACE_XFER_TYPE *buffer;
5603 register CORE_ADDR addr;
5604 register int count;
5605 char filename[64];
5606 register int i;
5607 int ret;
5608 int fd;
5609
5610 /* Try using /proc. Don't bother for one word. */
5611 if (len >= 3 * sizeof (long))
5612 {
5613 int bytes;
5614
5615 /* We could keep this file open and cache it - possibly one per
5616 thread. That requires some juggling, but is even faster. */
5617 sprintf (filename, "/proc/%d/mem", pid);
5618 fd = open (filename, O_RDONLY | O_LARGEFILE);
5619 if (fd == -1)
5620 goto no_proc;
5621
5622 /* If pread64 is available, use it. It's faster if the kernel
5623 supports it (only one syscall), and it's 64-bit safe even on
5624 32-bit platforms (for instance, SPARC debugging a SPARC64
5625 application). */
5626 #ifdef HAVE_PREAD64
5627 bytes = pread64 (fd, myaddr, len, memaddr);
5628 #else
5629 bytes = -1;
5630 if (lseek (fd, memaddr, SEEK_SET) != -1)
5631 bytes = read (fd, myaddr, len);
5632 #endif
5633
5634 close (fd);
5635 if (bytes == len)
5636 return 0;
5637
5638 	  /* Some data was read; we'll try to get the rest with ptrace.  */
5639 if (bytes > 0)
5640 {
5641 memaddr += bytes;
5642 myaddr += bytes;
5643 len -= bytes;
5644 }
5645 }
5646
5647 no_proc:
5648 /* Round starting address down to longword boundary. */
5649 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5650 /* Round ending address up; get number of longwords that makes. */
5651 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5652 / sizeof (PTRACE_XFER_TYPE));
5653 /* Allocate buffer of that many longwords. */
5654 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5655
5656 	  /* Read all the longwords.  */
5657 errno = 0;
5658 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5659 {
5660 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5661 about coercing an 8 byte integer to a 4 byte pointer. */
5662 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5663 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5664 (PTRACE_TYPE_ARG4) 0);
5665 if (errno)
5666 break;
5667 }
5668 ret = errno;
5669
5670 /* Copy appropriate bytes out of the buffer. */
5671 if (i > 0)
5672 {
5673 i *= sizeof (PTRACE_XFER_TYPE);
5674 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5675 memcpy (myaddr,
5676 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5677 i < len ? i : len);
5678 }
5679
5680 return ret;
5681 }
5682
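/* Illustrative sketch, kept compiled out: the fast path above goes
   through /proc/<pid>/mem, falling back to lseek+read where pread64 is
   unavailable, just as the HAVE_PREAD64 split in the real function
   does.  It assumes a stopped, ptrace-attached tracee; the example_
   name is hypothetical.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

/* Read LEN bytes at MEMADDR from a stopped tracee PID via /proc.
   Returns the number of bytes read, or -1 on error.  */
static ssize_t
example_proc_mem_read (pid_t pid, unsigned long memaddr,
		       unsigned char *myaddr, size_t len)
{
  char filename[64];
  int fd;
  ssize_t bytes;

  snprintf (filename, sizeof filename, "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;

#ifdef HAVE_PREAD64
  /* One syscall, and 64-bit safe even on 32-bit hosts.  */
  bytes = pread64 (fd, myaddr, len, memaddr);
#else
  bytes = -1;
  if (lseek (fd, memaddr, SEEK_SET) != -1)
    bytes = read (fd, myaddr, len);
#endif

  close (fd);
  return bytes;
}
#endif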
5683 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5684 memory at MEMADDR. On failure (cannot write to the inferior)
5685 returns the value of errno. Always succeeds if LEN is zero. */
5686
5687 static int
5688 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5689 {
5690 register int i;
5691 /* Round starting address down to longword boundary. */
5692 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5693 /* Round ending address up; get number of longwords that makes. */
5694 register int count
5695 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5696 / sizeof (PTRACE_XFER_TYPE);
5697
5698 /* Allocate buffer of that many longwords. */
5699 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5700
5701 int pid = lwpid_of (current_thread);
5702
5703 if (len == 0)
5704 {
5705 /* Zero length write always succeeds. */
5706 return 0;
5707 }
5708
5709 if (debug_threads)
5710 {
5711 /* Dump up to four bytes. */
5712 char str[4 * 2 + 1];
5713 char *p = str;
5714 int dump = len < 4 ? len : 4;
5715
5716 for (i = 0; i < dump; i++)
5717 {
5718 sprintf (p, "%02x", myaddr[i]);
5719 p += 2;
5720 }
5721 *p = '\0';
5722
5723 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5724 str, (long) memaddr, pid);
5725 }
5726
5727 /* Fill start and end extra bytes of buffer with existing memory data. */
5728
5729 errno = 0;
5730 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5731 about coercing an 8 byte integer to a 4 byte pointer. */
5732 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5733 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5734 (PTRACE_TYPE_ARG4) 0);
5735 if (errno)
5736 return errno;
5737
5738 if (count > 1)
5739 {
5740 errno = 0;
5741 buffer[count - 1]
5742 = ptrace (PTRACE_PEEKTEXT, pid,
5743 /* Coerce to a uintptr_t first to avoid potential gcc warning
5744 about coercing an 8 byte integer to a 4 byte pointer. */
5745 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5746 * sizeof (PTRACE_XFER_TYPE)),
5747 (PTRACE_TYPE_ARG4) 0);
5748 if (errno)
5749 return errno;
5750 }
5751
5752 /* Copy data to be written over corresponding part of buffer. */
5753
5754 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5755 myaddr, len);
5756
5757 /* Write the entire buffer. */
5758
5759 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5760 {
5761 errno = 0;
5762 ptrace (PTRACE_POKETEXT, pid,
5763 /* Coerce to a uintptr_t first to avoid potential gcc warning
5764 about coercing an 8 byte integer to a 4 byte pointer. */
5765 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5766 (PTRACE_TYPE_ARG4) buffer[i]);
5767 if (errno)
5768 return errno;
5769 }
5770
5771 return 0;
5772 }
5773
5774 static void
5775 linux_look_up_symbols (void)
5776 {
5777 #ifdef USE_THREAD_DB
5778 struct process_info *proc = current_process ();
5779
5780 if (proc->priv->thread_db != NULL)
5781 return;
5782
5783 thread_db_init ();
5784 #endif
5785 }
5786
5787 static void
5788 linux_request_interrupt (void)
5789 {
5790 extern unsigned long signal_pid;
5791
5792 /* Send a SIGINT to the process group. This acts just like the user
5793 typed a ^C on the controlling terminal. */
5794 kill (-signal_pid, SIGINT);
5795 }
5796
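/* Illustrative sketch, kept compiled out: passing a negative pid to
   kill signals the whole process group, which is what makes the
   interrupt above behave like a ^C typed on the controlling terminal.
   The example_ name is hypothetical.  */
#if 0
#include <signal.h>
#include <sys/types.h>

/* Deliver SIGINT to every process in group PGID, as if the user had
   typed ^C on the group's controlling terminal.  */
static int
example_interrupt_group (pid_t pgid)
{
  /* kill with a negative pid targets the process group -pid.  */
  return kill (-pgid, SIGINT);
}
#endif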
5797 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5798 to debugger memory starting at MYADDR. */
5799
5800 static int
5801 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5802 {
5803 char filename[PATH_MAX];
5804 int fd, n;
5805 int pid = lwpid_of (current_thread);
5806
5807 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5808
5809 fd = open (filename, O_RDONLY);
5810 if (fd < 0)
5811 return -1;
5812
5813 if (offset != (CORE_ADDR) 0
5814 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5815 n = -1;
5816 else
5817 n = read (fd, myaddr, len);
5818
5819 close (fd);
5820
5821 return n;
5822 }
5823
5824 /* These breakpoint and watchpoint related wrapper functions simply
5825 pass on the function call if the target has registered a
5826 corresponding function. */
5827
5828 static int
5829 linux_supports_z_point_type (char z_type)
5830 {
5831 return (the_low_target.supports_z_point_type != NULL
5832 && the_low_target.supports_z_point_type (z_type));
5833 }
5834
5835 static int
5836 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5837 int size, struct raw_breakpoint *bp)
5838 {
5839 if (type == raw_bkpt_type_sw)
5840 return insert_memory_breakpoint (bp);
5841 else if (the_low_target.insert_point != NULL)
5842 return the_low_target.insert_point (type, addr, size, bp);
5843 else
5844 /* Unsupported (see target.h). */
5845 return 1;
5846 }
5847
5848 static int
5849 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5850 int size, struct raw_breakpoint *bp)
5851 {
5852 if (type == raw_bkpt_type_sw)
5853 return remove_memory_breakpoint (bp);
5854 else if (the_low_target.remove_point != NULL)
5855 return the_low_target.remove_point (type, addr, size, bp);
5856 else
5857 /* Unsupported (see target.h). */
5858 return 1;
5859 }
5860
5861 /* Implement the to_stopped_by_sw_breakpoint target_ops
5862 method. */
5863
5864 static int
5865 linux_stopped_by_sw_breakpoint (void)
5866 {
5867 struct lwp_info *lwp = get_thread_lwp (current_thread);
5868
5869 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5870 }
5871
5872 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5873 method. */
5874
5875 static int
5876 linux_supports_stopped_by_sw_breakpoint (void)
5877 {
5878 return USE_SIGTRAP_SIGINFO;
5879 }
5880
5881 /* Implement the to_stopped_by_hw_breakpoint target_ops
5882 method. */
5883
5884 static int
5885 linux_stopped_by_hw_breakpoint (void)
5886 {
5887 struct lwp_info *lwp = get_thread_lwp (current_thread);
5888
5889 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5890 }
5891
5892 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5893 method. */
5894
5895 static int
5896 linux_supports_stopped_by_hw_breakpoint (void)
5897 {
5898 return USE_SIGTRAP_SIGINFO;
5899 }
5900
5901 /* Implement the supports_hardware_single_step target_ops method. */
5902
5903 static int
5904 linux_supports_hardware_single_step (void)
5905 {
5906 return can_hardware_single_step ();
5907 }
5908
5909 static int
5910 linux_supports_software_single_step (void)
5911 {
5912 return can_software_single_step ();
5913 }
5914
5915 static int
5916 linux_stopped_by_watchpoint (void)
5917 {
5918 struct lwp_info *lwp = get_thread_lwp (current_thread);
5919
5920 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5921 }
5922
5923 static CORE_ADDR
5924 linux_stopped_data_address (void)
5925 {
5926 struct lwp_info *lwp = get_thread_lwp (current_thread);
5927
5928 return lwp->stopped_data_address;
5929 }
5930
5931 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5932 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5933 && defined(PT_TEXT_END_ADDR)
5934
5935 /* This is only used for targets that define PT_TEXT_ADDR,
5936    PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
5937    target presumably has other ways of acquiring this information,
5938    such as loadmaps.  */
5939
5940 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5941 to tell gdb about. */
5942
5943 static int
5944 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5945 {
5946 unsigned long text, text_end, data;
5947 int pid = lwpid_of (current_thread);
5948
5949 errno = 0;
5950
5951 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5952 (PTRACE_TYPE_ARG4) 0);
5953 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5954 (PTRACE_TYPE_ARG4) 0);
5955 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5956 (PTRACE_TYPE_ARG4) 0);
5957
5958 if (errno == 0)
5959 {
5960 /* Both text and data offsets produced at compile-time (and so
5961 used by gdb) are relative to the beginning of the program,
5962 with the data segment immediately following the text segment.
5963 However, the actual runtime layout in memory may put the data
5964 somewhere else, so when we send gdb a data base-address, we
5965 use the real data base address and subtract the compile-time
5966 data base-address from it (which is just the length of the
5967 text segment). BSS immediately follows data in both
5968 cases. */
5969 *text_p = text;
5970 *data_p = data - (text_end - text);
5971
5972 return 1;
5973 }
5974 return 0;
5975 }
5976 #endif
5977
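/* A worked instance of the offset arithmetic above, kept compiled out;
   the addresses are made up.  If text runs from 0x100000 to 0x108000
   and the data segment was loaded at 0x200000, gdb is told a data base
   of 0x200000 - 0x8000 = 0x1f8000, because compile-time data addresses
   already include the length of the text segment.  */
#if 0
#include <assert.h>

static void
example_uclinux_offsets (void)
{
  unsigned long text = 0x100000;      /* Runtime text base.  */
  unsigned long text_end = 0x108000;  /* Runtime end of text.  */
  unsigned long data = 0x200000;      /* Runtime data base.  */

  /* Subtract the text length, since compile-time data addresses are
     relative to the start of the program, not of the data segment.  */
  unsigned long data_base = data - (text_end - text);

  assert (data_base == 0x1f8000);
}
#endif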
5978 static int
5979 linux_qxfer_osdata (const char *annex,
5980 unsigned char *readbuf, unsigned const char *writebuf,
5981 CORE_ADDR offset, int len)
5982 {
5983 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5984 }
5985
5986 /* Convert a native/host siginfo object into/from the siginfo in the
5987    layout of the inferior's architecture.  */
5988
5989 static void
5990 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5991 {
5992 int done = 0;
5993
5994 if (the_low_target.siginfo_fixup != NULL)
5995 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5996
5997 /* If there was no callback, or the callback didn't do anything,
5998 then just do a straight memcpy. */
5999 if (!done)
6000 {
6001 if (direction == 1)
6002 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6003 else
6004 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6005 }
6006 }
6007
6008 static int
6009 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6010 unsigned const char *writebuf, CORE_ADDR offset, int len)
6011 {
6012 int pid;
6013 siginfo_t siginfo;
6014 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6015
6016 if (current_thread == NULL)
6017 return -1;
6018
6019 pid = lwpid_of (current_thread);
6020
6021 if (debug_threads)
6022 debug_printf ("%s siginfo for lwp %d.\n",
6023 readbuf != NULL ? "Reading" : "Writing",
6024 pid);
6025
6026 if (offset >= sizeof (siginfo))
6027 return -1;
6028
6029 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6030 return -1;
6031
6032 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6033 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6034 inferior with a 64-bit GDBSERVER should look the same as debugging it
6035 with a 32-bit GDBSERVER, we need to convert it. */
6036 siginfo_fixup (&siginfo, inf_siginfo, 0);
6037
6038 if (offset + len > sizeof (siginfo))
6039 len = sizeof (siginfo) - offset;
6040
6041 if (readbuf != NULL)
6042 memcpy (readbuf, inf_siginfo + offset, len);
6043 else
6044 {
6045 memcpy (inf_siginfo + offset, writebuf, len);
6046
6047 /* Convert back to ptrace layout before flushing it out. */
6048 siginfo_fixup (&siginfo, inf_siginfo, 1);
6049
6050 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6051 return -1;
6052 }
6053
6054 return len;
6055 }
6056
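/* Illustrative sketch, kept compiled out: the function above is built
   around a PTRACE_GETSIGINFO / PTRACE_SETSIGINFO round trip, shown
   here without the 32/64-bit layout fixup.  PID must name a stopped
   tracee; the example_ name is hypothetical.  */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Fetch the pending siginfo of a stopped tracee and write it straight
   back.  Returns 0 on success, -1 on error.  */
static int
example_siginfo_round_trip (pid_t pid)
{
  siginfo_t siginfo;

  if (ptrace (PTRACE_GETSIGINFO, pid, (void *) 0, &siginfo) != 0)
    return -1;

  /* A real caller would edit SIGINFO here before flushing it out.  */

  if (ptrace (PTRACE_SETSIGINFO, pid, (void *) 0, &siginfo) != 0)
    return -1;

  return 0;
}
#endif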
6057 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6058    it lets us notice when children change state; and it acts as the
6059    handler for the sigsuspend in my_waitpid.  */
6060
6061 static void
6062 sigchld_handler (int signo)
6063 {
6064 int old_errno = errno;
6065
6066 if (debug_threads)
6067 {
6068 do
6069 {
6070 /* fprintf is not async-signal-safe, so call write
6071 directly. */
6072 if (write (2, "sigchld_handler\n",
6073 sizeof ("sigchld_handler\n") - 1) < 0)
6074 break; /* just ignore */
6075 } while (0);
6076 }
6077
6078 if (target_is_async_p ())
6079 async_file_mark (); /* trigger a linux_wait */
6080
6081 errno = old_errno;
6082 }
6083
6084 static int
6085 linux_supports_non_stop (void)
6086 {
6087 return 1;
6088 }
6089
6090 static int
6091 linux_async (int enable)
6092 {
6093 int previous = target_is_async_p ();
6094
6095 if (debug_threads)
6096 debug_printf ("linux_async (%d), previous=%d\n",
6097 enable, previous);
6098
6099 if (previous != enable)
6100 {
6101 sigset_t mask;
6102 sigemptyset (&mask);
6103 sigaddset (&mask, SIGCHLD);
6104
6105 sigprocmask (SIG_BLOCK, &mask, NULL);
6106
6107 if (enable)
6108 {
6109 if (pipe (linux_event_pipe) == -1)
6110 {
6111 linux_event_pipe[0] = -1;
6112 linux_event_pipe[1] = -1;
6113 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6114
6115 warning ("creating event pipe failed.");
6116 return previous;
6117 }
6118
6119 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6120 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6121
6122 /* Register the event loop handler. */
6123 add_file_handler (linux_event_pipe[0],
6124 handle_target_event, NULL);
6125
6126 /* Always trigger a linux_wait. */
6127 async_file_mark ();
6128 }
6129 else
6130 {
6131 delete_file_handler (linux_event_pipe[0]);
6132
6133 close (linux_event_pipe[0]);
6134 close (linux_event_pipe[1]);
6135 linux_event_pipe[0] = -1;
6136 linux_event_pipe[1] = -1;
6137 }
6138
6139 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6140 }
6141
6142 return previous;
6143 }
6144
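/* Illustrative sketch, kept compiled out: linux_async above wires
   SIGCHLD into the event loop with the classic self-pipe trick.  The
   handler does nothing but write a byte to a non-blocking pipe; the
   event loop watches the read end.  example_event_pipe and the
   example_ names are hypothetical, not gdbserver interfaces.  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int example_event_pipe[2] = { -1, -1 };

/* Async-signal-safe: just mark the pipe; the event loop notices.  */
static void
example_sigchld_handler (int signo)
{
  int old_errno = errno;
  char c = '+';

  (void) write (example_event_pipe[1], &c, 1);
  errno = old_errno;
}

static int
example_enable_async (void)
{
  if (pipe (example_event_pipe) == -1)
    return -1;

  /* Non-blocking on both ends, so neither the handler nor the event
     loop can wedge on a full or empty pipe.  */
  fcntl (example_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_event_pipe[1], F_SETFL, O_NONBLOCK);

  signal (SIGCHLD, example_sigchld_handler);
  return 0;
}
#endif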
6145 static int
6146 linux_start_non_stop (int nonstop)
6147 {
6148 /* Register or unregister from event-loop accordingly. */
6149 linux_async (nonstop);
6150
6151 if (target_is_async_p () != (nonstop != 0))
6152 return -1;
6153
6154 return 0;
6155 }
6156
6157 static int
6158 linux_supports_multi_process (void)
6159 {
6160 return 1;
6161 }
6162
6163 /* Check if fork events are supported. */
6164
6165 static int
6166 linux_supports_fork_events (void)
6167 {
6168 return linux_supports_tracefork ();
6169 }
6170
6171 /* Check if vfork events are supported. */
6172
6173 static int
6174 linux_supports_vfork_events (void)
6175 {
6176 return linux_supports_tracefork ();
6177 }
6178
6179 /* Check if exec events are supported. */
6180
6181 static int
6182 linux_supports_exec_events (void)
6183 {
6184 return linux_supports_traceexec ();
6185 }
6186
6187 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6188 options for the specified lwp. */
6189
6190 static int
6191 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6192 void *args)
6193 {
6194 struct thread_info *thread = (struct thread_info *) entry;
6195 struct lwp_info *lwp = get_thread_lwp (thread);
6196
6197 if (!lwp->stopped)
6198 {
6199 /* Stop the lwp so we can modify its ptrace options. */
6200 lwp->must_set_ptrace_flags = 1;
6201 linux_stop_lwp (lwp);
6202 }
6203 else
6204 {
6205 /* Already stopped; go ahead and set the ptrace options. */
6206 struct process_info *proc = find_process_pid (pid_of (thread));
6207 int options = linux_low_ptrace_options (proc->attached);
6208
6209 linux_enable_event_reporting (lwpid_of (thread), options);
6210 lwp->must_set_ptrace_flags = 0;
6211 }
6212
6213 return 0;
6214 }
6215
6216 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6217 ptrace flags for all inferiors. This is in case the new GDB connection
6218 doesn't support the same set of events that the previous one did. */
6219
6220 static void
6221 linux_handle_new_gdb_connection (void)
6222 {
6223 pid_t pid;
6224
6225 /* Request that all the lwps reset their ptrace options. */
6226   find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6227 }
6228
6229 static int
6230 linux_supports_disable_randomization (void)
6231 {
6232 #ifdef HAVE_PERSONALITY
6233 return 1;
6234 #else
6235 return 0;
6236 #endif
6237 }
6238
6239 static int
6240 linux_supports_agent (void)
6241 {
6242 return 1;
6243 }
6244
6245 static int
6246 linux_supports_range_stepping (void)
6247 {
6248 if (*the_low_target.supports_range_stepping == NULL)
6249 return 0;
6250
6251 return (*the_low_target.supports_range_stepping) ();
6252 }
6253
6254 /* Enumerate spufs IDs for process PID. */
6255 static int
6256 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6257 {
6258 int pos = 0;
6259 int written = 0;
6260 char path[128];
6261 DIR *dir;
6262 struct dirent *entry;
6263
6264 sprintf (path, "/proc/%ld/fd", pid);
6265 dir = opendir (path);
6266 if (!dir)
6267 return -1;
6268
6269 rewinddir (dir);
6270 while ((entry = readdir (dir)) != NULL)
6271 {
6272 struct stat st;
6273 struct statfs stfs;
6274 int fd;
6275
6276 fd = atoi (entry->d_name);
6277 if (!fd)
6278 continue;
6279
6280 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6281 if (stat (path, &st) != 0)
6282 continue;
6283 if (!S_ISDIR (st.st_mode))
6284 continue;
6285
6286 if (statfs (path, &stfs) != 0)
6287 continue;
6288 if (stfs.f_type != SPUFS_MAGIC)
6289 continue;
6290
6291 if (pos >= offset && pos + 4 <= offset + len)
6292 {
6293 *(unsigned int *)(buf + pos - offset) = fd;
6294 written += 4;
6295 }
6296 pos += 4;
6297 }
6298
6299 closedir (dir);
6300 return written;
6301 }
6302
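/* Illustrative sketch, kept compiled out: the directory scan above
   recognizes SPU contexts by the filesystem magic of whatever the
   descriptor points at.  The example_ name and the fd number a caller
   would pass are hypothetical.  */
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/vfs.h>

/* Return 1 if /proc/PID/fd/FD refers to a directory on spufs.  */
static int
example_fd_is_spu_context (pid_t pid, int fd)
{
  char path[128];
  struct stat st;
  struct statfs stfs;

  snprintf (path, sizeof path, "/proc/%d/fd/%d", (int) pid, fd);
  if (stat (path, &st) != 0 || !S_ISDIR (st.st_mode))
    return 0;
  if (statfs (path, &stfs) != 0)
    return 0;
  return stfs.f_type == SPUFS_MAGIC;
}
#endif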
6303 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6304 object type, using the /proc file system. */
6305 static int
6306 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6307 unsigned const char *writebuf,
6308 CORE_ADDR offset, int len)
6309 {
6310 long pid = lwpid_of (current_thread);
6311 char buf[128];
6312 int fd = 0;
6313 int ret = 0;
6314
6315 if (!writebuf && !readbuf)
6316 return -1;
6317
6318 if (!*annex)
6319 {
6320 if (!readbuf)
6321 return -1;
6322 else
6323 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6324 }
6325
6326 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6327   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6328 if (fd <= 0)
6329 return -1;
6330
6331 if (offset != 0
6332 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6333 {
6334 close (fd);
6335 return 0;
6336 }
6337
6338 if (writebuf)
6339 ret = write (fd, writebuf, (size_t) len);
6340 else
6341 ret = read (fd, readbuf, (size_t) len);
6342
6343 close (fd);
6344 return ret;
6345 }
6346
6347 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6348 struct target_loadseg
6349 {
6350 /* Core address to which the segment is mapped. */
6351 Elf32_Addr addr;
6352 /* VMA recorded in the program header. */
6353 Elf32_Addr p_vaddr;
6354 /* Size of this segment in memory. */
6355 Elf32_Word p_memsz;
6356 };
6357
6358 # if defined PT_GETDSBT
6359 struct target_loadmap
6360 {
6361 /* Protocol version number, must be zero. */
6362 Elf32_Word version;
6363 /* Pointer to the DSBT table, its size, and the DSBT index. */
6364 unsigned *dsbt_table;
6365 unsigned dsbt_size, dsbt_index;
6366 /* Number of segments in this map. */
6367 Elf32_Word nsegs;
6368 /* The actual memory map. */
6369 struct target_loadseg segs[/*nsegs*/];
6370 };
6371 # define LINUX_LOADMAP PT_GETDSBT
6372 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6373 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6374 # else
6375 struct target_loadmap
6376 {
6377 /* Protocol version number, must be zero. */
6378 Elf32_Half version;
6379 /* Number of segments in this map. */
6380 Elf32_Half nsegs;
6381 /* The actual memory map. */
6382 struct target_loadseg segs[/*nsegs*/];
6383 };
6384 # define LINUX_LOADMAP PTRACE_GETFDPIC
6385 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6386 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6387 # endif
6388
6389 static int
6390 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6391 unsigned char *myaddr, unsigned int len)
6392 {
6393 int pid = lwpid_of (current_thread);
6394 int addr = -1;
6395 struct target_loadmap *data = NULL;
6396 unsigned int actual_length, copy_length;
6397
6398 if (strcmp (annex, "exec") == 0)
6399 addr = (int) LINUX_LOADMAP_EXEC;
6400 else if (strcmp (annex, "interp") == 0)
6401 addr = (int) LINUX_LOADMAP_INTERP;
6402 else
6403 return -1;
6404
6405 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6406 return -1;
6407
6408 if (data == NULL)
6409 return -1;
6410
6411 actual_length = sizeof (struct target_loadmap)
6412 + sizeof (struct target_loadseg) * data->nsegs;
6413
6414 if (offset < 0 || offset > actual_length)
6415 return -1;
6416
6417 copy_length = actual_length - offset < len ? actual_length - offset : len;
6418 memcpy (myaddr, (char *) data + offset, copy_length);
6419 return copy_length;
6420 }
6421 #else
6422 # define linux_read_loadmap NULL
6423 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6424
6425 static void
6426 linux_process_qsupported (char **features, int count)
6427 {
6428 if (the_low_target.process_qsupported != NULL)
6429 the_low_target.process_qsupported (features, count);
6430 }
6431
6432 static int
6433 linux_supports_catch_syscall (void)
6434 {
6435 return (the_low_target.get_syscall_trapinfo != NULL
6436 && linux_supports_tracesysgood ());
6437 }
6438
6439 static int
6440 linux_get_ipa_tdesc_idx (void)
6441 {
6442 if (the_low_target.get_ipa_tdesc_idx == NULL)
6443 return 0;
6444
6445 return (*the_low_target.get_ipa_tdesc_idx) ();
6446 }
6447
6448 static int
6449 linux_supports_tracepoints (void)
6450 {
6451 if (*the_low_target.supports_tracepoints == NULL)
6452 return 0;
6453
6454 return (*the_low_target.supports_tracepoints) ();
6455 }
6456
6457 static CORE_ADDR
6458 linux_read_pc (struct regcache *regcache)
6459 {
6460 if (the_low_target.get_pc == NULL)
6461 return 0;
6462
6463 return (*the_low_target.get_pc) (regcache);
6464 }
6465
6466 static void
6467 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6468 {
6469 gdb_assert (the_low_target.set_pc != NULL);
6470
6471 (*the_low_target.set_pc) (regcache, pc);
6472 }
6473
6474 static int
6475 linux_thread_stopped (struct thread_info *thread)
6476 {
6477 return get_thread_lwp (thread)->stopped;
6478 }
6479
6480 /* This exposes stop-all-threads functionality to other modules. */
6481
6482 static void
6483 linux_pause_all (int freeze)
6484 {
6485 stop_all_lwps (freeze, NULL);
6486 }
6487
6488 /* This exposes unstop-all-threads functionality to other gdbserver
6489 modules. */
6490
6491 static void
6492 linux_unpause_all (int unfreeze)
6493 {
6494 unstop_all_lwps (unfreeze, NULL);
6495 }
6496
6497 static int
6498 linux_prepare_to_access_memory (void)
6499 {
6500 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6501 running LWP. */
6502 if (non_stop)
6503 linux_pause_all (1);
6504 return 0;
6505 }
6506
6507 static void
6508 linux_done_accessing_memory (void)
6509 {
6510 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6511 running LWP. */
6512 if (non_stop)
6513 linux_unpause_all (1);
6514 }
6515
6516 static int
6517 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6518 CORE_ADDR collector,
6519 CORE_ADDR lockaddr,
6520 ULONGEST orig_size,
6521 CORE_ADDR *jump_entry,
6522 CORE_ADDR *trampoline,
6523 ULONGEST *trampoline_size,
6524 unsigned char *jjump_pad_insn,
6525 ULONGEST *jjump_pad_insn_size,
6526 CORE_ADDR *adjusted_insn_addr,
6527 CORE_ADDR *adjusted_insn_addr_end,
6528 char *err)
6529 {
6530 return (*the_low_target.install_fast_tracepoint_jump_pad)
6531 (tpoint, tpaddr, collector, lockaddr, orig_size,
6532 jump_entry, trampoline, trampoline_size,
6533 jjump_pad_insn, jjump_pad_insn_size,
6534 adjusted_insn_addr, adjusted_insn_addr_end,
6535 err);
6536 }
6537
6538 static struct emit_ops *
6539 linux_emit_ops (void)
6540 {
6541 if (the_low_target.emit_ops != NULL)
6542 return (*the_low_target.emit_ops) ();
6543 else
6544 return NULL;
6545 }
6546
6547 static int
6548 linux_get_min_fast_tracepoint_insn_len (void)
6549 {
6550 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6551 }
6552
6553 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6554
6555 static int
6556 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6557 CORE_ADDR *phdr_memaddr, int *num_phdr)
6558 {
6559 char filename[PATH_MAX];
6560 int fd;
6561 const int auxv_size = is_elf64
6562 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6563 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6564
6565 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6566
6567 fd = open (filename, O_RDONLY);
6568 if (fd < 0)
6569 return 1;
6570
6571 *phdr_memaddr = 0;
6572 *num_phdr = 0;
6573 while (read (fd, buf, auxv_size) == auxv_size
6574 && (*phdr_memaddr == 0 || *num_phdr == 0))
6575 {
6576 if (is_elf64)
6577 {
6578 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6579
6580 switch (aux->a_type)
6581 {
6582 case AT_PHDR:
6583 *phdr_memaddr = aux->a_un.a_val;
6584 break;
6585 case AT_PHNUM:
6586 *num_phdr = aux->a_un.a_val;
6587 break;
6588 }
6589 }
6590 else
6591 {
6592 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6593
6594 switch (aux->a_type)
6595 {
6596 case AT_PHDR:
6597 *phdr_memaddr = aux->a_un.a_val;
6598 break;
6599 case AT_PHNUM:
6600 *num_phdr = aux->a_un.a_val;
6601 break;
6602 }
6603 }
6604 }
6605
6606 close (fd);
6607
6608 if (*phdr_memaddr == 0 || *num_phdr == 0)
6609 {
6610 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6611 "phdr_memaddr = %ld, phdr_num = %d",
6612 (long) *phdr_memaddr, *num_phdr);
6613 return 2;
6614 }
6615
6616 return 0;
6617 }
6618
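/* Illustrative sketch, kept compiled out: the same auxv scan as above,
   specialized to the 64-bit layout only.  One Elf64_auxv_t record is
   read per iteration, exactly as in the real loop; the example_ name
   is hypothetical.  */
#if 0
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Scan /proc/PID/auxv for AT_PHDR and AT_PHNUM (64-bit layout).
   Returns 0 on success, nonzero on failure.  */
static int
example_scan_auxv64 (int pid, unsigned long *phdr, unsigned long *phnum)
{
  char filename[64];
  Elf64_auxv_t aux;
  int fd;

  snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr = *phnum = 0;
  while (read (fd, &aux, sizeof aux) == sizeof aux)
    {
      if (aux.a_type == AT_PHDR)
	*phdr = aux.a_un.a_val;
      else if (aux.a_type == AT_PHNUM)
	*phnum = aux.a_un.a_val;
    }

  close (fd);
  return (*phdr == 0 || *phnum == 0) ? 2 : 0;
}
#endif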
6619 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6620
6621 static CORE_ADDR
6622 get_dynamic (const int pid, const int is_elf64)
6623 {
6624 CORE_ADDR phdr_memaddr, relocation;
6625 int num_phdr, i;
6626 unsigned char *phdr_buf;
6627 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6628
6629 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6630 return 0;
6631
6632 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6633 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6634
6635 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6636 return 0;
6637
6638 /* Compute relocation: it is expected to be 0 for "regular" executables,
6639 non-zero for PIE ones. */
6640 relocation = -1;
6641 for (i = 0; relocation == -1 && i < num_phdr; i++)
6642 if (is_elf64)
6643 {
6644 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6645
6646 if (p->p_type == PT_PHDR)
6647 relocation = phdr_memaddr - p->p_vaddr;
6648 }
6649 else
6650 {
6651 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6652
6653 if (p->p_type == PT_PHDR)
6654 relocation = phdr_memaddr - p->p_vaddr;
6655 }
6656
6657 if (relocation == -1)
6658 {
6659       /* PT_PHDR is optional, but necessary for PIE in general.
6660 	 Fortunately, real-world executables, including PIE executables,
6661 	 always have PT_PHDR present.  PT_PHDR is absent from some shared
6662 	 libraries and from fpc (Free Pascal 2.4) binaries, but neither of
6663 	 those needs or provides DT_DEBUG anyway (fpc binaries are
6664 	 statically linked).
6665
6666 	 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6667
6668 	 GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
6668
6669 return 0;
6670 }
6671
6672 for (i = 0; i < num_phdr; i++)
6673 {
6674 if (is_elf64)
6675 {
6676 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6677
6678 if (p->p_type == PT_DYNAMIC)
6679 return p->p_vaddr + relocation;
6680 }
6681 else
6682 {
6683 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6684
6685 if (p->p_type == PT_DYNAMIC)
6686 return p->p_vaddr + relocation;
6687 }
6688 }
6689
6690 return 0;
6691 }
6692
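/* A worked instance of the relocation computed above, kept compiled
   out and using made-up addresses: if the auxv says the program
   headers sit at 0x555555554040 and PT_PHDR records p_vaddr 0x40, the
   load bias is 0x555555554000, and every p_vaddr in the file shifts by
   that amount.  */
#if 0
#include <assert.h>

static void
example_pie_relocation (void)
{
  unsigned long phdr_memaddr = 0x555555554040UL; /* AT_PHDR from auxv.  */
  unsigned long pt_phdr_vaddr = 0x40UL;          /* p_vaddr of PT_PHDR.  */
  unsigned long pt_dynamic_vaddr = 0x2e00UL;     /* p_vaddr of PT_DYNAMIC.  */

  unsigned long relocation = phdr_memaddr - pt_phdr_vaddr;
  assert (relocation == 0x555555554000UL);

  /* The runtime address of _DYNAMIC is the file vaddr plus the bias.  */
  assert (pt_dynamic_vaddr + relocation == 0x555555556e00UL);
}
#endif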
6693 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6694 can be 0 if the inferior does not yet have the library list initialized.
6695 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6696 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6697
6698 static CORE_ADDR
6699 get_r_debug (const int pid, const int is_elf64)
6700 {
6701 CORE_ADDR dynamic_memaddr;
6702 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6703 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6704 CORE_ADDR map = -1;
6705
6706 dynamic_memaddr = get_dynamic (pid, is_elf64);
6707 if (dynamic_memaddr == 0)
6708 return map;
6709
6710 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6711 {
6712 if (is_elf64)
6713 {
6714 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6715 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6716 union
6717 {
6718 Elf64_Xword map;
6719 unsigned char buf[sizeof (Elf64_Xword)];
6720 }
6721 rld_map;
6722 #endif
6723 #ifdef DT_MIPS_RLD_MAP
6724 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6725 {
6726 if (linux_read_memory (dyn->d_un.d_val,
6727 rld_map.buf, sizeof (rld_map.buf)) == 0)
6728 return rld_map.map;
6729 else
6730 break;
6731 }
6732 #endif /* DT_MIPS_RLD_MAP */
6733 #ifdef DT_MIPS_RLD_MAP_REL
6734 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6735 {
6736 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6737 rld_map.buf, sizeof (rld_map.buf)) == 0)
6738 return rld_map.map;
6739 else
6740 break;
6741 }
6742 #endif /* DT_MIPS_RLD_MAP_REL */
6743
6744 if (dyn->d_tag == DT_DEBUG && map == -1)
6745 map = dyn->d_un.d_val;
6746
6747 if (dyn->d_tag == DT_NULL)
6748 break;
6749 }
6750 else
6751 {
6752 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6753 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6754 union
6755 {
6756 Elf32_Word map;
6757 unsigned char buf[sizeof (Elf32_Word)];
6758 }
6759 rld_map;
6760 #endif
6761 #ifdef DT_MIPS_RLD_MAP
6762 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6763 {
6764 if (linux_read_memory (dyn->d_un.d_val,
6765 rld_map.buf, sizeof (rld_map.buf)) == 0)
6766 return rld_map.map;
6767 else
6768 break;
6769 }
6770 #endif /* DT_MIPS_RLD_MAP */
6771 #ifdef DT_MIPS_RLD_MAP_REL
6772 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6773 {
6774 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6775 rld_map.buf, sizeof (rld_map.buf)) == 0)
6776 return rld_map.map;
6777 else
6778 break;
6779 }
6780 #endif /* DT_MIPS_RLD_MAP_REL */
6781
6782 if (dyn->d_tag == DT_DEBUG && map == -1)
6783 map = dyn->d_un.d_val;
6784
6785 if (dyn->d_tag == DT_NULL)
6786 break;
6787 }
6788
6789 dynamic_memaddr += dyn_size;
6790 }
6791
6792 return map;
6793 }
6794
6795 /* Read one pointer from MEMADDR in the inferior. */
6796
6797 static int
6798 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6799 {
6800 int ret;
6801
6802 /* Go through a union so this works on either big or little endian
6803 hosts, when the inferior's pointer size is smaller than the size
6804      of CORE_ADDR.  It is assumed the inferior's endianness is the
6805      same as the superior's.  */
6806 union
6807 {
6808 CORE_ADDR core_addr;
6809 unsigned int ui;
6810 unsigned char uc;
6811 } addr;
6812
6813 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6814 if (ret == 0)
6815 {
6816 if (ptr_size == sizeof (CORE_ADDR))
6817 *ptr = addr.core_addr;
6818 else if (ptr_size == sizeof (unsigned int))
6819 *ptr = addr.ui;
6820 else
6821 gdb_assert_not_reached ("unhandled pointer size");
6822 }
6823 return ret;
6824 }
6825
6826 struct link_map_offsets
6827 {
6828 /* Offset and size of r_debug.r_version. */
6829 int r_version_offset;
6830
6831 /* Offset and size of r_debug.r_map. */
6832 int r_map_offset;
6833
6834 /* Offset to l_addr field in struct link_map. */
6835 int l_addr_offset;
6836
6837 /* Offset to l_name field in struct link_map. */
6838 int l_name_offset;
6839
6840 /* Offset to l_ld field in struct link_map. */
6841 int l_ld_offset;
6842
6843 /* Offset to l_next field in struct link_map. */
6844 int l_next_offset;
6845
6846 /* Offset to l_prev field in struct link_map. */
6847 int l_prev_offset;
6848 };
6849
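/* Illustrative sketch, kept compiled out, of how the offsets above
   drive the list walk in linux_qxfer_libraries_svr4: starting from
   r_debug.r_map, each node's l_next pointer is read through
   read_one_ptr until it is zero.  The 64-bit l_next offset is
   hard-coded for brevity; the example_ name is hypothetical.  */
#if 0
/* Walk the inferior's link_map chain (64-bit layout) and count the
   entries.  Returns the count, or -1 on a read error.  */
static int
example_count_link_maps (CORE_ADDR r_map)
{
  const int l_next_offset = 24;	/* lmo_64bit_offsets.l_next_offset.  */
  CORE_ADDR lm = r_map;
  int count = 0;

  while (lm != 0)
    {
      CORE_ADDR l_next;

      if (read_one_ptr (lm + l_next_offset, &l_next, 8) != 0)
	return -1;
      count++;
      lm = l_next;
    }
  return count;
}
#endif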
6850 /* Construct qXfer:libraries-svr4:read reply. */
6851
6852 static int
6853 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6854 unsigned const char *writebuf,
6855 CORE_ADDR offset, int len)
6856 {
6857 char *document;
6858 unsigned document_len;
6859 struct process_info_private *const priv = current_process ()->priv;
6860 char filename[PATH_MAX];
6861 int pid, is_elf64;
6862
6863 static const struct link_map_offsets lmo_32bit_offsets =
6864 {
6865 0, /* r_version offset. */
6866 4, /* r_debug.r_map offset. */
6867 0, /* l_addr offset in link_map. */
6868 4, /* l_name offset in link_map. */
6869 8, /* l_ld offset in link_map. */
6870 12, /* l_next offset in link_map. */
6871 16 /* l_prev offset in link_map. */
6872 };
6873
6874 static const struct link_map_offsets lmo_64bit_offsets =
6875 {
6876 0, /* r_version offset. */
6877 8, /* r_debug.r_map offset. */
6878 0, /* l_addr offset in link_map. */
6879 8, /* l_name offset in link_map. */
6880 16, /* l_ld offset in link_map. */
6881 24, /* l_next offset in link_map. */
6882 32 /* l_prev offset in link_map. */
6883 };
6884 const struct link_map_offsets *lmo;
6885 unsigned int machine;
6886 int ptr_size;
6887 CORE_ADDR lm_addr = 0, lm_prev = 0;
6888 int allocated = 1024;
6889 char *p;
6890 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6891 int header_done = 0;
6892
6893 if (writebuf != NULL)
6894 return -2;
6895 if (readbuf == NULL)
6896 return -1;
6897
6898 pid = lwpid_of (current_thread);
6899 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6900 is_elf64 = elf_64_file_p (filename, &machine);
6901 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6902 ptr_size = is_elf64 ? 8 : 4;
6903
6904 while (annex[0] != '\0')
6905 {
6906 const char *sep;
6907 CORE_ADDR *addrp;
6908 int len;
6909
6910 sep = strchr (annex, '=');
6911 if (sep == NULL)
6912 break;
6913
6914 len = sep - annex;
6915 if (len == 5 && startswith (annex, "start"))
6916 addrp = &lm_addr;
6917 else if (len == 4 && startswith (annex, "prev"))
6918 addrp = &lm_prev;
6919 else
6920 {
6921 annex = strchr (sep, ';');
6922 if (annex == NULL)
6923 break;
6924 annex++;
6925 continue;
6926 }
6927
6928 annex = decode_address_to_semicolon (addrp, sep + 1);
6929 }
6930
6931 if (lm_addr == 0)
6932 {
6933 int r_version = 0;
6934
6935 if (priv->r_debug == 0)
6936 priv->r_debug = get_r_debug (pid, is_elf64);
6937
6938       /* We failed to find DT_DEBUG.  This situation will not change
6939 	 for this inferior, so do not retry it.  Report it to GDB as
6940 	 E01; see GDB's solib-svr4.c for the reasons.  */
6941 if (priv->r_debug == (CORE_ADDR) -1)
6942 return -1;
6943
6944 if (priv->r_debug != 0)
6945 {
6946 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6947 (unsigned char *) &r_version,
6948 sizeof (r_version)) != 0
6949 || r_version != 1)
6950 {
6951 warning ("unexpected r_debug version %d", r_version);
6952 }
6953 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6954 &lm_addr, ptr_size) != 0)
6955 {
6956 warning ("unable to read r_map from 0x%lx",
6957 (long) priv->r_debug + lmo->r_map_offset);
6958 }
6959 }
6960 }
6961
6962 document = (char *) xmalloc (allocated);
6963 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6964 p = document + strlen (document);
6965
6966 while (lm_addr
6967 && read_one_ptr (lm_addr + lmo->l_name_offset,
6968 &l_name, ptr_size) == 0
6969 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6970 &l_addr, ptr_size) == 0
6971 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6972 &l_ld, ptr_size) == 0
6973 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6974 &l_prev, ptr_size) == 0
6975 && read_one_ptr (lm_addr + lmo->l_next_offset,
6976 &l_next, ptr_size) == 0)
6977 {
6978 unsigned char libname[PATH_MAX];
6979
6980 if (lm_prev != l_prev)
6981 {
6982 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6983 (long) lm_prev, (long) l_prev);
6984 break;
6985 }
6986
6987       /* Ignore the first entry even if it has a valid name, as the first
6988 	 entry corresponds to the main executable.  The first entry should
6989 	 not be skipped if the dynamic loader was loaded late by a static
6990 	 executable (see the solib-svr4.c parameter ignore_first).  But in
6991 	 that case the main executable does not have PT_DYNAMIC present,
6992 	 and this function has already exited above due to a failed
6993 	 get_r_debug.  */
6993 if (lm_prev == 0)
6994 {
6995 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6996 p = p + strlen (p);
6997 }
6998 else
6999 {
7000 /* Not checking for error because reading may stop before
7001 we've got PATH_MAX worth of characters. */
7002 libname[0] = '\0';
7003 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7004 libname[sizeof (libname) - 1] = '\0';
7005 if (libname[0] != '\0')
7006 {
7007 /* 6x the size for xml_escape_text below. */
7008 size_t len = 6 * strlen ((char *) libname);
7009 char *name;
7010
7011 if (!header_done)
7012 {
7013 /* Terminate `<library-list-svr4'. */
7014 *p++ = '>';
7015 header_done = 1;
7016 }
7017
7018 while (allocated < p - document + len + 200)
7019 {
7020 /* Expand to guarantee sufficient storage. */
7021 uintptr_t document_len = p - document;
7022
7023 document = (char *) xrealloc (document, 2 * allocated);
7024 allocated *= 2;
7025 p = document + document_len;
7026 }
7027
7028 name = xml_escape_text ((char *) libname);
7029 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7030 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7031 name, (unsigned long) lm_addr,
7032 (unsigned long) l_addr, (unsigned long) l_ld);
7033 free (name);
7034 }
7035 }
7036
7037 lm_prev = lm_addr;
7038 lm_addr = l_next;
7039 }
7040
7041 if (!header_done)
7042 {
7043 /* Empty list; terminate `<library-list-svr4'. */
7044 strcpy (p, "/>");
7045 }
7046 else
7047 strcpy (p, "</library-list-svr4>");
7048
7049 document_len = strlen (document);
7050 if (offset < document_len)
7051 document_len -= offset;
7052 else
7053 document_len = 0;
7054 if (len > document_len)
7055 len = document_len;
7056
7057 memcpy (readbuf, document + offset, len);
7058 xfree (document);
7059
7060 return len;
7061 }
7062
7063 #ifdef HAVE_LINUX_BTRACE
7064
7065 /* See to_disable_btrace target method. */
7066
7067 static int
7068 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7069 {
7070 enum btrace_error err;
7071
7072 err = linux_disable_btrace (tinfo);
7073 return (err == BTRACE_ERR_NONE ? 0 : -1);
7074 }
7075
7076 /* Encode an Intel Processor Trace configuration. */
7077
7078 static void
7079 linux_low_encode_pt_config (struct buffer *buffer,
7080 const struct btrace_data_pt_config *config)
7081 {
7082 buffer_grow_str (buffer, "<pt-config>\n");
7083
7084 switch (config->cpu.vendor)
7085 {
7086 case CV_INTEL:
7087 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7088 "model=\"%u\" stepping=\"%u\"/>\n",
7089 config->cpu.family, config->cpu.model,
7090 config->cpu.stepping);
7091 break;
7092
7093 default:
7094 break;
7095 }
7096
7097 buffer_grow_str (buffer, "</pt-config>\n");
7098 }
7099
7100 /* Encode a raw buffer. */
7101
7102 static void
7103 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7104 unsigned int size)
7105 {
7106 if (size == 0)
7107 return;
7108
7109 /* We use hex encoding - see common/rsp-low.h. */
7110 buffer_grow_str (buffer, "<raw>\n");
7111
7112 while (size-- > 0)
7113 {
7114 char elem[2];
7115
7116 elem[0] = tohex ((*data >> 4) & 0xf);
7117 elem[1] = tohex (*data++ & 0xf);
7118
7119 buffer_grow (buffer, elem, 2);
7120 }
7121
7122 buffer_grow_str (buffer, "</raw>\n");
7123 }
7124
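/* Illustrative sketch, kept compiled out: the encoder above emits two
   hex digits per byte via tohex from rsp-low.  The same nibble split
   in plain C, with no gdbserver helpers assumed; the example_ name is
   hypothetical.  */
#if 0
#include <stddef.h>

/* Encode SIZE bytes of DATA as lowercase hex into OUT, which must
   hold at least 2 * SIZE + 1 characters.  */
static void
example_hex_encode (const unsigned char *data, size_t size, char *out)
{
  static const char digits[] = "0123456789abcdef";
  size_t i;

  for (i = 0; i < size; i++)
    {
      out[2 * i] = digits[(data[i] >> 4) & 0xf];
      out[2 * i + 1] = digits[data[i] & 0xf];
    }
  out[2 * size] = '\0';
}
#endif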
7125 /* See to_read_btrace target method. */
7126
7127 static int
7128 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7129 enum btrace_read_type type)
7130 {
7131 struct btrace_data btrace;
7132 struct btrace_block *block;
7133 enum btrace_error err;
7134 int i;
7135
7136 btrace_data_init (&btrace);
7137
7138 err = linux_read_btrace (&btrace, tinfo, type);
7139 if (err != BTRACE_ERR_NONE)
7140 {
7141 if (err == BTRACE_ERR_OVERFLOW)
7142 buffer_grow_str0 (buffer, "E.Overflow.");
7143 else
7144 buffer_grow_str0 (buffer, "E.Generic Error.");
7145
7146 goto err;
7147 }
7148
7149 switch (btrace.format)
7150 {
7151 case BTRACE_FORMAT_NONE:
7152 buffer_grow_str0 (buffer, "E.No Trace.");
7153 goto err;
7154
7155 case BTRACE_FORMAT_BTS:
7156 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7157 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7158
7159 for (i = 0;
7160 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7161 i++)
7162 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7163 paddress (block->begin), paddress (block->end));
7164
7165 buffer_grow_str0 (buffer, "</btrace>\n");
7166 break;
7167
7168 case BTRACE_FORMAT_PT:
7169 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7170 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7171 buffer_grow_str (buffer, "<pt>\n");
7172
7173 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7174
7175 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7176 btrace.variant.pt.size);
7177
7178 buffer_grow_str (buffer, "</pt>\n");
7179 buffer_grow_str0 (buffer, "</btrace>\n");
7180 break;
7181
7182 default:
7183 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7184 goto err;
7185 }
7186
7187 btrace_data_fini (&btrace);
7188 return 0;
7189
7190 err:
7191 btrace_data_fini (&btrace);
7192 return -1;
7193 }
7194
7195 /* See to_btrace_conf target method. */
7196
7197 static int
7198 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7199 struct buffer *buffer)
7200 {
7201 const struct btrace_config *conf;
7202
7203 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7204 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7205
7206 conf = linux_btrace_conf (tinfo);
7207 if (conf != NULL)
7208 {
7209 switch (conf->format)
7210 {
7211 case BTRACE_FORMAT_NONE:
7212 break;
7213
7214 case BTRACE_FORMAT_BTS:
7215 buffer_xml_printf (buffer, "<bts");
7216 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7217 buffer_xml_printf (buffer, " />\n");
7218 break;
7219
7220 case BTRACE_FORMAT_PT:
7221 buffer_xml_printf (buffer, "<pt");
7222 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7223 buffer_xml_printf (buffer, "/>\n");
7224 break;
7225 }
7226 }
7227
7228 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7229 return 0;
7230 }
7231 #endif /* HAVE_LINUX_BTRACE */
7232
7233 /* See nat/linux-nat.h. */
7234
7235 ptid_t
7236 current_lwp_ptid (void)
7237 {
7238 return ptid_of (current_thread);
7239 }
7240
7241 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7242
7243 static int
7244 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7245 {
7246 if (the_low_target.breakpoint_kind_from_pc != NULL)
7247 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7248 else
7249 return default_breakpoint_kind_from_pc (pcptr);
7250 }
7251
7252 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7253
7254 static const gdb_byte *
7255 linux_sw_breakpoint_from_kind (int kind, int *size)
7256 {
7257 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7258
7259 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7260 }
7261
7262 /* Implementation of the target_ops method
7263 "breakpoint_kind_from_current_state". */
7264
7265 static int
7266 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7267 {
7268 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7269 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7270 else
7271 return linux_breakpoint_kind_from_pc (pcptr);
7272 }
7273
7274 /* Default implementation of linux_target_ops method "set_pc" for
7275 32-bit pc register which is literally named "pc". */
7276
7277 void
7278 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7279 {
7280 uint32_t newpc = pc;
7281
7282 supply_register_by_name (regcache, "pc", &newpc);
7283 }
7284
7285 /* Default implementation of linux_target_ops method "get_pc" for
7286 32-bit pc register which is literally named "pc". */
7287
7288 CORE_ADDR
7289 linux_get_pc_32bit (struct regcache *regcache)
7290 {
7291 uint32_t pc;
7292
7293 collect_register_by_name (regcache, "pc", &pc);
7294 if (debug_threads)
7295 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7296 return pc;
7297 }
7298
7299 /* Default implementation of linux_target_ops method "set_pc" for
7300 64-bit pc register which is literally named "pc". */
7301
7302 void
7303 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7304 {
7305 uint64_t newpc = pc;
7306
7307 supply_register_by_name (regcache, "pc", &newpc);
7308 }
7309
7310 /* Default implementation of linux_target_ops method "get_pc" for
7311 64-bit pc register which is literally named "pc". */
7312
7313 CORE_ADDR
7314 linux_get_pc_64bit (struct regcache *regcache)
7315 {
7316 uint64_t pc;
7317
7318 collect_register_by_name (regcache, "pc", &pc);
7319 if (debug_threads)
7320 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7321 return pc;
7322 }
7323
7324
7325 static struct target_ops linux_target_ops = {
7326 linux_create_inferior,
7327 linux_post_create_inferior,
7328 linux_attach,
7329 linux_kill,
7330 linux_detach,
7331 linux_mourn,
7332 linux_join,
7333 linux_thread_alive,
7334 linux_resume,
7335 linux_wait,
7336 linux_fetch_registers,
7337 linux_store_registers,
7338 linux_prepare_to_access_memory,
7339 linux_done_accessing_memory,
7340 linux_read_memory,
7341 linux_write_memory,
7342 linux_look_up_symbols,
7343 linux_request_interrupt,
7344 linux_read_auxv,
7345 linux_supports_z_point_type,
7346 linux_insert_point,
7347 linux_remove_point,
7348 linux_stopped_by_sw_breakpoint,
7349 linux_supports_stopped_by_sw_breakpoint,
7350 linux_stopped_by_hw_breakpoint,
7351 linux_supports_stopped_by_hw_breakpoint,
7352 linux_supports_hardware_single_step,
7353 linux_stopped_by_watchpoint,
7354 linux_stopped_data_address,
7355 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7356 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7357 && defined(PT_TEXT_END_ADDR)
7358 linux_read_offsets,
7359 #else
7360 NULL,
7361 #endif
7362 #ifdef USE_THREAD_DB
7363 thread_db_get_tls_address,
7364 #else
7365 NULL,
7366 #endif
7367 linux_qxfer_spu,
7368 hostio_last_error_from_errno,
7369 linux_qxfer_osdata,
7370 linux_xfer_siginfo,
7371 linux_supports_non_stop,
7372 linux_async,
7373 linux_start_non_stop,
7374 linux_supports_multi_process,
7375 linux_supports_fork_events,
7376 linux_supports_vfork_events,
7377 linux_supports_exec_events,
7378 linux_handle_new_gdb_connection,
7379 #ifdef USE_THREAD_DB
7380 thread_db_handle_monitor_command,
7381 #else
7382 NULL,
7383 #endif
7384 linux_common_core_of_thread,
7385 linux_read_loadmap,
7386 linux_process_qsupported,
7387 linux_supports_tracepoints,
7388 linux_read_pc,
7389 linux_write_pc,
7390 linux_thread_stopped,
7391 NULL,
7392 linux_pause_all,
7393 linux_unpause_all,
7394 linux_stabilize_threads,
7395 linux_install_fast_tracepoint_jump_pad,
7396 linux_emit_ops,
7397 linux_supports_disable_randomization,
7398 linux_get_min_fast_tracepoint_insn_len,
7399 linux_qxfer_libraries_svr4,
7400 linux_supports_agent,
7401 #ifdef HAVE_LINUX_BTRACE
7402 linux_supports_btrace,
7403 linux_enable_btrace,
7404 linux_low_disable_btrace,
7405 linux_low_read_btrace,
7406 linux_low_btrace_conf,
7407 #else
7408 NULL,
7409 NULL,
7410 NULL,
7411 NULL,
7412 NULL,
7413 #endif
7414 linux_supports_range_stepping,
7415 linux_proc_pid_to_exec_file,
7416 linux_mntns_open_cloexec,
7417 linux_mntns_unlink,
7418 linux_mntns_readlink,
7419 linux_breakpoint_kind_from_pc,
7420 linux_sw_breakpoint_from_kind,
7421 linux_proc_tid_get_name,
7422 linux_breakpoint_kind_from_current_state,
7423 linux_supports_software_single_step,
7424 linux_supports_catch_syscall,
7425 linux_get_ipa_tdesc_idx,
7426 };
7427
7428 #ifdef HAVE_LINUX_REGSETS
7429 void
7430 initialize_regsets_info (struct regsets_info *info)
7431 {
7432 for (info->num_regsets = 0;
7433 info->regsets[info->num_regsets].size >= 0;
7434 info->num_regsets++)
7435 ;
7436 }
7437 #endif
7438
7439 void
7440 initialize_low (void)
7441 {
7442 struct sigaction sigchld_action;
7443
7444 memset (&sigchld_action, 0, sizeof (sigchld_action));
7445 set_target_ops (&linux_target_ops);
7446
7447 linux_ptrace_init_warnings ();
7448
7449 sigchld_action.sa_handler = sigchld_handler;
7450 sigemptyset (&sigchld_action.sa_mask);
7451 sigchld_action.sa_flags = SA_RESTART;
7452 sigaction (SIGCHLD, &sigchld_action, NULL);
7453
7454 initialize_low_arch ();
7455
7456 linux_check_ptrace_features ();
7457 }