Fix instruction skipping when using software single step in GDBServer
gdb/gdbserver/linux-low.c (deliverable/binutils-gdb.git)
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #ifndef ELFMAG0
50 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54 #include <elf.h>
55 #endif
56 #include "nat/linux-namespaces.h"
57
58 #ifndef SPUFS_MAGIC
59 #define SPUFS_MAGIC 0x23c9b64e
60 #endif
61
62 #ifdef HAVE_PERSONALITY
63 # include <sys/personality.h>
64 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
65 # define ADDR_NO_RANDOMIZE 0x0040000
66 # endif
67 #endif
68
69 #ifndef O_LARGEFILE
70 #define O_LARGEFILE 0
71 #endif
72
73 /* Some targets did not define these ptrace constants from the start,
74 so gdbserver defines them locally here. In the future, these may
75 be removed after they are added to asm/ptrace.h. */
76 #if !(defined(PT_TEXT_ADDR) \
77 || defined(PT_DATA_ADDR) \
78 || defined(PT_TEXT_END_ADDR))
79 #if defined(__mcoldfire__)
80 /* These are still undefined in 3.10 kernels. */
81 #define PT_TEXT_ADDR 49*4
82 #define PT_DATA_ADDR 50*4
83 #define PT_TEXT_END_ADDR 51*4
84 /* BFIN kernels have defined these since at least 2.6.32. */
85 #elif defined(BFIN)
86 #define PT_TEXT_ADDR 220
87 #define PT_TEXT_END_ADDR 224
88 #define PT_DATA_ADDR 228
89 /* These are still undefined in 3.10 kernels. */
90 #elif defined(__TMS320C6X__)
91 #define PT_TEXT_ADDR (0x10000*4)
92 #define PT_DATA_ADDR (0x10004*4)
93 #define PT_TEXT_END_ADDR (0x10008*4)
94 #endif
95 #endif
96
97 #ifdef HAVE_LINUX_BTRACE
98 # include "nat/linux-btrace.h"
99 # include "btrace-common.h"
100 #endif
101
102 #ifndef HAVE_ELF32_AUXV_T
103 /* Copied from glibc's elf.h. */
104 typedef struct
105 {
106 uint32_t a_type; /* Entry type */
107 union
108 {
109 uint32_t a_val; /* Integer value */
110 /* We used to have pointer elements added here. We cannot do that,
111 though, since it does not work when using 32-bit definitions
112 on 64-bit platforms and vice versa. */
113 } a_un;
114 } Elf32_auxv_t;
115 #endif
116
117 #ifndef HAVE_ELF64_AUXV_T
118 /* Copied from glibc's elf.h. */
119 typedef struct
120 {
121 uint64_t a_type; /* Entry type */
122 union
123 {
124 uint64_t a_val; /* Integer value */
125 /* We used to have pointer elements added here. We cannot do that,
126 though, since it does not work when using 32-bit definitions
127 on 64-bit platforms and vice versa. */
128 } a_un;
129 } Elf64_auxv_t;
130 #endif
131
132 /* Does the current host support PTRACE_GETREGSET? */
133 int have_ptrace_getregset = -1;
134
135 /* LWP accessors. */
136
137 /* See nat/linux-nat.h. */
138
139 ptid_t
140 ptid_of_lwp (struct lwp_info *lwp)
141 {
142 return ptid_of (get_lwp_thread (lwp));
143 }
144
145 /* See nat/linux-nat.h. */
146
147 void
148 lwp_set_arch_private_info (struct lwp_info *lwp,
149 struct arch_lwp_info *info)
150 {
151 lwp->arch_private = info;
152 }
153
154 /* See nat/linux-nat.h. */
155
156 struct arch_lwp_info *
157 lwp_arch_private_info (struct lwp_info *lwp)
158 {
159 return lwp->arch_private;
160 }
161
162 /* See nat/linux-nat.h. */
163
164 int
165 lwp_is_stopped (struct lwp_info *lwp)
166 {
167 return lwp->stopped;
168 }
169
170 /* See nat/linux-nat.h. */
171
172 enum target_stop_reason
173 lwp_stop_reason (struct lwp_info *lwp)
174 {
175 return lwp->stop_reason;
176 }
177
178 /* A list of all unknown processes which receive stop signals. Some
179 other process will presumably claim each of these as forked
180 children momentarily. */
181
182 struct simple_pid_list
183 {
184 /* The process ID. */
185 int pid;
186
187 /* The status as reported by waitpid. */
188 int status;
189
190 /* Next in chain. */
191 struct simple_pid_list *next;
192 };
193 struct simple_pid_list *stopped_pids;
194
195 /* Trivial list manipulation functions to keep track of a list of new
196 stopped processes. */
197
198 static void
199 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
200 {
201 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
202
203 new_pid->pid = pid;
204 new_pid->status = status;
205 new_pid->next = *listp;
206 *listp = new_pid;
207 }
208
209 static int
210 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
211 {
212 struct simple_pid_list **p;
213
214 for (p = listp; *p != NULL; p = &(*p)->next)
215 if ((*p)->pid == pid)
216 {
217 struct simple_pid_list *next = (*p)->next;
218
219 *statusp = (*p)->status;
220 xfree (*p);
221 *p = next;
222 return 1;
223 }
224 return 0;
225 }
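
/* Illustrative usage sketch, not part of the original file: when a
   fork/clone event names a new child, that child's initial SIGSTOP may
   already have been collected by a wait on some other LWP and banked in
   STOPPED_PIDS; claim it from the list instead of calling waitpid
   again.  EXAMPLE_CLAIM_EARLY_STOP is a hypothetical helper shown only
   to demonstrate the list's intended use.  */

static int
example_claim_early_stop (int new_pid)
{
  int status;

  if (pull_pid_from_list (&stopped_pids, new_pid, &status))
    return status;		/* The banked waitpid status.  */
  return -1;			/* Not seen yet; the caller must wait.  */
}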
226
227 enum stopping_threads_kind
228 {
229 /* Not stopping threads presently. */
230 NOT_STOPPING_THREADS,
231
232 /* Stopping threads. */
233 STOPPING_THREADS,
234
235 /* Stopping and suspending threads. */
236 STOPPING_AND_SUSPENDING_THREADS
237 };
238
239 /* This is set while stop_all_lwps is in effect. */
240 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
241
242 /* FIXME make into a target method? */
243 int using_threads = 1;
244
245 /* True if we're presently stabilizing threads (moving them out of
246 jump pads). */
247 static int stabilizing_threads;
248
249 static void linux_resume_one_lwp (struct lwp_info *lwp,
250 int step, int signal, siginfo_t *info);
251 static void linux_resume (struct thread_resume *resume_info, size_t n);
252 static void stop_all_lwps (int suspend, struct lwp_info *except);
253 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
254 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
255 int *wstat, int options);
256 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
257 static struct lwp_info *add_lwp (ptid_t ptid);
258 static void linux_mourn (struct process_info *process);
259 static int linux_stopped_by_watchpoint (void);
260 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
261 static int lwp_is_marked_dead (struct lwp_info *lwp);
262 static void proceed_all_lwps (void);
263 static int finish_step_over (struct lwp_info *lwp);
264 static int kill_lwp (unsigned long lwpid, int signo);
265 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
266 static void complete_ongoing_step_over (void);
267
268 /* When the event-loop is doing a step-over, this points at the thread
269 being stepped. */
270 ptid_t step_over_bkpt;
271
272 /* True if the low target can hardware single-step. Such targets
273 don't need a BREAKPOINT_REINSERT_ADDR callback. */
274
275 static int
276 can_hardware_single_step (void)
277 {
278 return (the_low_target.breakpoint_reinsert_addr == NULL);
279 }
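
/* Illustrative sketch, not part of the original file: on targets that
   can hardware single-step, advancing one instruction is a single
   PTRACE_SINGLESTEP request followed by collecting the SIGTRAP stop.
   Targets that cannot (the software single-step case this change is
   about) instead plant a breakpoint at the address computed by the low
   target's breakpoint_reinsert_addr callback and resume with
   PTRACE_CONT.  EXAMPLE_HW_SINGLE_STEP is a hypothetical helper; it
   relies only on headers this file already includes.  */

static int
example_hw_single_step (pid_t tracee)
{
  int status;

  if (ptrace (PTRACE_SINGLESTEP, tracee, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return -1;

  /* The tracee stops with SIGTRAP after exactly one instruction.  */
  if (waitpid (tracee, &status, __WALL) != tracee)
    return -1;

  return (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP) ? 0 : -1;
}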
280
281 /* True if the low target supports memory breakpoints. If so, we'll
282 have a GET_PC implementation. */
283
284 static int
285 supports_breakpoints (void)
286 {
287 return (the_low_target.get_pc != NULL);
288 }
289
290 /* Returns true if this target can support fast tracepoints. This
291 does not mean that the in-process agent has been loaded in the
292 inferior. */
293
294 static int
295 supports_fast_tracepoints (void)
296 {
297 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
298 }
299
300 /* True if LWP is stopped in its stepping range. */
301
302 static int
303 lwp_in_step_range (struct lwp_info *lwp)
304 {
305 CORE_ADDR pc = lwp->stop_pc;
306
307 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
308 }
309
310 struct pending_signals
311 {
312 int signal;
313 siginfo_t info;
314 struct pending_signals *prev;
315 };
316
317 /* The read/write ends of the pipe registered as a waitable file in the
318 event loop. */
319 static int linux_event_pipe[2] = { -1, -1 };
320
321 /* True if we're currently in async mode. */
322 #define target_is_async_p() (linux_event_pipe[0] != -1)
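
/* Illustrative sketch, not part of the original file: the pipe above
   implements the classic self-pipe trick.  Whoever notices a target
   event writes one byte to the write end; the event loop waits on the
   read end like on any other file descriptor, turning asynchronous
   target activity into an ordinary readable-fd event.
   EXAMPLE_MARK_EVENT_PIPE is a hypothetical helper sketching the
   producer side.  */

static void
example_mark_event_pipe (void)
{
  if (target_is_async_p ())
    {
      char c = '+';

      /* A short or failed write is fine; one pending byte already
	 makes the read end readable.  */
      if (write (linux_event_pipe[1], &c, 1) < 0)
	{
	  /* Errors ignored. */;
	}
    }
}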
323
324 static void send_sigstop (struct lwp_info *lwp);
325 static void wait_for_sigstop (void);
326
327 /* Return non-zero if HEADER is a 64-bit ELF file. */
328
329 static int
330 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
331 {
332 if (header->e_ident[EI_MAG0] == ELFMAG0
333 && header->e_ident[EI_MAG1] == ELFMAG1
334 && header->e_ident[EI_MAG2] == ELFMAG2
335 && header->e_ident[EI_MAG3] == ELFMAG3)
336 {
337 *machine = header->e_machine;
338 return header->e_ident[EI_CLASS] == ELFCLASS64;
339
340 }
341 *machine = EM_NONE;
342 return -1;
343 }
344
345 /* Return non-zero if FILE is a 64-bit ELF file,
346 zero if the file is not a 64-bit ELF file,
347 and -1 if the file is not accessible or doesn't exist. */
348
349 static int
350 elf_64_file_p (const char *file, unsigned int *machine)
351 {
352 Elf64_Ehdr header;
353 int fd;
354
355 fd = open (file, O_RDONLY);
356 if (fd < 0)
357 return -1;
358
359 if (read (fd, &header, sizeof (header)) != sizeof (header))
360 {
361 close (fd);
362 return 0;
363 }
364 close (fd);
365
366 return elf_64_header_p (&header, machine);
367 }
368
369 /* Accepts an integer PID; returns true if the executable that PID
370 is running is a 64-bit ELF file. */
371
372 int
373 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
374 {
375 char file[PATH_MAX];
376
377 sprintf (file, "/proc/%d/exe", pid);
378 return elf_64_file_p (file, machine);
379 }
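
/* Usage sketch, not part of the original file: the return value is a
   tri-state (non-zero for a 64-bit ELF, zero for a non-64-bit ELF, -1
   if the file is not accessible), so callers must distinguish "32-bit"
   from "unreadable".  EXAMPLE_EXE_IS_64_BIT is a hypothetical
   wrapper.  */

static int
example_exe_is_64_bit (int pid)
{
  unsigned int machine;

  return linux_pid_exe_is_elf_64_file (pid, &machine) == 1;
}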
380
381 static void
382 delete_lwp (struct lwp_info *lwp)
383 {
384 struct thread_info *thr = get_lwp_thread (lwp);
385
386 if (debug_threads)
387 debug_printf ("deleting %ld\n", lwpid_of (thr));
388
389 remove_thread (thr);
390 free (lwp->arch_private);
391 free (lwp);
392 }
393
394 /* Add a process to the common process list, and set its private
395 data. */
396
397 static struct process_info *
398 linux_add_process (int pid, int attached)
399 {
400 struct process_info *proc;
401
402 proc = add_process (pid, attached);
403 proc->priv = XCNEW (struct process_info_private);
404
405 if (the_low_target.new_process != NULL)
406 proc->priv->arch_private = the_low_target.new_process ();
407
408 return proc;
409 }
410
411 static CORE_ADDR get_pc (struct lwp_info *lwp);
412
413 /* Implement the arch_setup target_ops method. */
414
415 static void
416 linux_arch_setup (void)
417 {
418 the_low_target.arch_setup ();
419 }
420
421 /* Call the target arch_setup function on THREAD. */
422
423 static void
424 linux_arch_setup_thread (struct thread_info *thread)
425 {
426 struct thread_info *saved_thread;
427
428 saved_thread = current_thread;
429 current_thread = thread;
430
431 linux_arch_setup ();
432
433 current_thread = saved_thread;
434 }
435
436 /* Handle a GNU/Linux extended wait response. If we see a clone
437 event, we add the new LWP to our list and return 1 so the trap is
438 not reported to higher layers; fork, vfork, and exec events are
439 reported by returning 0. If we see an exec event, we will modify
440 ORIG_EVENT_LWP to point to a new LWP representing the new program. */
441
442 static int
443 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
444 {
445 struct lwp_info *event_lwp = *orig_event_lwp;
446 int event = linux_ptrace_get_extended_event (wstat);
447 struct thread_info *event_thr = get_lwp_thread (event_lwp);
448 struct lwp_info *new_lwp;
449
450 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
451
452 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
453 || (event == PTRACE_EVENT_CLONE))
454 {
455 ptid_t ptid;
456 unsigned long new_pid;
457 int ret, status;
458
459 /* Get the pid of the new lwp. */
460 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
461 &new_pid);
462
463 /* If we haven't already seen the new PID stop, wait for it now. */
464 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
465 {
466 /* The new child has a pending SIGSTOP. We can't affect it until it
467 hits the SIGSTOP, but we're already attached. */
468
469 ret = my_waitpid (new_pid, &status, __WALL);
470
471 if (ret == -1)
472 perror_with_name ("waiting for new child");
473 else if (ret != new_pid)
474 warning ("wait returned unexpected PID %d", ret);
475 else if (!WIFSTOPPED (status))
476 warning ("wait returned unexpected status 0x%x", status);
477 }
478
479 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
480 {
481 struct process_info *parent_proc;
482 struct process_info *child_proc;
483 struct lwp_info *child_lwp;
484 struct thread_info *child_thr;
485 struct target_desc *tdesc;
486
487 ptid = ptid_build (new_pid, new_pid, 0);
488
489 if (debug_threads)
490 {
491 debug_printf ("HEW: Got fork event from LWP %ld, "
492 "new child is %d\n",
493 ptid_get_lwp (ptid_of (event_thr)),
494 ptid_get_pid (ptid));
495 }
496
497 /* Add the new process to the tables and clone the breakpoint
498 lists of the parent. We need to do this even if the new process
499 will be detached, since we will need the process object and the
500 breakpoints to remove any breakpoints from memory when we
501 detach, and the client side will access registers. */
502 child_proc = linux_add_process (new_pid, 0);
503 gdb_assert (child_proc != NULL);
504 child_lwp = add_lwp (ptid);
505 gdb_assert (child_lwp != NULL);
506 child_lwp->stopped = 1;
507 child_lwp->must_set_ptrace_flags = 1;
508 child_lwp->status_pending_p = 0;
509 child_thr = get_lwp_thread (child_lwp);
510 child_thr->last_resume_kind = resume_stop;
511 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
512
513 /* If we're suspending all threads, leave this one suspended
514 too. */
515 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
516 {
517 if (debug_threads)
518 debug_printf ("HEW: leaving child suspended\n");
519 child_lwp->suspended = 1;
520 }
521
522 parent_proc = get_thread_process (event_thr);
523 child_proc->attached = parent_proc->attached;
524 clone_all_breakpoints (&child_proc->breakpoints,
525 &child_proc->raw_breakpoints,
526 parent_proc->breakpoints);
527
528 tdesc = XNEW (struct target_desc);
529 copy_target_description (tdesc, parent_proc->tdesc);
530 child_proc->tdesc = tdesc;
531
532 /* Clone arch-specific process data. */
533 if (the_low_target.new_fork != NULL)
534 the_low_target.new_fork (parent_proc, child_proc);
535
536 /* Save fork info in the parent thread. */
537 if (event == PTRACE_EVENT_FORK)
538 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
539 else if (event == PTRACE_EVENT_VFORK)
540 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
541
542 event_lwp->waitstatus.value.related_pid = ptid;
543
544 /* The status_pending field contains bits denoting the
545 extended event, so when the pending event is handled,
546 the handler will look at lwp->waitstatus. */
547 event_lwp->status_pending_p = 1;
548 event_lwp->status_pending = wstat;
549
550 /* Report the event. */
551 return 0;
552 }
553
554 if (debug_threads)
555 debug_printf ("HEW: Got clone event "
556 "from LWP %ld, new child is LWP %ld\n",
557 lwpid_of (event_thr), new_pid);
558
559 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
560 new_lwp = add_lwp (ptid);
561
562 /* Either we're going to immediately resume the new thread
563 or leave it stopped. linux_resume_one_lwp is a nop if it
564 thinks the thread is currently running, so set this first
565 before calling linux_resume_one_lwp. */
566 new_lwp->stopped = 1;
567
568 /* If we're suspending all threads, leave this one suspended
569 too. */
570 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
571 new_lwp->suspended = 1;
572
573 /* Normally we will get the pending SIGSTOP. But in some cases
574 we might get another signal delivered to the group first.
575 If we do get another signal, be sure not to lose it. */
576 if (WSTOPSIG (status) != SIGSTOP)
577 {
578 new_lwp->stop_expected = 1;
579 new_lwp->status_pending_p = 1;
580 new_lwp->status_pending = status;
581 }
582 else if (report_thread_events)
583 {
584 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
585 new_lwp->status_pending_p = 1;
586 new_lwp->status_pending = status;
587 }
588
589 /* Don't report the event. */
590 return 1;
591 }
592 else if (event == PTRACE_EVENT_VFORK_DONE)
593 {
594 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
595
596 /* Report the event. */
597 return 0;
598 }
599 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
600 {
601 struct process_info *proc;
602 ptid_t event_ptid;
603 pid_t event_pid;
604
605 if (debug_threads)
606 {
607 debug_printf ("HEW: Got exec event from LWP %ld\n",
608 lwpid_of (event_thr));
609 }
610
611 /* Get the event ptid. */
612 event_ptid = ptid_of (event_thr);
613 event_pid = ptid_get_pid (event_ptid);
614
615 /* Delete the execing process and all its threads. */
616 proc = get_thread_process (event_thr);
617 linux_mourn (proc);
618 current_thread = NULL;
619
620 /* Create a new process/lwp/thread. */
621 proc = linux_add_process (event_pid, 0);
622 event_lwp = add_lwp (event_ptid);
623 event_thr = get_lwp_thread (event_lwp);
624 gdb_assert (current_thread == event_thr);
625 linux_arch_setup_thread (event_thr);
626
627 /* Set the event status. */
628 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
629 event_lwp->waitstatus.value.execd_pathname
630 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
631
632 /* Mark the exec status as pending. */
633 event_lwp->stopped = 1;
634 event_lwp->status_pending_p = 1;
635 event_lwp->status_pending = wstat;
636 event_thr->last_resume_kind = resume_continue;
637 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
638
639 /* Report the event. */
640 *orig_event_lwp = event_lwp;
641 return 0;
642 }
643
644 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
645 }
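
/* Background sketch, not part of the original file: events like the
   ones handled above only arrive if the matching PTRACE_O_* options
   were set on the tracee, and the event code travels in the high bits
   of the waitpid status.  EXAMPLE_ENABLE_AND_DECODE is a hypothetical
   helper showing the raw mechanism in isolation.  */

static void
example_enable_and_decode (pid_t tracee, int wstat)
{
  /* Ask the kernel to report fork/vfork/clone/exec in this tracee.  */
  ptrace (PTRACE_SETOPTIONS, tracee, (PTRACE_TYPE_ARG3) 0,
	  (PTRACE_TYPE_ARG4) (long) (PTRACE_O_TRACEFORK
				     | PTRACE_O_TRACEVFORK
				     | PTRACE_O_TRACECLONE
				     | PTRACE_O_TRACEEXEC));

  /* At a later stop, the extended event hides above the signal.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
    {
      int event = (wstat >> 16) & 0xffff;

      if (event == PTRACE_EVENT_CLONE)
	{
	  unsigned long new_lwpid;

	  /* The new thread's id is carried in the event message.  */
	  ptrace (PTRACE_GETEVENTMSG, tracee, (PTRACE_TYPE_ARG3) 0,
		  &new_lwpid);
	}
    }
}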
646
647 /* Return the PC as read from the regcache of LWP, without any
648 adjustment. */
649
650 static CORE_ADDR
651 get_pc (struct lwp_info *lwp)
652 {
653 struct thread_info *saved_thread;
654 struct regcache *regcache;
655 CORE_ADDR pc;
656
657 if (the_low_target.get_pc == NULL)
658 return 0;
659
660 saved_thread = current_thread;
661 current_thread = get_lwp_thread (lwp);
662
663 regcache = get_thread_regcache (current_thread, 1);
664 pc = (*the_low_target.get_pc) (regcache);
665
666 if (debug_threads)
667 debug_printf ("pc is 0x%lx\n", (long) pc);
668
669 current_thread = saved_thread;
670 return pc;
671 }
672
673 /* This function should only be called if LWP got a SIGTRAP.
674 The SIGTRAP could mean several things.
675
676 On i386, where decr_pc_after_break is non-zero:
677
678 If we were single-stepping this process using PTRACE_SINGLESTEP, we
679 will get only the one SIGTRAP. The value of $eip will be the next
680 instruction. If the instruction we stepped over was a breakpoint,
681 we need to decrement the PC.
682
683 If we continue the process using PTRACE_CONT, we will get a
684 SIGTRAP when we hit a breakpoint. The value of $eip will be
685 the instruction after the breakpoint (i.e. needs to be
686 decremented). If we report the SIGTRAP to GDB, we must also
687 report the undecremented PC. If the breakpoint is removed, we
688 must resume at the decremented PC.
689
690 On a non-decr_pc_after_break machine with hardware or kernel
691 single-step:
692
693 If we either single-step a breakpoint instruction, or continue and
694 hit a breakpoint instruction, our PC will point at the breakpoint
695 instruction. */
696
697 static int
698 check_stopped_by_breakpoint (struct lwp_info *lwp)
699 {
700 CORE_ADDR pc;
701 CORE_ADDR sw_breakpoint_pc;
702 struct thread_info *saved_thread;
703 #if USE_SIGTRAP_SIGINFO
704 siginfo_t siginfo;
705 #endif
706
707 if (the_low_target.get_pc == NULL)
708 return 0;
709
710 pc = get_pc (lwp);
711 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
712
713 /* breakpoint_at reads from the current thread. */
714 saved_thread = current_thread;
715 current_thread = get_lwp_thread (lwp);
716
717 #if USE_SIGTRAP_SIGINFO
718 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
719 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
720 {
721 if (siginfo.si_signo == SIGTRAP)
722 {
723 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
724 {
725 if (debug_threads)
726 {
727 struct thread_info *thr = get_lwp_thread (lwp);
728
729 debug_printf ("CSBB: %s stopped by software breakpoint\n",
730 target_pid_to_str (ptid_of (thr)));
731 }
732
733 /* Back up the PC if necessary. */
734 if (pc != sw_breakpoint_pc)
735 {
736 struct regcache *regcache
737 = get_thread_regcache (current_thread, 1);
738 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
739 }
740
741 lwp->stop_pc = sw_breakpoint_pc;
742 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
743 current_thread = saved_thread;
744 return 1;
745 }
746 else if (siginfo.si_code == TRAP_HWBKPT)
747 {
748 if (debug_threads)
749 {
750 struct thread_info *thr = get_lwp_thread (lwp);
751
752 debug_printf ("CSBB: %s stopped by hardware "
753 "breakpoint/watchpoint\n",
754 target_pid_to_str (ptid_of (thr)));
755 }
756
757 lwp->stop_pc = pc;
758 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
759 current_thread = saved_thread;
760 return 1;
761 }
762 else if (siginfo.si_code == TRAP_TRACE)
763 {
764 if (debug_threads)
765 {
766 struct thread_info *thr = get_lwp_thread (lwp);
767
768 debug_printf ("CSBB: %s stopped by trace\n",
769 target_pid_to_str (ptid_of (thr)));
770 }
771
772 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
773 }
774 }
775 }
776 #else
777 /* We may have just stepped a breakpoint instruction. E.g., in
778 non-stop mode, GDB first tells the thread A to step a range, and
779 then the user inserts a breakpoint inside the range. In that
780 case we need to report the breakpoint PC. */
781 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
782 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
783 {
784 if (debug_threads)
785 {
786 struct thread_info *thr = get_lwp_thread (lwp);
787
788 debug_printf ("CSBB: %s stopped by software breakpoint\n",
789 target_pid_to_str (ptid_of (thr)));
790 }
791
792 /* Back up the PC if necessary. */
793 if (pc != sw_breakpoint_pc)
794 {
795 struct regcache *regcache
796 = get_thread_regcache (current_thread, 1);
797 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
798 }
799
800 lwp->stop_pc = sw_breakpoint_pc;
801 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
802 current_thread = saved_thread;
803 return 1;
804 }
805
806 if (hardware_breakpoint_inserted_here (pc))
807 {
808 if (debug_threads)
809 {
810 struct thread_info *thr = get_lwp_thread (lwp);
811
812 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
813 target_pid_to_str (ptid_of (thr)));
814 }
815
816 lwp->stop_pc = pc;
817 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
818 current_thread = saved_thread;
819 return 1;
820 }
821 #endif
822
823 current_thread = saved_thread;
824 return 0;
825 }
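
/* Concrete example, not part of the original file and specific to
   x86-64: a software breakpoint is the one-byte int3 (0xcc), and the
   kernel reports the trap with the PC already past it, which is why
   decr_pc_after_break is 1 there and the code above rewinds the PC.
   EXAMPLE_BACK_UP_PC_AFTER_INT3 is a hypothetical helper using the
   raw register interface.  */

#ifdef __x86_64__
#include <sys/user.h>

static void
example_back_up_pc_after_int3 (pid_t tracee)
{
  struct user_regs_struct regs;

  if (ptrace (PTRACE_GETREGS, tracee, (PTRACE_TYPE_ARG3) 0, &regs) == 0)
    {
      regs.rip -= 1;		/* Step back over the 0xcc byte.  */
      ptrace (PTRACE_SETREGS, tracee, (PTRACE_TYPE_ARG3) 0, &regs);
    }
}
#endif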
826
827 static struct lwp_info *
828 add_lwp (ptid_t ptid)
829 {
830 struct lwp_info *lwp;
831
832 lwp = XCNEW (struct lwp_info);
833
834 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
835
836 if (the_low_target.new_thread != NULL)
837 the_low_target.new_thread (lwp);
838
839 lwp->thread = add_thread (ptid, lwp);
840
841 return lwp;
842 }
843
844 /* Start an inferior process and return its pid.
845 ALLARGS is a vector of program-name and args. */
846
847 static int
848 linux_create_inferior (char *program, char **allargs)
849 {
850 struct lwp_info *new_lwp;
851 int pid;
852 ptid_t ptid;
853 struct cleanup *restore_personality
854 = maybe_disable_address_space_randomization (disable_randomization);
855
856 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
857 pid = vfork ();
858 #else
859 pid = fork ();
860 #endif
861 if (pid < 0)
862 perror_with_name ("fork");
863
864 if (pid == 0)
865 {
866 close_most_fds ();
867 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
868
869 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
870 signal (__SIGRTMIN + 1, SIG_DFL);
871 #endif
872
873 setpgid (0, 0);
874
875 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
876 stdout to stderr so that inferior i/o doesn't corrupt the connection.
877 Also, redirect stdin to /dev/null. */
878 if (remote_connection_is_stdio ())
879 {
880 close (0);
881 open ("/dev/null", O_RDONLY);
882 dup2 (2, 1);
883 if (write (2, "stdin/stdout redirected\n",
884 sizeof ("stdin/stdout redirected\n") - 1) < 0)
885 {
886 /* Errors ignored. */;
887 }
888 }
889
890 execv (program, allargs);
891 if (errno == ENOENT)
892 execvp (program, allargs);
893
894 fprintf (stderr, "Cannot exec %s: %s.\n", program,
895 strerror (errno));
896 fflush (stderr);
897 _exit (0177);
898 }
899
900 do_cleanups (restore_personality);
901
902 linux_add_process (pid, 0);
903
904 ptid = ptid_build (pid, pid, 0);
905 new_lwp = add_lwp (ptid);
906 new_lwp->must_set_ptrace_flags = 1;
907
908 return pid;
909 }
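
/* Stand-alone sketch of the launch handshake above, not part of the
   original file: the child volunteers for tracing with PTRACE_TRACEME,
   so the subsequent exec stops it with SIGTRAP before the program's
   first instruction, where the parent collects it.  "/bin/true" is an
   arbitrary example program.  */

static pid_t
example_traceme_launch (void)
{
  pid_t child = fork ();

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0);
      execl ("/bin/true", "true", (char *) NULL);
      _exit (127);		/* Only reached if the exec failed.  */
    }
  else if (child > 0)
    {
      int status;

      /* The child reports a SIGTRAP stop at the exec.  */
      waitpid (child, &status, 0);
    }
  return child;
}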
910
911 /* Attach to an inferior process. Returns 0 on success, ERRNO on
912 error. */
913
914 int
915 linux_attach_lwp (ptid_t ptid)
916 {
917 struct lwp_info *new_lwp;
918 int lwpid = ptid_get_lwp (ptid);
919
920 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
921 != 0)
922 return errno;
923
924 new_lwp = add_lwp (ptid);
925
926 /* We need to wait for SIGSTOP before being able to make the next
927 ptrace call on this LWP. */
928 new_lwp->must_set_ptrace_flags = 1;
929
930 if (linux_proc_pid_is_stopped (lwpid))
931 {
932 if (debug_threads)
933 debug_printf ("Attached to a stopped process\n");
934
935 /* The process is definitely stopped. It is in a job control
936 stop, unless the kernel predates the TASK_STOPPED /
937 TASK_TRACED distinction, in which case it might be in a
938 ptrace stop. Make sure it is in a ptrace stop; from there we
939 can kill it, signal it, et cetera.
940
941 First make sure there is a pending SIGSTOP. Since we are
942 already attached, the process can not transition from stopped
943 to running without a PTRACE_CONT; so we know this signal will
944 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
945 probably already in the queue (unless this kernel is old
946 enough to use TASK_STOPPED for ptrace stops); but since
947 SIGSTOP is not an RT signal, it can only be queued once. */
948 kill_lwp (lwpid, SIGSTOP);
949
950 /* Finally, resume the stopped process. This will deliver the
951 SIGSTOP (or a higher priority signal, just like normal
952 PTRACE_ATTACH), which we'll catch later on. */
953 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
954 }
955
956 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
957 brings it to a halt.
958
959 There are several cases to consider here:
960
961 1) gdbserver has already attached to the process and is being notified
962 of a new thread that is being created.
963 In this case we should ignore that SIGSTOP and resume the
964 process. This is handled below by setting stop_expected = 1,
965 and the fact that add_thread sets last_resume_kind ==
966 resume_continue.
967
968 2) This is the first thread (the process thread), and we're attaching
969 to it via attach_inferior.
970 In this case we want the process thread to stop.
971 This is handled by having linux_attach set last_resume_kind ==
972 resume_stop after we return.
973
974 If the pid we are attaching to is also the tgid, we attach to and
975 stop all the existing threads. Otherwise, we attach to pid and
976 ignore any other threads in the same group as this pid.
977
978 3) GDB is connecting to gdbserver and is requesting an enumeration of all
979 existing threads.
980 In this case we want the thread to stop.
981 FIXME: This case is currently not properly handled.
982 We should wait for the SIGSTOP but don't. Things work apparently
983 because enough time passes between when we ptrace (ATTACH) and when
984 gdb makes the next ptrace call on the thread.
985
986 On the other hand, if we are currently trying to stop all threads, we
987 should treat the new thread as if we had sent it a SIGSTOP. This works
988 because we are guaranteed that the add_lwp call above added us to the
989 end of the list, and so the new thread has not yet reached
990 wait_for_sigstop (but will). */
991 new_lwp->stop_expected = 1;
992
993 return 0;
994 }
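
/* Stand-alone sketch of the attach handshake discussed above, not
   part of the original file: PTRACE_ATTACH queues a SIGSTOP for the
   target, and the attacher must collect that stop before it can make
   further ptrace requests.  EXAMPLE_ATTACH_AND_WAIT is a hypothetical
   helper.  */

static int
example_attach_and_wait (int lwpid)
{
  int status;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return errno;

  /* __WALL also collects stops of clone children.  */
  if (waitpid (lwpid, &status, __WALL) != lwpid)
    return errno;

  return WIFSTOPPED (status) ? 0 : -1;
}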
995
996 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
997 already attached. Returns true if a new LWP is found, false
998 otherwise. */
999
1000 static int
1001 attach_proc_task_lwp_callback (ptid_t ptid)
1002 {
1003 /* Is this a new thread? */
1004 if (find_thread_ptid (ptid) == NULL)
1005 {
1006 int lwpid = ptid_get_lwp (ptid);
1007 int err;
1008
1009 if (debug_threads)
1010 debug_printf ("Found new lwp %d\n", lwpid);
1011
1012 err = linux_attach_lwp (ptid);
1013
1014 /* Be quiet if we simply raced with the thread exiting. EPERM
1015 is returned if the thread's task still exists, and is marked
1016 as exited or zombie, as well as other conditions, so in that
1017 case, confirm the status in /proc/PID/status. */
1018 if (err == ESRCH
1019 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1020 {
1021 if (debug_threads)
1022 {
1023 debug_printf ("Cannot attach to lwp %d: "
1024 "thread is gone (%d: %s)\n",
1025 lwpid, err, strerror (err));
1026 }
1027 }
1028 else if (err != 0)
1029 {
1030 warning (_("Cannot attach to lwp %d: %s"),
1031 lwpid,
1032 linux_ptrace_attach_fail_reason_string (ptid, err));
1033 }
1034
1035 return 1;
1036 }
1037 return 0;
1038 }
1039
1040 static void async_file_mark (void);
1041
1042 /* Attach to PID. If PID is the tgid, attach to it and all
1043 of its threads. */
1044
1045 static int
1046 linux_attach (unsigned long pid)
1047 {
1048 struct process_info *proc;
1049 struct thread_info *initial_thread;
1050 ptid_t ptid = ptid_build (pid, pid, 0);
1051 int err;
1052
1053 /* Attach to PID. We will check for other threads
1054 soon. */
1055 err = linux_attach_lwp (ptid);
1056 if (err != 0)
1057 error ("Cannot attach to process %ld: %s",
1058 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1059
1060 proc = linux_add_process (pid, 1);
1061
1062 /* Don't ignore the initial SIGSTOP if we just attached to this
1063 process. It will be collected by wait shortly. */
1064 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1065 initial_thread->last_resume_kind = resume_stop;
1066
1067 /* We must attach to every LWP. If /proc is mounted, use that to
1068 find them now. On the one hand, the inferior may be using raw
1069 clone instead of using pthreads. On the other hand, even if it
1070 is using pthreads, GDB may not be connected yet (thread_db needs
1071 to do symbol lookups, through qSymbol). Also, thread_db walks
1072 structures in the inferior's address space to find the list of
1073 threads/LWPs, and those structures may well be corrupted. Note
1074 that once thread_db is loaded, we'll still use it to list threads
1075 and associate pthread info with each LWP. */
1076 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1077
1078 /* GDB will shortly read the xml target description for this
1079 process, to figure out the process' architecture. But the target
1080 description is only filled in when the first process/thread in
1081 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1082 that now, otherwise, if GDB is fast enough, it could read the
1083 target description _before_ that initial stop. */
1084 if (non_stop)
1085 {
1086 struct lwp_info *lwp;
1087 int wstat, lwpid;
1088 ptid_t pid_ptid = pid_to_ptid (pid);
1089
1090 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1091 &wstat, __WALL);
1092 gdb_assert (lwpid > 0);
1093
1094 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1095
1096 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1097 {
1098 lwp->status_pending_p = 1;
1099 lwp->status_pending = wstat;
1100 }
1101
1102 initial_thread->last_resume_kind = resume_continue;
1103
1104 async_file_mark ();
1105
1106 gdb_assert (proc->tdesc != NULL);
1107 }
1108
1109 return 0;
1110 }
1111
1112 struct counter
1113 {
1114 int pid;
1115 int count;
1116 };
1117
1118 static int
1119 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1120 {
1121 struct counter *counter = (struct counter *) args;
1122
1123 if (ptid_get_pid (entry->id) == counter->pid)
1124 {
1125 if (++counter->count > 1)
1126 return 1;
1127 }
1128
1129 return 0;
1130 }
1131
1132 static int
1133 last_thread_of_process_p (int pid)
1134 {
1135 struct counter counter = { pid , 0 };
1136
1137 return (find_inferior (&all_threads,
1138 second_thread_of_pid_p, &counter) == NULL);
1139 }
1140
1141 /* Kill LWP. */
1142
1143 static void
1144 linux_kill_one_lwp (struct lwp_info *lwp)
1145 {
1146 struct thread_info *thr = get_lwp_thread (lwp);
1147 int pid = lwpid_of (thr);
1148
1149 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1150 there is no signal context, and ptrace(PTRACE_KILL) (or
1151 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1152 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1153 alternative is to kill with SIGKILL. We only need one SIGKILL
1154 per process, not one for each thread. But since we still support
1155 linuxthreads, and we also support debugging programs using raw
1156 clone without CLONE_THREAD, we send one for each thread. For
1157 years, we used PTRACE_KILL only, so we're being a bit paranoid
1158 about some old kernels where PTRACE_KILL might work better
1159 (dubious if there are any such, but that's why it's paranoia), so
1160 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1161 everywhere. */
1162
1163 errno = 0;
1164 kill_lwp (pid, SIGKILL);
1165 if (debug_threads)
1166 {
1167 int save_errno = errno;
1168
1169 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1170 target_pid_to_str (ptid_of (thr)),
1171 save_errno ? strerror (save_errno) : "OK");
1172 }
1173
1174 errno = 0;
1175 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1176 if (debug_threads)
1177 {
1178 int save_errno = errno;
1179
1180 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1181 target_pid_to_str (ptid_of (thr)),
1182 save_errno ? strerror (save_errno) : "OK");
1183 }
1184 }
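
/* Sketch of what a kill_lwp-style helper does, not the file's actual
   definition: plain kill() addresses a whole process, so signalling
   one specific thread needs the tkill (or tgkill) syscall when the
   kernel provides it.  EXAMPLE_KILL_LWP is a hypothetical stand-in.  */

static int
example_kill_lwp (unsigned long lwpid, int signo)
{
#ifdef SYS_tkill
  return syscall (SYS_tkill, lwpid, signo);
#else
  /* Fallback: correct only when LWPID is a process id.  */
  return kill (lwpid, signo);
#endif
}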
1185
1186 /* Kill LWP and wait for it to die. */
1187
1188 static void
1189 kill_wait_lwp (struct lwp_info *lwp)
1190 {
1191 struct thread_info *thr = get_lwp_thread (lwp);
1192 int pid = ptid_get_pid (ptid_of (thr));
1193 int lwpid = ptid_get_lwp (ptid_of (thr));
1194 int wstat;
1195 int res;
1196
1197 if (debug_threads)
1198 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1199
1200 do
1201 {
1202 linux_kill_one_lwp (lwp);
1203
1204 /* Make sure it died. Notes:
1205
1206 - The loop is most likely unnecessary.
1207
1208 - We don't use linux_wait_for_event as that could delete lwps
1209 while we're iterating over them. We're not interested in
1210 any pending status at this point, only in making sure all
1211 wait status on the kernel side are collected until the
1212 process is reaped.
1213
1214 - We don't use __WALL here as the __WALL emulation relies on
1215 SIGCHLD, and killing a stopped process doesn't generate
1216 one, nor an exit status.
1217 */
1218 res = my_waitpid (lwpid, &wstat, 0);
1219 if (res == -1 && errno == ECHILD)
1220 res = my_waitpid (lwpid, &wstat, __WCLONE);
1221 } while (res > 0 && WIFSTOPPED (wstat));
1222
1223 /* Even if it was stopped, the child may have already disappeared.
1224 E.g., if it was killed by SIGKILL. */
1225 if (res < 0 && errno != ECHILD)
1226 perror_with_name ("kill_wait_lwp");
1227 }
1228
1229 /* Callback for `find_inferior'. Kills an lwp of a given process,
1230 except the leader. */
1231
1232 static int
1233 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1234 {
1235 struct thread_info *thread = (struct thread_info *) entry;
1236 struct lwp_info *lwp = get_thread_lwp (thread);
1237 int pid = * (int *) args;
1238
1239 if (ptid_get_pid (entry->id) != pid)
1240 return 0;
1241
1242 /* We avoid killing the first thread here, because of a Linux kernel (at
1243 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1244 the children get a chance to be reaped, it will remain a zombie
1245 forever. */
1246
1247 if (lwpid_of (thread) == pid)
1248 {
1249 if (debug_threads)
1250 debug_printf ("lkop: is last of process %s\n",
1251 target_pid_to_str (entry->id));
1252 return 0;
1253 }
1254
1255 kill_wait_lwp (lwp);
1256 return 0;
1257 }
1258
1259 static int
1260 linux_kill (int pid)
1261 {
1262 struct process_info *process;
1263 struct lwp_info *lwp;
1264
1265 process = find_process_pid (pid);
1266 if (process == NULL)
1267 return -1;
1268
1269 /* If we're killing a running inferior, make sure it is stopped
1270 first, as PTRACE_KILL will not work otherwise. */
1271 stop_all_lwps (0, NULL);
1272
1273 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1274
1275 /* See the comment in linux_kill_one_lwp. We did not kill the first
1276 thread in the list, so do so now. */
1277 lwp = find_lwp_pid (pid_to_ptid (pid));
1278
1279 if (lwp == NULL)
1280 {
1281 if (debug_threads)
1282 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1283 pid);
1284 }
1285 else
1286 kill_wait_lwp (lwp);
1287
1288 the_target->mourn (process);
1289
1290 /* Since we presently can only stop all lwps of all processes, we
1291 need to unstop lwps of other processes. */
1292 unstop_all_lwps (0, NULL);
1293 return 0;
1294 }
1295
1296 /* Get pending signal of THREAD, for detaching purposes. This is the
1297 signal the thread last stopped for, which we need to deliver to the
1298 thread when detaching, otherwise, it'd be suppressed/lost. */
1299
1300 static int
1301 get_detach_signal (struct thread_info *thread)
1302 {
1303 enum gdb_signal signo = GDB_SIGNAL_0;
1304 int status;
1305 struct lwp_info *lp = get_thread_lwp (thread);
1306
1307 if (lp->status_pending_p)
1308 status = lp->status_pending;
1309 else
1310 {
1311 /* If the thread had been suspended by gdbserver, and it stopped
1312 cleanly, then it'll have stopped with SIGSTOP. But we don't
1313 want to deliver that SIGSTOP. */
1314 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1315 || thread->last_status.value.sig == GDB_SIGNAL_0)
1316 return 0;
1317
1318 /* Otherwise, we may need to deliver the signal we
1319 intercepted. */
1320 status = lp->last_status;
1321 }
1322
1323 if (!WIFSTOPPED (status))
1324 {
1325 if (debug_threads)
1326 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1327 target_pid_to_str (ptid_of (thread)));
1328 return 0;
1329 }
1330
1331 /* Extended wait statuses aren't real SIGTRAPs. */
1332 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1333 {
1334 if (debug_threads)
1335 debug_printf ("GPS: lwp %s had stopped with extended "
1336 "status: no pending signal\n",
1337 target_pid_to_str (ptid_of (thread)));
1338 return 0;
1339 }
1340
1341 signo = gdb_signal_from_host (WSTOPSIG (status));
1342
1343 if (program_signals_p && !program_signals[signo])
1344 {
1345 if (debug_threads)
1346 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1347 target_pid_to_str (ptid_of (thread)),
1348 gdb_signal_to_string (signo));
1349 return 0;
1350 }
1351 else if (!program_signals_p
1352 /* If we have no way to know which signals GDB does not
1353 want to have passed to the program, assume
1354 SIGTRAP/SIGINT, which is GDB's default. */
1355 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1356 {
1357 if (debug_threads)
1358 debug_printf ("GPS: lwp %s had signal %s, "
1359 "but we don't know if we should pass it. "
1360 "Default to not.\n",
1361 target_pid_to_str (ptid_of (thread)),
1362 gdb_signal_to_string (signo));
1363 return 0;
1364 }
1365 else
1366 {
1367 if (debug_threads)
1368 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1369 target_pid_to_str (ptid_of (thread)),
1370 gdb_signal_to_string (signo));
1371
1372 return WSTOPSIG (status);
1373 }
1374 }
1375
1376 static int
1377 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1378 {
1379 struct thread_info *thread = (struct thread_info *) entry;
1380 struct lwp_info *lwp = get_thread_lwp (thread);
1381 int pid = * (int *) args;
1382 int sig;
1383
1384 if (ptid_get_pid (entry->id) != pid)
1385 return 0;
1386
1387 /* If there is a pending SIGSTOP, get rid of it. */
1388 if (lwp->stop_expected)
1389 {
1390 if (debug_threads)
1391 debug_printf ("Sending SIGCONT to %s\n",
1392 target_pid_to_str (ptid_of (thread)));
1393
1394 kill_lwp (lwpid_of (thread), SIGCONT);
1395 lwp->stop_expected = 0;
1396 }
1397
1398 /* Flush any pending changes to the process's registers. */
1399 regcache_invalidate_thread (thread);
1400
1401 /* Pass on any pending signal for this thread. */
1402 sig = get_detach_signal (thread);
1403
1404 /* Finally, let it resume. */
1405 if (the_low_target.prepare_to_resume != NULL)
1406 the_low_target.prepare_to_resume (lwp);
1407 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1408 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1409 error (_("Can't detach %s: %s"),
1410 target_pid_to_str (ptid_of (thread)),
1411 strerror (errno));
1412
1413 delete_lwp (lwp);
1414 return 0;
1415 }
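
/* Illustrative detail, not part of the original file: the data
   argument of PTRACE_DETACH is a signal number that the kernel
   delivers to the thread as it resumes, which is how the pending
   signal computed by get_detach_signal above survives the detach.
   EXAMPLE_DETACH_WITH_SIGNAL is a hypothetical helper.  */

static int
example_detach_with_signal (int lwpid, int sig)
{
  return ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
		 (PTRACE_TYPE_ARG4) (long) sig);
}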
1416
1417 static int
1418 linux_detach (int pid)
1419 {
1420 struct process_info *process;
1421
1422 process = find_process_pid (pid);
1423 if (process == NULL)
1424 return -1;
1425
1426 /* If a step over is already in progress, let it finish first,
1427 otherwise nesting a stabilize_threads operation on top gets real
1428 messy. */
1429 complete_ongoing_step_over ();
1430
1431 /* Stop all threads before detaching. First, ptrace requires that
1432 the thread is stopped to successfully detach. Second, thread_db
1433 may need to uninstall thread event breakpoints from memory, which
1434 only works with a stopped process anyway. */
1435 stop_all_lwps (0, NULL);
1436
1437 #ifdef USE_THREAD_DB
1438 thread_db_detach (process);
1439 #endif
1440
1441 /* Stabilize threads (move out of jump pads). */
1442 stabilize_threads ();
1443
1444 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1445
1446 the_target->mourn (process);
1447
1448 /* Since we presently can only stop all lwps of all processes, we
1449 need to unstop lwps of other processes. */
1450 unstop_all_lwps (0, NULL);
1451 return 0;
1452 }
1453
1454 /* Remove all LWPs that belong to process PROC from the lwp list. */
1455
1456 static int
1457 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1458 {
1459 struct thread_info *thread = (struct thread_info *) entry;
1460 struct lwp_info *lwp = get_thread_lwp (thread);
1461 struct process_info *process = (struct process_info *) proc;
1462
1463 if (pid_of (thread) == pid_of (process))
1464 delete_lwp (lwp);
1465
1466 return 0;
1467 }
1468
1469 static void
1470 linux_mourn (struct process_info *process)
1471 {
1472 struct process_info_private *priv;
1473
1474 #ifdef USE_THREAD_DB
1475 thread_db_mourn (process);
1476 #endif
1477
1478 find_inferior (&all_threads, delete_lwp_callback, process);
1479
1480 /* Free all private data. */
1481 priv = process->priv;
1482 free (priv->arch_private);
1483 free (priv);
1484 process->priv = NULL;
1485
1486 remove_process (process);
1487 }
1488
1489 static void
1490 linux_join (int pid)
1491 {
1492 int status, ret;
1493
1494 do {
1495 ret = my_waitpid (pid, &status, 0);
1496 if (WIFEXITED (status) || WIFSIGNALED (status))
1497 break;
1498 } while (ret != -1 || errno != ECHILD);
1499 }
1500
1501 /* Return nonzero if the given thread is still alive. */
1502 static int
1503 linux_thread_alive (ptid_t ptid)
1504 {
1505 struct lwp_info *lwp = find_lwp_pid (ptid);
1506
1507 /* We assume we always know if a thread exits. If a whole process
1508 exited but we still haven't been able to report it to GDB, we'll
1509 hold on to the last lwp of the dead process. */
1510 if (lwp != NULL)
1511 return !lwp_is_marked_dead (lwp);
1512 else
1513 return 0;
1514 }
1515
1516 /* Return 1 if this lwp still has an interesting status pending. If
1517 not (e.g., it had stopped for a breakpoint that is gone), return
1518 0. */
1519
1520 static int
1521 thread_still_has_status_pending_p (struct thread_info *thread)
1522 {
1523 struct lwp_info *lp = get_thread_lwp (thread);
1524
1525 if (!lp->status_pending_p)
1526 return 0;
1527
1528 if (thread->last_resume_kind != resume_stop
1529 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1530 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1531 {
1532 struct thread_info *saved_thread;
1533 CORE_ADDR pc;
1534 int discard = 0;
1535
1536 gdb_assert (lp->last_status != 0);
1537
1538 pc = get_pc (lp);
1539
1540 saved_thread = current_thread;
1541 current_thread = thread;
1542
1543 if (pc != lp->stop_pc)
1544 {
1545 if (debug_threads)
1546 debug_printf ("PC of %ld changed\n",
1547 lwpid_of (thread));
1548 discard = 1;
1549 }
1550
1551 #if !USE_SIGTRAP_SIGINFO
1552 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1553 && !(*the_low_target.breakpoint_at) (pc))
1554 {
1555 if (debug_threads)
1556 debug_printf ("previous SW breakpoint of %ld gone\n",
1557 lwpid_of (thread));
1558 discard = 1;
1559 }
1560 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1561 && !hardware_breakpoint_inserted_here (pc))
1562 {
1563 if (debug_threads)
1564 debug_printf ("previous HW breakpoint of %ld gone\n",
1565 lwpid_of (thread));
1566 discard = 1;
1567 }
1568 #endif
1569
1570 current_thread = saved_thread;
1571
1572 if (discard)
1573 {
1574 if (debug_threads)
1575 debug_printf ("discarding pending breakpoint status\n");
1576 lp->status_pending_p = 0;
1577 return 0;
1578 }
1579 }
1580
1581 return 1;
1582 }
1583
1584 /* Returns true if LWP is resumed from the client's perspective. */
1585
1586 static int
1587 lwp_resumed (struct lwp_info *lwp)
1588 {
1589 struct thread_info *thread = get_lwp_thread (lwp);
1590
1591 if (thread->last_resume_kind != resume_stop)
1592 return 1;
1593
1594 /* Did gdb send us a `vCont;t', but we haven't reported the
1595 corresponding stop to gdb yet? If so, the thread is still
1596 resumed/running from gdb's perspective. */
1597 if (thread->last_resume_kind == resume_stop
1598 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1599 return 1;
1600
1601 return 0;
1602 }
1603
1604 /* Return 1 if this lwp has an interesting status pending. */
1605 static int
1606 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1607 {
1608 struct thread_info *thread = (struct thread_info *) entry;
1609 struct lwp_info *lp = get_thread_lwp (thread);
1610 ptid_t ptid = * (ptid_t *) arg;
1611
1612 /* Check if we're only interested in events from a specific process
1613 or a specific LWP. */
1614 if (!ptid_match (ptid_of (thread), ptid))
1615 return 0;
1616
1617 if (!lwp_resumed (lp))
1618 return 0;
1619
1620 if (lp->status_pending_p
1621 && !thread_still_has_status_pending_p (thread))
1622 {
1623 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1624 return 0;
1625 }
1626
1627 return lp->status_pending_p;
1628 }
1629
1630 static int
1631 same_lwp (struct inferior_list_entry *entry, void *data)
1632 {
1633 ptid_t ptid = *(ptid_t *) data;
1634 int lwp;
1635
1636 if (ptid_get_lwp (ptid) != 0)
1637 lwp = ptid_get_lwp (ptid);
1638 else
1639 lwp = ptid_get_pid (ptid);
1640
1641 if (ptid_get_lwp (entry->id) == lwp)
1642 return 1;
1643
1644 return 0;
1645 }
1646
1647 struct lwp_info *
1648 find_lwp_pid (ptid_t ptid)
1649 {
1650 struct inferior_list_entry *thread
1651 = find_inferior (&all_threads, same_lwp, &ptid);
1652
1653 if (thread == NULL)
1654 return NULL;
1655
1656 return get_thread_lwp ((struct thread_info *) thread);
1657 }
1658
1659 /* Return the number of known LWPs in the tgid given by PID. */
1660
1661 static int
1662 num_lwps (int pid)
1663 {
1664 struct inferior_list_entry *inf, *tmp;
1665 int count = 0;
1666
1667 ALL_INFERIORS (&all_threads, inf, tmp)
1668 {
1669 if (ptid_get_pid (inf->id) == pid)
1670 count++;
1671 }
1672
1673 return count;
1674 }
1675
1676 /* The arguments passed to iterate_over_lwps. */
1677
1678 struct iterate_over_lwps_args
1679 {
1680 /* The FILTER argument passed to iterate_over_lwps. */
1681 ptid_t filter;
1682
1683 /* The CALLBACK argument passed to iterate_over_lwps. */
1684 iterate_over_lwps_ftype *callback;
1685
1686 /* The DATA argument passed to iterate_over_lwps. */
1687 void *data;
1688 };
1689
1690 /* Callback for find_inferior used by iterate_over_lwps to filter
1691 calls to the callback supplied to that function. Returning a
1692 nonzero value causes find_inferior to stop iterating and return
1693 the current inferior_list_entry. Returning zero indicates that
1694 find_inferior should continue iterating. */
1695
1696 static int
1697 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1698 {
1699 struct iterate_over_lwps_args *args
1700 = (struct iterate_over_lwps_args *) args_p;
1701
1702 if (ptid_match (entry->id, args->filter))
1703 {
1704 struct thread_info *thr = (struct thread_info *) entry;
1705 struct lwp_info *lwp = get_thread_lwp (thr);
1706
1707 return (*args->callback) (lwp, args->data);
1708 }
1709
1710 return 0;
1711 }
1712
1713 /* See nat/linux-nat.h. */
1714
1715 struct lwp_info *
1716 iterate_over_lwps (ptid_t filter,
1717 iterate_over_lwps_ftype callback,
1718 void *data)
1719 {
1720 struct iterate_over_lwps_args args = {filter, callback, data};
1721 struct inferior_list_entry *entry;
1722
1723 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1724 if (entry == NULL)
1725 return NULL;
1726
1727 return get_thread_lwp ((struct thread_info *) entry);
1728 }
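
/* Usage sketch, not part of the original file: count the LWPs that
   match FILTER by supplying a callback that never asks to stop.
   EXAMPLE_COUNT_CB and EXAMPLE_COUNT_LWPS are hypothetical.  */

static int
example_count_cb (struct lwp_info *lwp, void *data)
{
  (*(int *) data)++;
  return 0;			/* Zero keeps the iteration going.  */
}

static int
example_count_lwps (ptid_t filter)
{
  int count = 0;

  iterate_over_lwps (filter, example_count_cb, &count);
  return count;
}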
1729
1730 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1731 their exits until all other threads in the group have exited. */
1732
1733 static void
1734 check_zombie_leaders (void)
1735 {
1736 struct process_info *proc, *tmp;
1737
1738 ALL_PROCESSES (proc, tmp)
1739 {
1740 pid_t leader_pid = pid_of (proc);
1741 struct lwp_info *leader_lp;
1742
1743 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1744
1745 if (debug_threads)
1746 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1747 "num_lwps=%d, zombie=%d\n",
1748 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1749 linux_proc_pid_is_zombie (leader_pid));
1750
1751 if (leader_lp != NULL && !leader_lp->stopped
1752 /* Check if there are other threads in the group, as we may
1753 have raced with the inferior simply exiting. */
1754 && !last_thread_of_process_p (leader_pid)
1755 && linux_proc_pid_is_zombie (leader_pid))
1756 {
1757 /* A leader zombie can mean one of two things:
1758
1759 - It exited, and there's an exit status available to
1760 collect, or only the leader exited (not the whole
1761 program). In the latter case, we can't waitpid the
1762 leader's exit status until all other threads are gone.
1763
1764 - There are 3 or more threads in the group, and a thread
1765 other than the leader exec'd. On an exec, the Linux
1766 kernel destroys all other threads (except the execing
1767 one) in the thread group, and resets the execing thread's
1768 tid to the tgid. No exit notification is sent for the
1769 execing thread -- from the ptracer's perspective, it
1770 appears as though the execing thread just vanishes.
1771 Until we reap all other threads except the leader and the
1772 execing thread, the leader will be zombie, and the
1773 execing thread will be in `D (disc sleep)'. As soon as
1774 all other threads are reaped, the execing thread changes
1775 its tid to the tgid, and the previous (zombie) leader
1776 vanishes, giving place to the "new" leader. We could try
1777 distinguishing the exit and exec cases, by waiting once
1778 more, and seeing if something comes out, but it doesn't
1779 sound useful. The previous leader _does_ go away, and
1780 we'll re-add the new one once we see the exec event
1781 (which is just the same as what would happen if the
1782 previous leader did exit voluntarily before some other
1783 thread execs). */
1784
1785 if (debug_threads)
1786 fprintf (stderr,
1787 "CZL: Thread group leader %d zombie "
1788 "(it exited, or another thread execd).\n",
1789 leader_pid);
1790
1791 delete_lwp (leader_lp);
1792 }
1793 }
1794 }
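
/* Sketch of the zombie test used above, not the real
   linux_proc_pid_is_zombie (declared in nat/linux-procfs.h): a zombie
   task reports "State:\tZ (zombie)" in its /proc status file.
   EXAMPLE_PID_IS_ZOMBIE is a hypothetical re-derivation assuming only
   stdio and string.h.  */

static int
example_pid_is_zombie (int pid)
{
  char path[64], line[128];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* The state letter is 'Z' only for zombies.  */
	zombie = (strchr (line, 'Z') != NULL);
	break;
      }

  fclose (f);
  return zombie;
}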
1795
1796 /* Callback for `find_inferior'. Returns the first LWP that is not
1797 stopped. ARG is a PTID filter. */
1798
1799 static int
1800 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1801 {
1802 struct thread_info *thr = (struct thread_info *) entry;
1803 struct lwp_info *lwp;
1804 ptid_t filter = *(ptid_t *) arg;
1805
1806 if (!ptid_match (ptid_of (thr), filter))
1807 return 0;
1808
1809 lwp = get_thread_lwp (thr);
1810 if (!lwp->stopped)
1811 return 1;
1812
1813 return 0;
1814 }
1815
1816 /* Increment LWP's suspend count. */
1817
1818 static void
1819 lwp_suspended_inc (struct lwp_info *lwp)
1820 {
1821 lwp->suspended++;
1822
1823 if (debug_threads && lwp->suspended > 4)
1824 {
1825 struct thread_info *thread = get_lwp_thread (lwp);
1826
1827 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1828 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1829 }
1830 }
1831
1832 /* Decrement LWP's suspend count. */
1833
1834 static void
1835 lwp_suspended_decr (struct lwp_info *lwp)
1836 {
1837 lwp->suspended--;
1838
1839 if (lwp->suspended < 0)
1840 {
1841 struct thread_info *thread = get_lwp_thread (lwp);
1842
1843 internal_error (__FILE__, __LINE__,
1844 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1845 lwp->suspended);
1846 }
1847 }
1848
1849 /* This function should only be called if the LWP got a SIGTRAP.
1850
1851 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1852 event was handled, 0 otherwise. */
1853
1854 static int
1855 handle_tracepoints (struct lwp_info *lwp)
1856 {
1857 struct thread_info *tinfo = get_lwp_thread (lwp);
1858 int tpoint_related_event = 0;
1859
1860 gdb_assert (lwp->suspended == 0);
1861
1862 /* If this tracepoint hit causes a tracing stop, we'll immediately
1863 uninsert tracepoints. To do this, we temporarily pause all
1864 threads, unpatch away, and then unpause threads. We need to make
1865 sure the unpausing doesn't resume LWP too. */
1866 lwp_suspended_inc (lwp);
1867
1868 /* And we need to be sure that any all-threads-stopping doesn't try
1869 to move threads out of the jump pads, as it could deadlock the
1870 inferior (LWP could be in the jump pad, maybe even holding the
1871 lock.) */
1872
1873 /* Do any necessary step collect actions. */
1874 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1875
1876 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1877
1878 /* See if we just hit a tracepoint and do its main collect
1879 actions. */
1880 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1881
1882 lwp_suspended_decr (lwp);
1883
1884 gdb_assert (lwp->suspended == 0);
1885 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1886
1887 if (tpoint_related_event)
1888 {
1889 if (debug_threads)
1890 debug_printf ("got a tracepoint event\n");
1891 return 1;
1892 }
1893
1894 return 0;
1895 }
1896
1897 /* Convenience wrapper. Returns true if LWP is presently collecting a
1898 fast tracepoint. */
1899
1900 static int
1901 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1902 struct fast_tpoint_collect_status *status)
1903 {
1904 CORE_ADDR thread_area;
1905 struct thread_info *thread = get_lwp_thread (lwp);
1906
1907 if (the_low_target.get_thread_area == NULL)
1908 return 0;
1909
1910 /* Get the thread area address. This is used to recognize which
1911 thread is which when tracing with the in-process agent library.
1912 We don't read anything from the address, and treat it as opaque;
1913 it's the address itself that we assume is unique per-thread. */
1914 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1915 return 0;
1916
1917 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1918 }
1919
1920 /* The reason we resume in the caller is that we want to be able
1921 to pass lwp->status_pending as WSTAT, and we need to clear
1922 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1923 refuses to resume. */
1924
1925 static int
1926 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1927 {
1928 struct thread_info *saved_thread;
1929
1930 saved_thread = current_thread;
1931 current_thread = get_lwp_thread (lwp);
1932
1933 if ((wstat == NULL
1934 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1935 && supports_fast_tracepoints ()
1936 && agent_loaded_p ())
1937 {
1938 struct fast_tpoint_collect_status status;
1939 int r;
1940
1941 if (debug_threads)
1942 debug_printf ("Checking whether LWP %ld needs to move out of the "
1943 "jump pad.\n",
1944 lwpid_of (current_thread));
1945
1946 r = linux_fast_tracepoint_collecting (lwp, &status);
1947
1948 if (wstat == NULL
1949 || (WSTOPSIG (*wstat) != SIGILL
1950 && WSTOPSIG (*wstat) != SIGFPE
1951 && WSTOPSIG (*wstat) != SIGSEGV
1952 && WSTOPSIG (*wstat) != SIGBUS))
1953 {
1954 lwp->collecting_fast_tracepoint = r;
1955
1956 if (r != 0)
1957 {
1958 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1959 {
1960 /* Haven't executed the original instruction yet.
1961 Set breakpoint there, and wait till it's hit,
1962 then single-step until exiting the jump pad. */
1963 lwp->exit_jump_pad_bkpt
1964 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1965 }
1966
1967 if (debug_threads)
1968 debug_printf ("Checking whether LWP %ld needs to move out of "
1969 "the jump pad...it does\n",
1970 lwpid_of (current_thread));
1971 current_thread = saved_thread;
1972
1973 return 1;
1974 }
1975 }
1976 else
1977 {
1978 /* If we get a synchronous signal while collecting, *and*
1979 while executing the (relocated) original instruction,
1980 reset the PC to point at the tpoint address, before
1981 reporting to GDB. Otherwise, it's an IPA lib bug: just
1982 report the signal to GDB, and pray for the best. */
1983
1984 lwp->collecting_fast_tracepoint = 0;
1985
1986 if (r != 0
1987 && (status.adjusted_insn_addr <= lwp->stop_pc
1988 && lwp->stop_pc < status.adjusted_insn_addr_end))
1989 {
1990 siginfo_t info;
1991 struct regcache *regcache;
1992
1993 /* The si_addr on a few signals references the address
1994 of the faulting instruction. Adjust that as
1995 well. */
1996 if ((WSTOPSIG (*wstat) == SIGILL
1997 || WSTOPSIG (*wstat) == SIGFPE
1998 || WSTOPSIG (*wstat) == SIGBUS
1999 || WSTOPSIG (*wstat) == SIGSEGV)
2000 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2001 (PTRACE_TYPE_ARG3) 0, &info) == 0
2002 /* Final check just to make sure we don't clobber
2003 the siginfo of non-kernel-sent signals. */
2004 && (uintptr_t) info.si_addr == lwp->stop_pc)
2005 {
2006 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2007 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2008 (PTRACE_TYPE_ARG3) 0, &info);
2009 }
2010
2011 regcache = get_thread_regcache (current_thread, 1);
2012 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2013 lwp->stop_pc = status.tpoint_addr;
2014
2015 /* Cancel any fast tracepoint lock this thread was
2016 holding. */
2017 force_unlock_trace_buffer ();
2018 }
2019
2020 if (lwp->exit_jump_pad_bkpt != NULL)
2021 {
2022 if (debug_threads)
2023 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2024 "stopping all threads momentarily.\n");
2025
2026 stop_all_lwps (1, lwp);
2027
2028 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2029 lwp->exit_jump_pad_bkpt = NULL;
2030
2031 unstop_all_lwps (1, lwp);
2032
2033 gdb_assert (lwp->suspended >= 0);
2034 }
2035 }
2036 }
2037
2038 if (debug_threads)
2039 debug_printf ("Checking whether LWP %ld needs to move out of the "
2040 "jump pad...no\n",
2041 lwpid_of (current_thread));
2042
2043 current_thread = saved_thread;
2044 return 0;
2045 }
2046
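/* Editor's illustration, not part of the original source. This
   mirrors how linux_wait_1 (below) drives maybe_move_out_of_jump_pad:
   the signal is parked, the LWP resumed, and the event swallowed
   until the LWP has left the pad:

     if (maybe_move_out_of_jump_pad (event_child, &w))
       {
         enqueue_one_deferred_signal (event_child, &w);
         linux_resume_one_lwp (event_child, 0, 0, NULL);
       }
*/
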
2047 /* Enqueue one signal in the "signals to report later when out of the
2048 jump pad" list. */
2049
2050 static void
2051 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2052 {
2053 struct pending_signals *p_sig;
2054 struct thread_info *thread = get_lwp_thread (lwp);
2055
2056 if (debug_threads)
2057 debug_printf ("Deferring signal %d for LWP %ld.\n",
2058 WSTOPSIG (*wstat), lwpid_of (thread));
2059
2060 if (debug_threads)
2061 {
2062 struct pending_signals *sig;
2063
2064 for (sig = lwp->pending_signals_to_report;
2065 sig != NULL;
2066 sig = sig->prev)
2067 debug_printf (" Already queued %d\n",
2068 sig->signal);
2069
2070 debug_printf (" (no more currently queued signals)\n");
2071 }
2072
2073 /* Don't enqueue non-RT signals if they are already in the deferred
2074 queue. (SIGSTOP being the easiest signal to see ending up here
2075 twice.) */
2076 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2077 {
2078 struct pending_signals *sig;
2079
2080 for (sig = lwp->pending_signals_to_report;
2081 sig != NULL;
2082 sig = sig->prev)
2083 {
2084 if (sig->signal == WSTOPSIG (*wstat))
2085 {
2086 if (debug_threads)
2087 debug_printf ("Not requeuing already queued non-RT signal %d"
2088 " for LWP %ld\n",
2089 sig->signal,
2090 lwpid_of (thread));
2091 return;
2092 }
2093 }
2094 }
2095
2096 p_sig = XCNEW (struct pending_signals);
2097 p_sig->prev = lwp->pending_signals_to_report;
2098 p_sig->signal = WSTOPSIG (*wstat);
2099
2100 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2101 &p_sig->info);
2102
2103 lwp->pending_signals_to_report = p_sig;
2104 }
2105
2106 /* Dequeue one signal from the "signals to report later when out of
2107 the jump pad" list. */
2108
2109 static int
2110 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2111 {
2112 struct thread_info *thread = get_lwp_thread (lwp);
2113
2114 if (lwp->pending_signals_to_report != NULL)
2115 {
2116 struct pending_signals **p_sig;
2117
2118 p_sig = &lwp->pending_signals_to_report;
2119 while ((*p_sig)->prev != NULL)
2120 p_sig = &(*p_sig)->prev;
2121
2122 *wstat = W_STOPCODE ((*p_sig)->signal);
2123 if ((*p_sig)->info.si_signo != 0)
2124 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2125 &(*p_sig)->info);
2126 free (*p_sig);
2127 *p_sig = NULL;
2128
2129 if (debug_threads)
2130 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2131 WSTOPSIG (*wstat), lwpid_of (thread));
2132
2133 if (debug_threads)
2134 {
2135 struct pending_signals *sig;
2136
2137 for (sig = lwp->pending_signals_to_report;
2138 sig != NULL;
2139 sig = sig->prev)
2140 debug_printf (" Still queued %d\n",
2141 sig->signal);
2142
2143 debug_printf (" (no more queued signals)\n");
2144 }
2145
2146 return 1;
2147 }
2148
2149 return 0;
2150 }
2151
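/* Editor's illustration, not part of the original source. W_STOPCODE
   above builds a wait status that the standard macros decode back,
   which is what lets a dequeued signal masquerade as a fresh stop:

     int wstat = W_STOPCODE (SIGUSR1);
     gdb_assert (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGUSR1);
*/
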
2152 /* Fetch the possibly triggered data watchpoint info and store it in
2153 CHILD.
2154
2155 On some archs, like x86, that use debug registers to set
2156 watchpoints, the way to know which watched address trapped is to
2157 check the register that is used to select which address to watch.
2158 The problem is that between setting the watchpoint and reading
2159 back which data address trapped, the user may change the set of
2160 watchpoints, and, as a consequence, GDB changes the debug
2161 registers in the inferior. To avoid reading back a stale
2162 stopped-data-address when that happens, we cache in CHILD the fact
2163 that a watchpoint trapped, and the corresponding data address, as
2164 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2165 registers meanwhile, we have the cached data we can rely on. */
2166
2167 static int
2168 check_stopped_by_watchpoint (struct lwp_info *child)
2169 {
2170 if (the_low_target.stopped_by_watchpoint != NULL)
2171 {
2172 struct thread_info *saved_thread;
2173
2174 saved_thread = current_thread;
2175 current_thread = get_lwp_thread (child);
2176
2177 if (the_low_target.stopped_by_watchpoint ())
2178 {
2179 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2180
2181 if (the_low_target.stopped_data_address != NULL)
2182 child->stopped_data_address
2183 = the_low_target.stopped_data_address ();
2184 else
2185 child->stopped_data_address = 0;
2186 }
2187
2188 current_thread = saved_thread;
2189 }
2190
2191 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2192 }
2193
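/* Editor's illustration, not part of the original source. Because the
   stop reason and data address are cached in CHILD at SIGTRAP time,
   later consumers can test them without re-reading the (possibly
   rewritten) debug registers; "report_watchpoint" here is a
   hypothetical consumer:

     if (child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
       report_watchpoint (child->stopped_data_address);
*/
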
2194 /* Return the ptrace options that we want to try to enable. */
2195
2196 static int
2197 linux_low_ptrace_options (int attached)
2198 {
2199 int options = 0;
2200
2201 if (!attached)
2202 options |= PTRACE_O_EXITKILL;
2203
2204 if (report_fork_events)
2205 options |= PTRACE_O_TRACEFORK;
2206
2207 if (report_vfork_events)
2208 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2209
2210 if (report_exec_events)
2211 options |= PTRACE_O_TRACEEXEC;
2212
2213 return options;
2214 }
2215
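/* Editor's illustration, not part of the original source. The option
   mask computed above is applied to the tracee via
   linux_enable_event_reporting (see linux_low_filter_event below), so
   that forks, vforks and execs show up as PTRACE_EVENT_* stops:

     int options = linux_low_ptrace_options (proc->attached);
     linux_enable_event_reporting (lwpid, options);
*/
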
2216 /* Do low-level handling of the event, and check if we should go on
2217 and pass it to caller code. Return the affected lwp if we are, or
2218 NULL otherwise. */
2219
2220 static struct lwp_info *
2221 linux_low_filter_event (int lwpid, int wstat)
2222 {
2223 struct lwp_info *child;
2224 struct thread_info *thread;
2225 int have_stop_pc = 0;
2226
2227 child = find_lwp_pid (pid_to_ptid (lwpid));
2228
2229 /* Check for stop events reported by a process we didn't already
2230 know about - anything not already in our LWP list.
2231
2232 If we're expecting to receive stopped processes after
2233 fork, vfork, and clone events, then we'll just add the
2234 new one to our list and go back to waiting for the event
2235 to be reported - the stopped process might be returned
2236 from waitpid before or after the event is.
2237
2238 But note the case of a non-leader thread exec'ing after the
2239 leader has exited and gone from our lists (because
2240 check_zombie_leaders deleted it). The non-leader thread
2241 changes its tid to the tgid. */
2242
2243 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2244 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2245 {
2246 ptid_t child_ptid;
2247
2248 /* A multi-thread exec after we had seen the leader exiting. */
2249 if (debug_threads)
2250 {
2251 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2252 "after exec.\n", lwpid);
2253 }
2254
2255 child_ptid = ptid_build (lwpid, lwpid, 0);
2256 child = add_lwp (child_ptid);
2257 child->stopped = 1;
2258 current_thread = child->thread;
2259 }
2260
2261 /* If we didn't find a process, one of two things presumably happened:
2262 - A process we started and then detached from has exited. Ignore it.
2263 - A process we are controlling has forked and the new child's stop
2264 was reported to us by the kernel. Save its PID. */
2265 if (child == NULL && WIFSTOPPED (wstat))
2266 {
2267 add_to_pid_list (&stopped_pids, lwpid, wstat);
2268 return NULL;
2269 }
2270 else if (child == NULL)
2271 return NULL;
2272
2273 thread = get_lwp_thread (child);
2274
2275 child->stopped = 1;
2276
2277 child->last_status = wstat;
2278
2279 /* Check if the thread has exited. */
2280 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2281 {
2282 if (debug_threads)
2283 debug_printf ("LLFE: %d exited.\n", lwpid);
2284 /* If there is at least one more LWP, then the exit signal was
2285 not the end of the debugged application and should be
2286 ignored, unless GDB wants to hear about thread exits. */
2287 if (report_thread_events
2288 || last_thread_of_process_p (pid_of (thread)))
2289 {
2290 /* Since events are serialized to the GDB core, we can't
2291 report this one right now. Leave the status pending for
2292 the next time we're able to report it. */
2293 mark_lwp_dead (child, wstat);
2294 return child;
2295 }
2296 else
2297 {
2298 delete_lwp (child);
2299 return NULL;
2300 }
2301 }
2302
2303 gdb_assert (WIFSTOPPED (wstat));
2304
2305 if (WIFSTOPPED (wstat))
2306 {
2307 struct process_info *proc;
2308
2309 /* Architecture-specific setup after inferior is running. */
2310 proc = find_process_pid (pid_of (thread));
2311 if (proc->tdesc == NULL)
2312 {
2313 if (proc->attached)
2314 {
2315 /* This needs to happen after we have attached to the
2316 inferior and it is stopped for the first time, but
2317 before we access any inferior registers. */
2318 linux_arch_setup_thread (thread);
2319 }
2320 else
2321 {
2322 /* The process is started, but GDBserver will do
2323 architecture-specific setup after the program stops at
2324 the first instruction. */
2325 child->status_pending_p = 1;
2326 child->status_pending = wstat;
2327 return child;
2328 }
2329 }
2330 }
2331
2332 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2333 {
2334 struct process_info *proc = find_process_pid (pid_of (thread));
2335 int options = linux_low_ptrace_options (proc->attached);
2336
2337 linux_enable_event_reporting (lwpid, options);
2338 child->must_set_ptrace_flags = 0;
2339 }
2340
2341 /* Be careful to not overwrite stop_pc until
2342 check_stopped_by_breakpoint is called. */
2343 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2344 && linux_is_extended_waitstatus (wstat))
2345 {
2346 child->stop_pc = get_pc (child);
2347 if (handle_extended_wait (&child, wstat))
2348 {
2349 /* The event has been handled, so just return without
2350 reporting it. */
2351 return NULL;
2352 }
2353 }
2354
2355 /* Check first whether this was a SW/HW breakpoint before checking
2356 watchpoints, because at least s390 can't tell the data address of
2357 hardware watchpoint hits, and returns stopped-by-watchpoint as
2358 long as there's a watchpoint set. */
2359 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2360 {
2361 if (check_stopped_by_breakpoint (child))
2362 have_stop_pc = 1;
2363 }
2364
2365 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2366 or hardware watchpoint. Check which is which if we got
2367 TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
2368 stepped an instruction that triggered a watchpoint. In that
2369 case, on some architectures (such as x86), instead of
2370 TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
2371 the debug registers separately. */
2372 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2373 && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
2374 check_stopped_by_watchpoint (child);
2375
2376 if (!have_stop_pc)
2377 child->stop_pc = get_pc (child);
2378
2379 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2380 && child->stop_expected)
2381 {
2382 if (debug_threads)
2383 debug_printf ("Expected stop.\n");
2384 child->stop_expected = 0;
2385
2386 if (thread->last_resume_kind == resume_stop)
2387 {
2388 /* We want to report the stop to the core. Treat the
2389 SIGSTOP as a normal event. */
2390 if (debug_threads)
2391 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2392 target_pid_to_str (ptid_of (thread)));
2393 }
2394 else if (stopping_threads != NOT_STOPPING_THREADS)
2395 {
2396 /* Stopping threads. We don't want this SIGSTOP to end up
2397 pending. */
2398 if (debug_threads)
2399 debug_printf ("LLW: SIGSTOP caught for %s "
2400 "while stopping threads.\n",
2401 target_pid_to_str (ptid_of (thread)));
2402 return NULL;
2403 }
2404 else
2405 {
2406 /* This is a delayed SIGSTOP. Filter out the event. */
2407 if (debug_threads)
2408 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2409 child->stepping ? "step" : "continue",
2410 target_pid_to_str (ptid_of (thread)));
2411
2412 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2413 return NULL;
2414 }
2415 }
2416
2417 child->status_pending_p = 1;
2418 child->status_pending = wstat;
2419 return child;
2420 }
2421
2422 /* Resume LWPs that are currently stopped without any pending status
2423 to report, but are resumed from the core's perspective. */
2424
2425 static void
2426 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2427 {
2428 struct thread_info *thread = (struct thread_info *) entry;
2429 struct lwp_info *lp = get_thread_lwp (thread);
2430
2431 if (lp->stopped
2432 && !lp->suspended
2433 && !lp->status_pending_p
2434 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2435 {
2436 int step = thread->last_resume_kind == resume_step;
2437
2438 if (debug_threads)
2439 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2440 target_pid_to_str (ptid_of (thread)),
2441 paddress (lp->stop_pc),
2442 step);
2443
2444 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2445 }
2446 }
2447
2448 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2449 match FILTER_PTID (leaving others pending). The PTIDs can be:
2450 minus_one_ptid, to specify any child; a pid PTID, specifying all
2451 lwps of a thread group; or a PTID representing a single lwp. Store
2452 the stop status through the status pointer WSTAT. OPTIONS is
2453 passed to the waitpid call. Return 0 if no event was found and
2454 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2455 was found. Return the PID of the stopped child otherwise. */
2456
2457 static int
2458 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2459 int *wstatp, int options)
2460 {
2461 struct thread_info *event_thread;
2462 struct lwp_info *event_child, *requested_child;
2463 sigset_t block_mask, prev_mask;
2464
2465 retry:
2466 /* N.B. event_thread points to the thread_info struct that contains
2467 event_child. Keep them in sync. */
2468 event_thread = NULL;
2469 event_child = NULL;
2470 requested_child = NULL;
2471
2472 /* Check for a lwp with a pending status. */
2473
2474 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2475 {
2476 event_thread = (struct thread_info *)
2477 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2478 if (event_thread != NULL)
2479 event_child = get_thread_lwp (event_thread);
2480 if (debug_threads && event_thread)
2481 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2482 }
2483 else if (!ptid_equal (filter_ptid, null_ptid))
2484 {
2485 requested_child = find_lwp_pid (filter_ptid);
2486
2487 if (stopping_threads == NOT_STOPPING_THREADS
2488 && requested_child->status_pending_p
2489 && requested_child->collecting_fast_tracepoint)
2490 {
2491 enqueue_one_deferred_signal (requested_child,
2492 &requested_child->status_pending);
2493 requested_child->status_pending_p = 0;
2494 requested_child->status_pending = 0;
2495 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2496 }
2497
2498 if (requested_child->suspended
2499 && requested_child->status_pending_p)
2500 {
2501 internal_error (__FILE__, __LINE__,
2502 "requesting an event out of a"
2503 " suspended child?");
2504 }
2505
2506 if (requested_child->status_pending_p)
2507 {
2508 event_child = requested_child;
2509 event_thread = get_lwp_thread (event_child);
2510 }
2511 }
2512
2513 if (event_child != NULL)
2514 {
2515 if (debug_threads)
2516 debug_printf ("Got an event from pending child %ld (%04x)\n",
2517 lwpid_of (event_thread), event_child->status_pending);
2518 *wstatp = event_child->status_pending;
2519 event_child->status_pending_p = 0;
2520 event_child->status_pending = 0;
2521 current_thread = event_thread;
2522 return lwpid_of (event_thread);
2523 }
2524
2525 /* But if we don't find a pending event, we'll have to wait.
2526
2527 We only enter this loop if no process has a pending wait status.
2528 Thus any action taken in response to a wait status inside this
2529 loop is responding as soon as we detect the status, not after any
2530 pending events. */
2531
2532 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2533 all signals while here. */
2534 sigfillset (&block_mask);
2535 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2536
2537 /* Always pull all events out of the kernel. We'll randomly select
2538 an event LWP out of all that have events, to prevent
2539 starvation. */
2540 while (event_child == NULL)
2541 {
2542 pid_t ret = 0;
2543
2544 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2545 quirks:
2546
2547 - If the thread group leader exits while other threads in the
2548 thread group still exist, waitpid(TGID, ...) hangs. That
2549 waitpid won't return an exit status until the other threads
2550 in the group are reaped.
2551
2552 - When a non-leader thread execs, that thread just vanishes
2553 without reporting an exit (so we'd hang if we waited for it
2554 explicitly in that case). The exec event is reported to
2555 the TGID pid. */
2556 errno = 0;
2557 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2558
2559 if (debug_threads)
2560 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2561 ret, errno ? strerror (errno) : "ERRNO-OK");
2562
2563 if (ret > 0)
2564 {
2565 if (debug_threads)
2566 {
2567 debug_printf ("LLW: waitpid %ld received %s\n",
2568 (long) ret, status_to_str (*wstatp));
2569 }
2570
2571 /* Filter all events. IOW, leave all events pending. We'll
2572 randomly select an event LWP out of all that have events
2573 below. */
2574 linux_low_filter_event (ret, *wstatp);
2575 /* Retry until nothing comes out of waitpid. A single
2576 SIGCHLD can indicate more than one child stopped. */
2577 continue;
2578 }
2579
2580 /* Now that we've pulled all events out of the kernel, resume
2581 LWPs that don't have an interesting event to report. */
2582 if (stopping_threads == NOT_STOPPING_THREADS)
2583 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2584
2585 /* ... and find an LWP with a status to report to the core, if
2586 any. */
2587 event_thread = (struct thread_info *)
2588 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2589 if (event_thread != NULL)
2590 {
2591 event_child = get_thread_lwp (event_thread);
2592 *wstatp = event_child->status_pending;
2593 event_child->status_pending_p = 0;
2594 event_child->status_pending = 0;
2595 break;
2596 }
2597
2598 /* Check for zombie thread group leaders. Those can't be reaped
2599 until all other threads in the thread group are. */
2600 check_zombie_leaders ();
2601
2602 /* If there are no resumed children left in the set of LWPs we
2603 want to wait for, bail. We can't just block in
2604 waitpid/sigsuspend, because lwps might have been left stopped
2605 in trace-stop state, and we'd be stuck forever waiting for
2606 their status to change (which would only happen if we resumed
2607 them). Even if WNOHANG is set, this return code is preferred
2608 over 0 (below), as it is more detailed. */
2609 if ((find_inferior (&all_threads,
2610 not_stopped_callback,
2611 &wait_ptid) == NULL))
2612 {
2613 if (debug_threads)
2614 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2615 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2616 return -1;
2617 }
2618
2619 /* No interesting event to report to the caller. */
2620 if ((options & WNOHANG))
2621 {
2622 if (debug_threads)
2623 debug_printf ("WNOHANG set, no event found\n");
2624
2625 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2626 return 0;
2627 }
2628
2629 /* Block until we get an event reported with SIGCHLD. */
2630 if (debug_threads)
2631 debug_printf ("sigsuspend'ing\n");
2632
2633 sigsuspend (&prev_mask);
2634 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2635 goto retry;
2636 }
2637
2638 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2639
2640 current_thread = event_thread;
2641
2642 return lwpid_of (event_thread);
2643 }
2644
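/* Editor's illustration, not part of the original source. The loop
   above is the classic race-free wait: block SIGCHLD, drain waitpid
   with WNOHANG, and only then sleep with the signal atomically
   unblocked, so a SIGCHLD arriving between the drain and the sleep
   still wakes us. Schematically:

     sigfillset (&block_mask);
     sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
     while (my_waitpid (-1, &wstat, WNOHANG) > 0)
       ;  /* leave each event pending */
     sigsuspend (&prev_mask);   /* unblock + wait, atomically */
     sigprocmask (SIG_SETMASK, &prev_mask, NULL);
*/
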
2645 /* Wait for an event from child(ren) PTID. PTIDs can be:
2646 minus_one_ptid, to specify any child; a pid PTID, specifying all
2647 lwps of a thread group; or a PTID representing a single lwp. Store
2648 the stop status through the status pointer WSTAT. OPTIONS is
2649 passed to the waitpid call. Return 0 if no event was found and
2650 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2651 were found. Return the PID of the stopped child otherwise. */
2652
2653 static int
2654 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2655 {
2656 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2657 }
2658
2659 /* Count the LWPs that have had events. */
2660
2661 static int
2662 count_events_callback (struct inferior_list_entry *entry, void *data)
2663 {
2664 struct thread_info *thread = (struct thread_info *) entry;
2665 struct lwp_info *lp = get_thread_lwp (thread);
2666 int *count = (int *) data;
2667
2668 gdb_assert (count != NULL);
2669
2670 /* Count only resumed LWPs that have an event pending. */
2671 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2672 && lp->status_pending_p)
2673 (*count)++;
2674
2675 return 0;
2676 }
2677
2678 /* Select the LWP (if any) that is currently being single-stepped. */
2679
2680 static int
2681 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2682 {
2683 struct thread_info *thread = (struct thread_info *) entry;
2684 struct lwp_info *lp = get_thread_lwp (thread);
2685
2686 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2687 && thread->last_resume_kind == resume_step
2688 && lp->status_pending_p)
2689 return 1;
2690 else
2691 return 0;
2692 }
2693
2694 /* Select the Nth LWP that has had an event. */
2695
2696 static int
2697 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2698 {
2699 struct thread_info *thread = (struct thread_info *) entry;
2700 struct lwp_info *lp = get_thread_lwp (thread);
2701 int *selector = (int *) data;
2702
2703 gdb_assert (selector != NULL);
2704
2705 /* Select only resumed LWPs that have an event pending. */
2706 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2707 && lp->status_pending_p)
2708 if ((*selector)-- == 0)
2709 return 1;
2710
2711 return 0;
2712 }
2713
2714 /* Select one LWP out of those that have events pending. */
2715
2716 static void
2717 select_event_lwp (struct lwp_info **orig_lp)
2718 {
2719 int num_events = 0;
2720 int random_selector;
2721 struct thread_info *event_thread = NULL;
2722
2723 /* In all-stop, give preference to the LWP that is being
2724 single-stepped. There will be at most one, and it's the LWP that
2725 the core is most interested in. If we didn't do this, then we'd
2726 have to handle pending step SIGTRAPs somehow in case the core
2727 later continues the previously-stepped thread, otherwise we'd
2728 report the pending SIGTRAP, and the core, not having stepped the
2729 thread, wouldn't understand what the trap was for, and therefore
2730 would report it to the user as a random signal. */
2731 if (!non_stop)
2732 {
2733 event_thread
2734 = (struct thread_info *) find_inferior (&all_threads,
2735 select_singlestep_lwp_callback,
2736 NULL);
2737 if (event_thread != NULL)
2738 {
2739 if (debug_threads)
2740 debug_printf ("SEL: Select single-step %s\n",
2741 target_pid_to_str (ptid_of (event_thread)));
2742 }
2743 }
2744 if (event_thread == NULL)
2745 {
2746 /* No single-stepping LWP. Select one at random, out of those
2747 which have had events. */
2748
2749 /* First see how many events we have. */
2750 find_inferior (&all_threads, count_events_callback, &num_events);
2751 gdb_assert (num_events > 0);
2752
2753 /* Now randomly pick a LWP out of those that have had
2754 events. */
2755 random_selector = (int)
2756 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2757
2758 if (debug_threads && num_events > 1)
2759 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2760 num_events, random_selector);
2761
2762 event_thread
2763 = (struct thread_info *) find_inferior (&all_threads,
2764 select_event_lwp_callback,
2765 &random_selector);
2766 }
2767
2768 if (event_thread != NULL)
2769 {
2770 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2771
2772 /* Switch the event LWP. */
2773 *orig_lp = event_lp;
2774 }
2775 }
2776
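/* Editor's illustration, not part of the original source. The random
   selector above maps rand () uniformly onto 0 .. num_events - 1;
   dividing by RAND_MAX + 1.0 (rather than RAND_MAX) keeps num_events
   itself out of range:

     int pick = (int) ((num_events * (double) rand ())
                       / (RAND_MAX + 1.0));
     gdb_assert (pick >= 0 && pick < num_events);
*/
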
2777 /* Decrement the suspend count of an LWP. */
2778
2779 static int
2780 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2781 {
2782 struct thread_info *thread = (struct thread_info *) entry;
2783 struct lwp_info *lwp = get_thread_lwp (thread);
2784
2785 /* Ignore EXCEPT. */
2786 if (lwp == except)
2787 return 0;
2788
2789 lwp_suspended_decr (lwp);
2790 return 0;
2791 }
2792
2793 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2794 non-NULL. */
2795
2796 static void
2797 unsuspend_all_lwps (struct lwp_info *except)
2798 {
2799 find_inferior (&all_threads, unsuspend_one_lwp, except);
2800 }
2801
2802 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2803 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2804 void *data);
2805 static int lwp_running (struct inferior_list_entry *entry, void *data);
2806 static ptid_t linux_wait_1 (ptid_t ptid,
2807 struct target_waitstatus *ourstatus,
2808 int target_options);
2809
2810 /* Stabilize threads (move out of jump pads).
2811
2812 If a thread is midway collecting a fast tracepoint, we need to
2813 finish the collection and move it out of the jump pad before
2814 reporting the signal.
2815
2816 This avoids recursion while collecting (when a signal arrives
2817 midway, and the signal handler itself collects), which would trash
2818 the trace buffer. In case the user set a breakpoint in a signal
2819 handler, this avoids the backtrace showing the jump pad, etc..
2820 Most importantly, there are certain things we can't do safely if
2821 threads are stopped in a jump pad (or in its callees). For
2822 example:
2823
2824 - starting a new trace run. A thread still collecting from the
2825 previous run could trash the trace buffer when resumed. The trace
2826 buffer control structures would have been reset but the thread had
2827 no way to tell. The thread could even be midway through a memcpy to the
2828 buffer, which would mean that when resumed, it would clobber the
2829 trace buffer that had been set for a new run.
2830
2831 - we can't rewrite/reuse the jump pads for new tracepoints
2832 safely. Say you do tstart while a thread is stopped midway through
2833 collecting. When the thread is later resumed, it finishes the
2834 collection, and returns to the jump pad, to execute the original
2835 instruction that was under the tracepoint jump at the time the
2836 older run had been started. If the jump pad had been rewritten
2837 since for something else in the new run, the thread would now
2838 execute the wrong / random instructions. */
2839
2840 static void
2841 linux_stabilize_threads (void)
2842 {
2843 struct thread_info *saved_thread;
2844 struct thread_info *thread_stuck;
2845
2846 thread_stuck
2847 = (struct thread_info *) find_inferior (&all_threads,
2848 stuck_in_jump_pad_callback,
2849 NULL);
2850 if (thread_stuck != NULL)
2851 {
2852 if (debug_threads)
2853 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2854 lwpid_of (thread_stuck));
2855 return;
2856 }
2857
2858 saved_thread = current_thread;
2859
2860 stabilizing_threads = 1;
2861
2862 /* Kick 'em all. */
2863 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2864
2865 /* Loop until all are stopped out of the jump pads. */
2866 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2867 {
2868 struct target_waitstatus ourstatus;
2869 struct lwp_info *lwp;
2870 int wstat;
2871
2872 /* Note that we go through the full wait event loop. While
2873 moving threads out of the jump pad, we need to be able to step
2874 over internal breakpoints and such. */
2875 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2876
2877 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2878 {
2879 lwp = get_thread_lwp (current_thread);
2880
2881 /* Lock it. */
2882 lwp_suspended_inc (lwp);
2883
2884 if (ourstatus.value.sig != GDB_SIGNAL_0
2885 || current_thread->last_resume_kind == resume_stop)
2886 {
2887 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2888 enqueue_one_deferred_signal (lwp, &wstat);
2889 }
2890 }
2891 }
2892
2893 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2894
2895 stabilizing_threads = 0;
2896
2897 current_thread = saved_thread;
2898
2899 if (debug_threads)
2900 {
2901 thread_stuck
2902 = (struct thread_info *) find_inferior (&all_threads,
2903 stuck_in_jump_pad_callback,
2904 NULL);
2905 if (thread_stuck != NULL)
2906 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2907 lwpid_of (thread_stuck));
2908 }
2909 }
2910
2911 /* Convenience function that is called when the kernel reports an
2912 event that is not passed out to GDB. */
2913
2914 static ptid_t
2915 ignore_event (struct target_waitstatus *ourstatus)
2916 {
2917 /* If we got an event, there may still be others, as a single
2918 SIGCHLD can indicate more than one child stopped. This forces
2919 another target_wait call. */
2920 async_file_mark ();
2921
2922 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2923 return null_ptid;
2924 }
2925
2926 /* Convenience function that is called when the kernel reports an exit
2927 event. This decides whether to report the event to GDB as a
2928 process exit event, a thread exit event, or to suppress the
2929 event. */
2930
2931 static ptid_t
2932 filter_exit_event (struct lwp_info *event_child,
2933 struct target_waitstatus *ourstatus)
2934 {
2935 struct thread_info *thread = get_lwp_thread (event_child);
2936 ptid_t ptid = ptid_of (thread);
2937
2938 if (!last_thread_of_process_p (pid_of (thread)))
2939 {
2940 if (report_thread_events)
2941 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2942 else
2943 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2944
2945 delete_lwp (event_child);
2946 }
2947 return ptid;
2948 }
2949
2950 /* Wait for process, returns status. */
2951
2952 static ptid_t
2953 linux_wait_1 (ptid_t ptid,
2954 struct target_waitstatus *ourstatus, int target_options)
2955 {
2956 int w;
2957 struct lwp_info *event_child;
2958 int options;
2959 int pid;
2960 int step_over_finished;
2961 int bp_explains_trap;
2962 int maybe_internal_trap;
2963 int report_to_gdb;
2964 int trace_event;
2965 int in_step_range;
2966 int any_resumed;
2967
2968 if (debug_threads)
2969 {
2970 debug_enter ();
2971 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2972 }
2973
2974 /* Translate generic target options into linux options. */
2975 options = __WALL;
2976 if (target_options & TARGET_WNOHANG)
2977 options |= WNOHANG;
2978
2979 bp_explains_trap = 0;
2980 trace_event = 0;
2981 in_step_range = 0;
2982 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2983
2984 /* Find a resumed LWP, if any. */
2985 if (find_inferior (&all_threads,
2986 status_pending_p_callback,
2987 &minus_one_ptid) != NULL)
2988 any_resumed = 1;
2989 else if ((find_inferior (&all_threads,
2990 not_stopped_callback,
2991 &minus_one_ptid) != NULL))
2992 any_resumed = 1;
2993 else
2994 any_resumed = 0;
2995
2996 if (ptid_equal (step_over_bkpt, null_ptid))
2997 pid = linux_wait_for_event (ptid, &w, options);
2998 else
2999 {
3000 if (debug_threads)
3001 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3002 target_pid_to_str (step_over_bkpt));
3003 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3004 }
3005
3006 if (pid == 0 || (pid == -1 && !any_resumed))
3007 {
3008 gdb_assert (target_options & TARGET_WNOHANG);
3009
3010 if (debug_threads)
3011 {
3012 debug_printf ("linux_wait_1 ret = null_ptid, "
3013 "TARGET_WAITKIND_IGNORE\n");
3014 debug_exit ();
3015 }
3016
3017 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3018 return null_ptid;
3019 }
3020 else if (pid == -1)
3021 {
3022 if (debug_threads)
3023 {
3024 debug_printf ("linux_wait_1 ret = null_ptid, "
3025 "TARGET_WAITKIND_NO_RESUMED\n");
3026 debug_exit ();
3027 }
3028
3029 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3030 return null_ptid;
3031 }
3032
3033 event_child = get_thread_lwp (current_thread);
3034
3035 /* linux_wait_for_event only returns an exit status for the last
3036 child of a process. Report it. */
3037 if (WIFEXITED (w) || WIFSIGNALED (w))
3038 {
3039 if (WIFEXITED (w))
3040 {
3041 ourstatus->kind = TARGET_WAITKIND_EXITED;
3042 ourstatus->value.integer = WEXITSTATUS (w);
3043
3044 if (debug_threads)
3045 {
3046 debug_printf ("linux_wait_1 ret = %s, exited with "
3047 "retcode %d\n",
3048 target_pid_to_str (ptid_of (current_thread)),
3049 WEXITSTATUS (w));
3050 debug_exit ();
3051 }
3052 }
3053 else
3054 {
3055 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3056 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3057
3058 if (debug_threads)
3059 {
3060 debug_printf ("linux_wait_1 ret = %s, terminated with "
3061 "signal %d\n",
3062 target_pid_to_str (ptid_of (current_thread)),
3063 WTERMSIG (w));
3064 debug_exit ();
3065 }
3066 }
3067
3068 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3069 return filter_exit_event (event_child, ourstatus);
3070
3071 return ptid_of (current_thread);
3072 }
3073
3074 /* If a step-over executes a breakpoint instruction, then in the
3075 case of a hardware single step it means a gdb/gdbserver breakpoint
3076 had been planted on top of a permanent breakpoint, while in the
3077 case of a software single step it may just mean that gdbserver hit
3078 the reinsert breakpoint. The PC has been adjusted by
3079 check_stopped_by_breakpoint to point at the breakpoint address.
3080 So for a hardware single step, advance the PC manually past the
3081 breakpoint, and for a software single step, advance it only if
3082 it's not the reinsert breakpoint we are hitting.
3083 This avoids the program getting stuck trapping a permanent
3084 breakpoint forever. */
3085 if (!ptid_equal (step_over_bkpt, null_ptid)
3086 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3087 && (event_child->stepping
3088 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3089 {
3090 int increment_pc = 0;
3091 int breakpoint_kind = 0;
3092 CORE_ADDR stop_pc = event_child->stop_pc;
3093
3094 breakpoint_kind =
3095 the_target->breakpoint_kind_from_current_state (&stop_pc);
3096 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3097
3098 if (debug_threads)
3099 {
3100 debug_printf ("step-over for %s executed software breakpoint\n",
3101 target_pid_to_str (ptid_of (current_thread)));
3102 }
3103
3104 if (increment_pc != 0)
3105 {
3106 struct regcache *regcache
3107 = get_thread_regcache (current_thread, 1);
3108
3109 event_child->stop_pc += increment_pc;
3110 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3111
3112 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3113 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3114 }
3115 }
3116
3117 /* If this event was not handled before, and is not a SIGTRAP, we
3118 report it. SIGILL and SIGSEGV are also treated as traps in case
3119 a breakpoint is inserted at the current PC. If this target does
3120 not support internal breakpoints at all, we also report the
3121 SIGTRAP without further processing; it's of no concern to us. */
3122 maybe_internal_trap
3123 = (supports_breakpoints ()
3124 && (WSTOPSIG (w) == SIGTRAP
3125 || ((WSTOPSIG (w) == SIGILL
3126 || WSTOPSIG (w) == SIGSEGV)
3127 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3128
3129 if (maybe_internal_trap)
3130 {
3131 /* Handle anything that requires bookkeeping before deciding to
3132 report the event or continue waiting. */
3133
3134 /* First check if we can explain the SIGTRAP with an internal
3135 breakpoint, or if we should possibly report the event to GDB.
3136 Do this before anything that may remove or insert a
3137 breakpoint. */
3138 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3139
3140 /* We have a SIGTRAP, possibly a step-over dance has just
3141 finished. If so, tweak the state machine accordingly,
3142 reinsert breakpoints and delete any reinsert (software
3143 single-step) breakpoints. */
3144 step_over_finished = finish_step_over (event_child);
3145
3146 /* Now invoke the callbacks of any internal breakpoints there. */
3147 check_breakpoints (event_child->stop_pc);
3148
3149 /* Handle tracepoint data collecting. This may overflow the
3150 trace buffer, and cause a tracing stop, removing
3151 breakpoints. */
3152 trace_event = handle_tracepoints (event_child);
3153
3154 if (bp_explains_trap)
3155 {
3156 /* If we stepped or ran into an internal breakpoint, we've
3157 already handled it. So next time we resume (from this
3158 PC), we should step over it. */
3159 if (debug_threads)
3160 debug_printf ("Hit a gdbserver breakpoint.\n");
3161
3162 if (breakpoint_here (event_child->stop_pc))
3163 event_child->need_step_over = 1;
3164 }
3165 }
3166 else
3167 {
3168 /* We have some other signal, possibly a step-over dance was in
3169 progress, and it should be cancelled too. */
3170 step_over_finished = finish_step_over (event_child);
3171 }
3172
3173 /* We have all the data we need. Either report the event to GDB, or
3174 resume threads and keep waiting for more. */
3175
3176 /* If we're collecting a fast tracepoint, finish the collection and
3177 move out of the jump pad before delivering a signal. See
3178 linux_stabilize_threads. */
3179
3180 if (WIFSTOPPED (w)
3181 && WSTOPSIG (w) != SIGTRAP
3182 && supports_fast_tracepoints ()
3183 && agent_loaded_p ())
3184 {
3185 if (debug_threads)
3186 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3187 "to defer or adjust it.\n",
3188 WSTOPSIG (w), lwpid_of (current_thread));
3189
3190 /* Allow debugging the jump pad itself. */
3191 if (current_thread->last_resume_kind != resume_step
3192 && maybe_move_out_of_jump_pad (event_child, &w))
3193 {
3194 enqueue_one_deferred_signal (event_child, &w);
3195
3196 if (debug_threads)
3197 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3198 WSTOPSIG (w), lwpid_of (current_thread));
3199
3200 linux_resume_one_lwp (event_child, 0, 0, NULL);
3201
3202 return ignore_event (ourstatus);
3203 }
3204 }
3205
3206 if (event_child->collecting_fast_tracepoint)
3207 {
3208 if (debug_threads)
3209 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3210 "Check if we're already there.\n",
3211 lwpid_of (current_thread),
3212 event_child->collecting_fast_tracepoint);
3213
3214 trace_event = 1;
3215
3216 event_child->collecting_fast_tracepoint
3217 = linux_fast_tracepoint_collecting (event_child, NULL);
3218
3219 if (event_child->collecting_fast_tracepoint != 1)
3220 {
3221 /* No longer need this breakpoint. */
3222 if (event_child->exit_jump_pad_bkpt != NULL)
3223 {
3224 if (debug_threads)
3225 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3226 "stopping all threads momentarily.\n");
3227
3228 /* Other running threads could hit this breakpoint.
3229 We don't handle moribund locations like GDB does,
3230 instead we always pause all threads when removing
3231 breakpoints, so that any step-over or
3232 decr_pc_after_break adjustment is always taken
3233 care of while the breakpoint is still
3234 inserted. */
3235 stop_all_lwps (1, event_child);
3236
3237 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3238 event_child->exit_jump_pad_bkpt = NULL;
3239
3240 unstop_all_lwps (1, event_child);
3241
3242 gdb_assert (event_child->suspended >= 0);
3243 }
3244 }
3245
3246 if (event_child->collecting_fast_tracepoint == 0)
3247 {
3248 if (debug_threads)
3249 debug_printf ("fast tracepoint finished "
3250 "collecting successfully.\n");
3251
3252 /* We may have a deferred signal to report. */
3253 if (dequeue_one_deferred_signal (event_child, &w))
3254 {
3255 if (debug_threads)
3256 debug_printf ("dequeued one signal.\n");
3257 }
3258 else
3259 {
3260 if (debug_threads)
3261 debug_printf ("no deferred signals.\n");
3262
3263 if (stabilizing_threads)
3264 {
3265 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3266 ourstatus->value.sig = GDB_SIGNAL_0;
3267
3268 if (debug_threads)
3269 {
3270 debug_printf ("linux_wait_1 ret = %s, stopped "
3271 "while stabilizing threads\n",
3272 target_pid_to_str (ptid_of (current_thread)));
3273 debug_exit ();
3274 }
3275
3276 return ptid_of (current_thread);
3277 }
3278 }
3279 }
3280 }
3281
3282 /* Check whether GDB would be interested in this event. */
3283
3284 /* If GDB is not interested in this signal, don't stop other
3285 threads, and don't report it to GDB. Just resume the inferior
3286 right away. We do this for threading-related signals as well as
3287 any that GDB specifically requested we ignore. But never ignore
3288 SIGSTOP if we sent it ourselves, and do not ignore signals when
3289 stepping - they may require special handling to skip the signal
3290 handler. Also never ignore signals that could be caused by a
3291 breakpoint. */
3292 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3293 thread library? */
3294 if (WIFSTOPPED (w)
3295 && current_thread->last_resume_kind != resume_step
3296 && (
3297 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3298 (current_process ()->priv->thread_db != NULL
3299 && (WSTOPSIG (w) == __SIGRTMIN
3300 || WSTOPSIG (w) == __SIGRTMIN + 1))
3301 ||
3302 #endif
3303 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3304 && !(WSTOPSIG (w) == SIGSTOP
3305 && current_thread->last_resume_kind == resume_stop)
3306 && !linux_wstatus_maybe_breakpoint (w))))
3307 {
3308 siginfo_t info, *info_p;
3309
3310 if (debug_threads)
3311 debug_printf ("Ignored signal %d for LWP %ld.\n",
3312 WSTOPSIG (w), lwpid_of (current_thread));
3313
3314 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3315 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3316 info_p = &info;
3317 else
3318 info_p = NULL;
3319
3320 if (step_over_finished)
3321 {
3322 /* We cancelled this thread's step-over above. We still
3323 need to unsuspend all other LWPs, and set them back
3324 running again while the signal handler runs. */
3325 unsuspend_all_lwps (event_child);
3326
3327 /* Enqueue the pending signal info so that proceed_all_lwps
3328 doesn't lose it. */
3329 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3330
3331 proceed_all_lwps ();
3332 }
3333 else
3334 {
3335 linux_resume_one_lwp (event_child, event_child->stepping,
3336 WSTOPSIG (w), info_p);
3337 }
3338 return ignore_event (ourstatus);
3339 }
3340
3341 /* Note that all addresses are always "out of the step range" when
3342 there's no range to begin with. */
3343 in_step_range = lwp_in_step_range (event_child);
3344
3345 /* If GDB wanted this thread to single step, and the thread is out
3346 of the step range, we always want to report the SIGTRAP, and let
3347 GDB handle it. Watchpoints should always be reported. So should
3348 signals we can't explain. A SIGTRAP we can't explain could be a
3349 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3350 we do, we'll be able to handle GDB breakpoints on top of internal
3351 breakpoints, by handling the internal breakpoint and still
3352 reporting the event to GDB. If we don't, we're out of luck; GDB
3353 won't see the breakpoint hit. If we see a single-step event but
3354 the thread should be continuing, don't pass the trap to gdb.
3355 That indicates that we had previously finished a single-step but
3356 left the single-step pending -- see
3357 complete_ongoing_step_over. */
3358 report_to_gdb = (!maybe_internal_trap
3359 || (current_thread->last_resume_kind == resume_step
3360 && !in_step_range)
3361 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3362 || (!in_step_range
3363 && !bp_explains_trap
3364 && !trace_event
3365 && !step_over_finished
3366 && !(current_thread->last_resume_kind == resume_continue
3367 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3368 || (gdb_breakpoint_here (event_child->stop_pc)
3369 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3370 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3371 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3372
3373 run_breakpoint_commands (event_child->stop_pc);
3374
3375 /* We found no reason GDB would want us to stop. We either hit one
3376 of our own breakpoints, or finished an internal step GDB
3377 shouldn't know about. */
3378 if (!report_to_gdb)
3379 {
3380 if (debug_threads)
3381 {
3382 if (bp_explains_trap)
3383 debug_printf ("Hit a gdbserver breakpoint.\n");
3384 if (step_over_finished)
3385 debug_printf ("Step-over finished.\n");
3386 if (trace_event)
3387 debug_printf ("Tracepoint event.\n");
3388 if (lwp_in_step_range (event_child))
3389 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3390 paddress (event_child->stop_pc),
3391 paddress (event_child->step_range_start),
3392 paddress (event_child->step_range_end));
3393 }
3394
3395 /* We're not reporting this breakpoint to GDB, so apply the
3396 decr_pc_after_break adjustment to the inferior's regcache
3397 ourselves. */
3398
3399 if (the_low_target.set_pc != NULL)
3400 {
3401 struct regcache *regcache
3402 = get_thread_regcache (current_thread, 1);
3403 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3404 }
3405
3406 /* We may have finished stepping over a breakpoint. If so,
3407 we've stopped and suspended all LWPs momentarily except the
3408 stepping one. This is where we resume them all again. We're
3409 going to keep waiting, so use proceed, which handles stepping
3410 over the next breakpoint. */
3411 if (debug_threads)
3412 debug_printf ("proceeding all threads.\n");
3413
3414 if (step_over_finished)
3415 unsuspend_all_lwps (event_child);
3416
3417 proceed_all_lwps ();
3418 return ignore_event (ourstatus);
3419 }
3420
3421 if (debug_threads)
3422 {
3423 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3424 {
3425 char *str;
3426
3427 str = target_waitstatus_to_string (&event_child->waitstatus);
3428 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3429 lwpid_of (get_lwp_thread (event_child)), str);
3430 xfree (str);
3431 }
3432 if (current_thread->last_resume_kind == resume_step)
3433 {
3434 if (event_child->step_range_start == event_child->step_range_end)
3435 debug_printf ("GDB wanted to single-step, reporting event.\n");
3436 else if (!lwp_in_step_range (event_child))
3437 debug_printf ("Out of step range, reporting event.\n");
3438 }
3439 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3440 debug_printf ("Stopped by watchpoint.\n");
3441 else if (gdb_breakpoint_here (event_child->stop_pc))
3442 debug_printf ("Stopped by GDB breakpoint.\n");
3443 debug_printf ("Hit a non-gdbserver trap event.\n");
3445 }
3446
3447 /* Alright, we're going to report a stop. */
3448
3449 if (!stabilizing_threads)
3450 {
3451 /* In all-stop, stop all threads. */
3452 if (!non_stop)
3453 stop_all_lwps (0, NULL);
3454
3455 /* If we're not waiting for a specific LWP, choose an event LWP
3456 from among those that have had events. Giving equal priority
3457 to all LWPs that have had events helps prevent
3458 starvation. */
3459 if (ptid_equal (ptid, minus_one_ptid))
3460 {
3461 event_child->status_pending_p = 1;
3462 event_child->status_pending = w;
3463
3464 select_event_lwp (&event_child);
3465
3466 /* current_thread and event_child must stay in sync. */
3467 current_thread = get_lwp_thread (event_child);
3468
3469 event_child->status_pending_p = 0;
3470 w = event_child->status_pending;
3471 }
3472
3473 if (step_over_finished)
3474 {
3475 if (!non_stop)
3476 {
3477 /* If we were doing a step-over, all other threads but
3478 the stepping one had been paused in start_step_over,
3479 with their suspend counts incremented. We don't want
3480 to do a full unstop/unpause, because we're in
3481 all-stop mode (so we want threads stopped), but we
3482 still need to unsuspend the other threads, to
3483 decrement their `suspended' count back. */
3484 unsuspend_all_lwps (event_child);
3485 }
3486 else
3487 {
3488 /* If we just finished a step-over, then all threads had
3489 been momentarily paused. In all-stop, that's fine,
3490 we want threads stopped by now anyway. In non-stop,
3491 we need to re-resume threads that GDB wanted to be
3492 running. */
3493 unstop_all_lwps (1, event_child);
3494 }
3495 }
3496
3497 /* Stabilize threads (move out of jump pads). */
3498 if (!non_stop)
3499 stabilize_threads ();
3500 }
3501 else
3502 {
3503 /* If we just finished a step-over, then all threads had been
3504 momentarily paused. In all-stop, that's fine, we want
3505 threads stopped by now anyway. In non-stop, we need to
3506 re-resume threads that GDB wanted to be running. */
3507 if (step_over_finished)
3508 unstop_all_lwps (1, event_child);
3509 }
3510
3511 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3512 {
3513 /* If the reported event is an exit, fork, vfork or exec, let
3514 GDB know. */
3515 *ourstatus = event_child->waitstatus;
3516 /* Clear the event lwp's waitstatus since we handled it already. */
3517 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3518 }
3519 else
3520 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3521
3522 /* Now that we've selected our final event LWP, un-adjust its PC if
3523 it was a software breakpoint, and the client doesn't know we can
3524 adjust the breakpoint ourselves. */
3525 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3526 && !swbreak_feature)
3527 {
3528 int decr_pc = the_low_target.decr_pc_after_break;
3529
3530 if (decr_pc != 0)
3531 {
3532 struct regcache *regcache
3533 = get_thread_regcache (current_thread, 1);
3534 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3535 }
3536 }
3537
3538 if (current_thread->last_resume_kind == resume_stop
3539 && WSTOPSIG (w) == SIGSTOP)
3540 {
3541 /* A thread that has been requested to stop by GDB with vCont;t,
3542 and it stopped cleanly, so report as SIG0. The use of
3543 SIGSTOP is an implementation detail. */
3544 ourstatus->value.sig = GDB_SIGNAL_0;
3545 }
3546 else if (current_thread->last_resume_kind == resume_stop
3547 && WSTOPSIG (w) != SIGSTOP)
3548 {
3549 /* A thread that has been requested to stop by GDB with vCont;t,
3550 but it stopped for other reasons. */
3551 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3552 }
3553 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3554 {
3555 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3556 }
3557
3558 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3559
3560 if (debug_threads)
3561 {
3562 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3563 target_pid_to_str (ptid_of (current_thread)),
3564 ourstatus->kind, ourstatus->value.sig);
3565 debug_exit ();
3566 }
3567
3568 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3569 return filter_exit_event (event_child, ourstatus);
3570
3571 return ptid_of (current_thread);
3572 }
3573
3574 /* Get rid of any pending event in the pipe. */
3575 static void
3576 async_file_flush (void)
3577 {
3578 int ret;
3579 char buf;
3580
3581 do
3582 ret = read (linux_event_pipe[0], &buf, 1);
3583 while (ret >= 0 || (ret == -1 && errno == EINTR));
3584 }
3585
3586 /* Put something in the pipe, so the event loop wakes up. */
3587 static void
3588 async_file_mark (void)
3589 {
3590 int ret;
3591
3592 async_file_flush ();
3593
3594 do
3595 ret = write (linux_event_pipe[1], "+", 1);
3596 while (ret == 0 || (ret == -1 && errno == EINTR));
3597
3598 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3599 be awakened anyway. */
3600 }
3601
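/* Editor's illustration, not part of the original source. The two
   helpers above implement the self-pipe trick. For their read/write
   loops to terminate rather than block, both ends of linux_event_pipe
   are assumed to have been made non-blocking when the pipe was
   created, along these lines:

     pipe (linux_event_pipe);
     fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
     fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
*/
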
3602 static ptid_t
3603 linux_wait (ptid_t ptid,
3604 struct target_waitstatus *ourstatus, int target_options)
3605 {
3606 ptid_t event_ptid;
3607
3608 /* Flush the async file first. */
3609 if (target_is_async_p ())
3610 async_file_flush ();
3611
3612 do
3613 {
3614 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3615 }
3616 while ((target_options & TARGET_WNOHANG) == 0
3617 && ptid_equal (event_ptid, null_ptid)
3618 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3619
3620 /* If at least one stop was reported, there may be more. A single
3621 SIGCHLD can signal more than one child stop. */
3622 if (target_is_async_p ()
3623 && (target_options & TARGET_WNOHANG) != 0
3624 && !ptid_equal (event_ptid, null_ptid))
3625 async_file_mark ();
3626
3627 return event_ptid;
3628 }
3629
3630 /* Send a signal to an LWP. */
3631
3632 static int
3633 kill_lwp (unsigned long lwpid, int signo)
3634 {
3635 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3636 fails, then we are not using nptl threads and we should be using kill. */
3637
3638 #ifdef __NR_tkill
3639 {
3640 static int tkill_failed;
3641
3642 if (!tkill_failed)
3643 {
3644 int ret;
3645
3646 errno = 0;
3647 ret = syscall (__NR_tkill, lwpid, signo);
3648 if (errno != ENOSYS)
3649 return ret;
3650 tkill_failed = 1;
3651 }
3652 }
3653 #endif
3654
3655 return kill (lwpid, signo);
3656 }
3657
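/* Editor's illustration, not part of the original source. The
   distinction kill_lwp papers over: tkill targets one specific kernel
   thread, while kill targets a process and lets the kernel pick any
   thread, which is wrong for the per-LWP SIGSTOPs sent below:

     syscall (__NR_tkill, lwpid, SIGSTOP);  /* exactly this LWP */
     kill (pid, SIGSTOP);                   /* any thread of PID */
*/
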
3658 void
3659 linux_stop_lwp (struct lwp_info *lwp)
3660 {
3661 send_sigstop (lwp);
3662 }
3663
3664 static void
3665 send_sigstop (struct lwp_info *lwp)
3666 {
3667 int pid;
3668
3669 pid = lwpid_of (get_lwp_thread (lwp));
3670
3671 /* If we already have a pending stop signal for this process, don't
3672 send another. */
3673 if (lwp->stop_expected)
3674 {
3675 if (debug_threads)
3676 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3677
3678 return;
3679 }
3680
3681 if (debug_threads)
3682 debug_printf ("Sending sigstop to lwp %d\n", pid);
3683
3684 lwp->stop_expected = 1;
3685 kill_lwp (pid, SIGSTOP);
3686 }
3687
3688 static int
3689 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3690 {
3691 struct thread_info *thread = (struct thread_info *) entry;
3692 struct lwp_info *lwp = get_thread_lwp (thread);
3693
3694 /* Ignore EXCEPT. */
3695 if (lwp == except)
3696 return 0;
3697
3698 if (lwp->stopped)
3699 return 0;
3700
3701 send_sigstop (lwp);
3702 return 0;
3703 }
3704
3705 /* Increment the suspend count of an LWP, and stop it, if not stopped
3706 yet. */
3707 static int
3708 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3709 void *except)
3710 {
3711 struct thread_info *thread = (struct thread_info *) entry;
3712 struct lwp_info *lwp = get_thread_lwp (thread);
3713
3714 /* Ignore EXCEPT. */
3715 if (lwp == except)
3716 return 0;
3717
3718 lwp_suspended_inc (lwp);
3719
3720 return send_sigstop_callback (entry, except);
3721 }
3722
3723 static void
3724 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3725 {
3726 /* Store the exit status for later. */
3727 lwp->status_pending_p = 1;
3728 lwp->status_pending = wstat;
3729
3730 /* Store in waitstatus as well, as there's nothing else to process
3731 for this event. */
3732 if (WIFEXITED (wstat))
3733 {
3734 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3735 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3736 }
3737 else if (WIFSIGNALED (wstat))
3738 {
3739 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3740 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3741 }
3742
3743 /* Prevent trying to stop it. */
3744 lwp->stopped = 1;
3745
3746 /* No further stops are expected from a dead lwp. */
3747 lwp->stop_expected = 0;
3748 }
3749
3750 /* Return true if LWP has exited already, and has a pending exit event
3751 to report to GDB. */
3752
3753 static int
3754 lwp_is_marked_dead (struct lwp_info *lwp)
3755 {
3756 return (lwp->status_pending_p
3757 && (WIFEXITED (lwp->status_pending)
3758 || WIFSIGNALED (lwp->status_pending)));
3759 }
3760
3761 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3762
3763 static void
3764 wait_for_sigstop (void)
3765 {
3766 struct thread_info *saved_thread;
3767 ptid_t saved_tid;
3768 int wstat;
3769 int ret;
3770
3771 saved_thread = current_thread;
3772 if (saved_thread != NULL)
3773 saved_tid = saved_thread->entry.id;
3774 else
3775 saved_tid = null_ptid; /* avoid bogus unused warning */
3776
3777 if (debug_threads)
3778 debug_printf ("wait_for_sigstop: pulling events\n");
3779
3780 /* Passing NULL_PTID as filter indicates we want all events to be
3781 left pending. Eventually this returns when there are no
3782 unwaited-for children left. */
3783 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3784 &wstat, __WALL);
3785 gdb_assert (ret == -1);
3786
3787 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3788 current_thread = saved_thread;
3789 else
3790 {
3791 if (debug_threads)
3792 debug_printf ("Previously current thread died.\n");
3793
3794 /* We can't change the current inferior behind GDB's back,
3795 otherwise, a subsequent command may apply to the wrong
3796 process. */
3797 current_thread = NULL;
3798 }
3799 }
3800
3801 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3802 move it out, because we need to report the stop event to GDB. For
3803 example, if the user puts a breakpoint in the jump pad, it's
3804 because she wants to debug it. */
3805
3806 static int
3807 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3808 {
3809 struct thread_info *thread = (struct thread_info *) entry;
3810 struct lwp_info *lwp = get_thread_lwp (thread);
3811
3812 if (lwp->suspended != 0)
3813 {
3814 internal_error (__FILE__, __LINE__,
3815 "LWP %ld is suspended, suspended=%d\n",
3816 lwpid_of (thread), lwp->suspended);
3817 }
3818 gdb_assert (lwp->stopped);
3819
 3820	  /* Allow debugging the jump pad, gdb_collect, etc.  */
3821 return (supports_fast_tracepoints ()
3822 && agent_loaded_p ()
3823 && (gdb_breakpoint_here (lwp->stop_pc)
3824 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3825 || thread->last_resume_kind == resume_step)
3826 && linux_fast_tracepoint_collecting (lwp, NULL));
3827 }
3828
3829 static void
3830 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3831 {
3832 struct thread_info *thread = (struct thread_info *) entry;
3833 struct thread_info *saved_thread;
3834 struct lwp_info *lwp = get_thread_lwp (thread);
3835 int *wstat;
3836
3837 if (lwp->suspended != 0)
3838 {
3839 internal_error (__FILE__, __LINE__,
3840 "LWP %ld is suspended, suspended=%d\n",
3841 lwpid_of (thread), lwp->suspended);
3842 }
3843 gdb_assert (lwp->stopped);
3844
3845 /* For gdb_breakpoint_here. */
3846 saved_thread = current_thread;
3847 current_thread = thread;
3848
3849 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3850
3851 /* Allow debugging the jump pad, gdb_collect, etc. */
3852 if (!gdb_breakpoint_here (lwp->stop_pc)
3853 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3854 && thread->last_resume_kind != resume_step
3855 && maybe_move_out_of_jump_pad (lwp, wstat))
3856 {
3857 if (debug_threads)
3858 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3859 lwpid_of (thread));
3860
3861 if (wstat)
3862 {
3863 lwp->status_pending_p = 0;
3864 enqueue_one_deferred_signal (lwp, wstat);
3865
3866 if (debug_threads)
3867 debug_printf ("Signal %d for LWP %ld deferred "
3868 "(in jump pad)\n",
3869 WSTOPSIG (*wstat), lwpid_of (thread));
3870 }
3871
3872 linux_resume_one_lwp (lwp, 0, 0, NULL);
3873 }
3874 else
3875 lwp_suspended_inc (lwp);
3876
3877 current_thread = saved_thread;
3878 }
3879
3880 static int
3881 lwp_running (struct inferior_list_entry *entry, void *data)
3882 {
3883 struct thread_info *thread = (struct thread_info *) entry;
3884 struct lwp_info *lwp = get_thread_lwp (thread);
3885
3886 if (lwp_is_marked_dead (lwp))
3887 return 0;
3888 if (lwp->stopped)
3889 return 0;
3890 return 1;
3891 }
3892
3893 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3894 If SUSPEND, then also increase the suspend count of every LWP,
3895 except EXCEPT. */
3896
3897 static void
3898 stop_all_lwps (int suspend, struct lwp_info *except)
3899 {
3900 /* Should not be called recursively. */
3901 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3902
3903 if (debug_threads)
3904 {
3905 debug_enter ();
3906 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3907 suspend ? "stop-and-suspend" : "stop",
3908 except != NULL
3909 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3910 : "none");
3911 }
3912
3913 stopping_threads = (suspend
3914 ? STOPPING_AND_SUSPENDING_THREADS
3915 : STOPPING_THREADS);
3916
3917 if (suspend)
3918 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3919 else
3920 find_inferior (&all_threads, send_sigstop_callback, except);
3921 wait_for_sigstop ();
3922 stopping_threads = NOT_STOPPING_THREADS;
3923
3924 if (debug_threads)
3925 {
3926 debug_printf ("stop_all_lwps done, setting stopping_threads "
3927 "back to !stopping\n");
3928 debug_exit ();
3929 }
3930 }
3931
3932 /* Enqueue one signal in the chain of signals which need to be
3933 delivered to this process on next resume. */
3934
3935 static void
3936 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3937 {
3938 struct pending_signals *p_sig = XNEW (struct pending_signals);
3939
3940 p_sig->prev = lwp->pending_signals;
3941 p_sig->signal = signal;
3942 if (info == NULL)
3943 memset (&p_sig->info, 0, sizeof (siginfo_t));
3944 else
3945 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3946 lwp->pending_signals = p_sig;
3947 }
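/* Signals are pushed at the head of the chain above, so delivery
   must walk the PREV links to the tail to stay FIFO.  A minimal
   sketch of that walk (illustrative only -- the helper name is made
   up; the real dequeue is inline in linux_resume_one_lwp_throw
   below, which also restores the siginfo with PTRACE_SETSIGINFO):  */

static int
dequeue_oldest_pending_signal_sketch (struct lwp_info *lwp, siginfo_t *info)
{
  struct pending_signals **p_sig = &lwp->pending_signals;
  int signal;

  if (*p_sig == NULL)
    return 0;

  /* Walk to the oldest entry, i.e. the tail of the chain.  */
  while ((*p_sig)->prev != NULL)
    p_sig = &(*p_sig)->prev;

  signal = (*p_sig)->signal;
  *info = (*p_sig)->info;
  free (*p_sig);
  *p_sig = NULL;
  return signal;
}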
3948
3949 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3950 SIGNAL is nonzero, give it that signal. */
3951
3952 static void
3953 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3954 int step, int signal, siginfo_t *info)
3955 {
3956 struct thread_info *thread = get_lwp_thread (lwp);
3957 struct thread_info *saved_thread;
3958 int fast_tp_collecting;
3959 struct process_info *proc = get_thread_process (thread);
3960
 3961	  /* Note that the target description may not be initialised
 3962	     (proc->tdesc == NULL) at this point, because the program hasn't
 3963	     stopped at its first instruction yet.  This happens while GDBserver
 3964	     is skipping the extra traps from the wrapper program (see option
 3965	     --wrapper).  Code in this function that requires register access
 3966	     should be guarded by a check on proc->tdesc. */
3967
3968 if (lwp->stopped == 0)
3969 return;
3970
3971 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
3972
3973 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3974
3975 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3976
3977 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3978 user used the "jump" command, or "set $pc = foo"). */
3979 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3980 {
3981 /* Collecting 'while-stepping' actions doesn't make sense
3982 anymore. */
3983 release_while_stepping_state_list (thread);
3984 }
3985
3986 /* If we have pending signals or status, and a new signal, enqueue the
3987 signal. Also enqueue the signal if we are waiting to reinsert a
3988 breakpoint; it will be picked up again below. */
3989 if (signal != 0
3990 && (lwp->status_pending_p
3991 || lwp->pending_signals != NULL
3992 || lwp->bp_reinsert != 0
3993 || fast_tp_collecting))
 3994	    enqueue_pending_signal (lwp, signal, info);
4005
4006 if (lwp->status_pending_p)
4007 {
4008 if (debug_threads)
4009 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
4010 " has pending status\n",
4011 lwpid_of (thread), step ? "step" : "continue", signal,
4012 lwp->stop_expected ? "expected" : "not expected");
4013 return;
4014 }
4015
4016 saved_thread = current_thread;
4017 current_thread = thread;
4018
4019 if (debug_threads)
4020 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4021 lwpid_of (thread), step ? "step" : "continue", signal,
4022 lwp->stop_expected ? "expected" : "not expected");
4023
4024 /* This bit needs some thinking about. If we get a signal that
4025 we must report while a single-step reinsert is still pending,
4026 we often end up resuming the thread. It might be better to
4027 (ew) allow a stack of pending events; then we could be sure that
4028 the reinsert happened right away and not lose any signals.
4029
4030 Making this stack would also shrink the window in which breakpoints are
4031 uninserted (see comment in linux_wait_for_lwp) but not enough for
4032 complete correctness, so it won't solve that problem. It may be
4033 worthwhile just to solve this one, however. */
4034 if (lwp->bp_reinsert != 0)
4035 {
4036 if (debug_threads)
4037 debug_printf (" pending reinsert at 0x%s\n",
4038 paddress (lwp->bp_reinsert));
4039
4040 if (can_hardware_single_step ())
4041 {
4042 if (fast_tp_collecting == 0)
4043 {
4044 if (step == 0)
4045 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4046 if (lwp->suspended)
4047 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4048 lwp->suspended);
4049 }
4050
4051 step = 1;
4052 }
4053
4054 /* Postpone any pending signal. It was enqueued above. */
4055 signal = 0;
4056 }
4057
4058 if (fast_tp_collecting == 1)
4059 {
4060 if (debug_threads)
4061 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4062 " (exit-jump-pad-bkpt)\n",
4063 lwpid_of (thread));
4064
4065 /* Postpone any pending signal. It was enqueued above. */
4066 signal = 0;
4067 }
4068 else if (fast_tp_collecting == 2)
4069 {
4070 if (debug_threads)
4071 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4072 " single-stepping\n",
4073 lwpid_of (thread));
4074
4075 if (can_hardware_single_step ())
4076 step = 1;
4077 else
4078 {
4079 internal_error (__FILE__, __LINE__,
4080 "moving out of jump pad single-stepping"
4081 " not implemented on this target");
4082 }
4083
4084 /* Postpone any pending signal. It was enqueued above. */
4085 signal = 0;
4086 }
4087
 4088	  /* If we have while-stepping actions in this thread, set it stepping.
 4089	     If we have a signal to deliver, it may or may not be set to
 4090	     SIG_IGN; we don't know.  Assume so, and allow collecting
4091 while-stepping into a signal handler. A possible smart thing to
4092 do would be to set an internal breakpoint at the signal return
4093 address, continue, and carry on catching this while-stepping
4094 action only when that breakpoint is hit. A future
4095 enhancement. */
4096 if (thread->while_stepping != NULL
4097 && can_hardware_single_step ())
4098 {
4099 if (debug_threads)
4100 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4101 lwpid_of (thread));
4102 step = 1;
4103 }
4104
4105 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4106 {
4107 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4108
4109 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4110
4111 if (debug_threads)
4112 {
4113 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4114 (long) lwp->stop_pc);
4115 }
4116 }
4117
4118 /* If we have pending signals, consume one unless we are trying to
4119 reinsert a breakpoint or we're trying to finish a fast tracepoint
4120 collect. */
4121 if (lwp->pending_signals != NULL
4122 && lwp->bp_reinsert == 0
4123 && fast_tp_collecting == 0)
4124 {
4125 struct pending_signals **p_sig;
4126
4127 p_sig = &lwp->pending_signals;
4128 while ((*p_sig)->prev != NULL)
4129 p_sig = &(*p_sig)->prev;
4130
4131 signal = (*p_sig)->signal;
4132 if ((*p_sig)->info.si_signo != 0)
4133 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4134 &(*p_sig)->info);
4135
4136 free (*p_sig);
4137 *p_sig = NULL;
4138 }
4139
4140 if (the_low_target.prepare_to_resume != NULL)
4141 the_low_target.prepare_to_resume (lwp);
4142
4143 regcache_invalidate_thread (thread);
4144 errno = 0;
4145 lwp->stepping = step;
4146 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
4147 (PTRACE_TYPE_ARG3) 0,
4148 /* Coerce to a uintptr_t first to avoid potential gcc warning
4149 of coercing an 8 byte integer to a 4 byte pointer. */
4150 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4151
4152 current_thread = saved_thread;
4153 if (errno)
4154 perror_with_name ("resuming thread");
4155
4156 /* Successfully resumed. Clear state that no longer makes sense,
4157 and mark the LWP as running. Must not do this before resuming
4158 otherwise if that fails other code will be confused. E.g., we'd
4159 later try to stop the LWP and hang forever waiting for a stop
4160 status. Note that we must not throw after this is cleared,
4161 otherwise handle_zombie_lwp_error would get confused. */
4162 lwp->stopped = 0;
4163 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4164 }
4165
4166 /* Called when we try to resume a stopped LWP and that errors out. If
 4167	   the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
 4168	   or about to become one), discard the error, clear any pending status
4169 the LWP may have, and return true (we'll collect the exit status
4170 soon enough). Otherwise, return false. */
4171
4172 static int
4173 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4174 {
4175 struct thread_info *thread = get_lwp_thread (lp);
4176
4177 /* If we get an error after resuming the LWP successfully, we'd
4178 confuse !T state for the LWP being gone. */
4179 gdb_assert (lp->stopped);
4180
4181 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4182 because even if ptrace failed with ESRCH, the tracee may be "not
4183 yet fully dead", but already refusing ptrace requests. In that
4184 case the tracee has 'R (Running)' state for a little bit
4185 (observed in Linux 3.18). See also the note on ESRCH in the
4186 ptrace(2) man page. Instead, check whether the LWP has any state
4187 other than ptrace-stopped. */
4188
4189 /* Don't assume anything if /proc/PID/status can't be read. */
4190 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4191 {
4192 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4193 lp->status_pending_p = 0;
4194 return 1;
4195 }
4196 return 0;
4197 }
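/* For reference, the check above boils down to reading the "State:"
   line of /proc/PID/status and testing for the ptrace-stop state.
   A minimal sketch of that idea (illustrative only -- the helper
   name is made up; the real linux_proc_pid_is_trace_stopped_nowarn
   lives in nat/linux-procfs.c, and kernels vary in the exact state
   letters they report):  */

static int
pid_is_trace_stopped_sketch (int pid)
{
  char filename[64];
  char line[256];
  char state = '\0';
  FILE *f;

  xsnprintf (filename, sizeof filename, "/proc/%d/status", pid);
  f = fopen (filename, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof line, f) != NULL)
    if (sscanf (line, "State: %c", &state) == 1)
      break;

  fclose (f);

  /* 't' is "tracing stop"; older kernels report 'T' for both
     job-control and ptrace stops.  */
  return state == 't' || state == 'T';
}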
4198
4199 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4200 disappears while we try to resume it. */
4201
4202 static void
4203 linux_resume_one_lwp (struct lwp_info *lwp,
4204 int step, int signal, siginfo_t *info)
4205 {
4206 TRY
4207 {
4208 linux_resume_one_lwp_throw (lwp, step, signal, info);
4209 }
4210 CATCH (ex, RETURN_MASK_ERROR)
4211 {
4212 if (!check_ptrace_stopped_lwp_gone (lwp))
4213 throw_exception (ex);
4214 }
4215 END_CATCH
4216 }
4217
4218 struct thread_resume_array
4219 {
4220 struct thread_resume *resume;
4221 size_t n;
4222 };
4223
4224 /* This function is called once per thread via find_inferior.
4225 ARG is a pointer to a thread_resume_array struct.
4226 We look up the thread specified by ENTRY in ARG, and mark the thread
4227 with a pointer to the appropriate resume request.
4228
 4229	   This algorithm is O(threads * resume elements), but the number of
 4230	   resume elements is small (and will remain small at least until GDB
 4231	   supports thread suspension). */
4232
4233 static int
4234 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4235 {
4236 struct thread_info *thread = (struct thread_info *) entry;
4237 struct lwp_info *lwp = get_thread_lwp (thread);
4238 int ndx;
4239 struct thread_resume_array *r;
4240
4241 r = (struct thread_resume_array *) arg;
4242
4243 for (ndx = 0; ndx < r->n; ndx++)
4244 {
4245 ptid_t ptid = r->resume[ndx].thread;
4246 if (ptid_equal (ptid, minus_one_ptid)
4247 || ptid_equal (ptid, entry->id)
4248 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4249 of PID'. */
4250 || (ptid_get_pid (ptid) == pid_of (thread)
4251 && (ptid_is_pid (ptid)
4252 || ptid_get_lwp (ptid) == -1)))
4253 {
4254 if (r->resume[ndx].kind == resume_stop
4255 && thread->last_resume_kind == resume_stop)
4256 {
4257 if (debug_threads)
4258 debug_printf ("already %s LWP %ld at GDB's request\n",
4259 (thread->last_status.kind
4260 == TARGET_WAITKIND_STOPPED)
4261 ? "stopped"
4262 : "stopping",
4263 lwpid_of (thread));
4264
4265 continue;
4266 }
4267
4268 lwp->resume = &r->resume[ndx];
4269 thread->last_resume_kind = lwp->resume->kind;
4270
4271 lwp->step_range_start = lwp->resume->step_range_start;
4272 lwp->step_range_end = lwp->resume->step_range_end;
4273
4274 /* If we had a deferred signal to report, dequeue one now.
4275 This can happen if LWP gets more than one signal while
4276 trying to get out of a jump pad. */
4277 if (lwp->stopped
4278 && !lwp->status_pending_p
4279 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4280 {
4281 lwp->status_pending_p = 1;
4282
4283 if (debug_threads)
4284 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4285 "leaving status pending.\n",
4286 WSTOPSIG (lwp->status_pending),
4287 lwpid_of (thread));
4288 }
4289
4290 return 0;
4291 }
4292 }
4293
4294 /* No resume action for this thread. */
4295 lwp->resume = NULL;
4296
4297 return 0;
4298 }
4299
4300 /* find_inferior callback for linux_resume.
4301 Set *FLAG_P if this lwp has an interesting status pending. */
4302
4303 static int
4304 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4305 {
4306 struct thread_info *thread = (struct thread_info *) entry;
4307 struct lwp_info *lwp = get_thread_lwp (thread);
4308
4309 /* LWPs which will not be resumed are not interesting, because
4310 we might not wait for them next time through linux_wait. */
4311 if (lwp->resume == NULL)
4312 return 0;
4313
4314 if (thread_still_has_status_pending_p (thread))
4315 * (int *) flag_p = 1;
4316
4317 return 0;
4318 }
4319
4320 /* Return 1 if this lwp that GDB wants running is stopped at an
4321 internal breakpoint that we need to step over. It assumes that any
4322 required STOP_PC adjustment has already been propagated to the
4323 inferior's regcache. */
4324
4325 static int
4326 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4327 {
4328 struct thread_info *thread = (struct thread_info *) entry;
4329 struct lwp_info *lwp = get_thread_lwp (thread);
4330 struct thread_info *saved_thread;
4331 CORE_ADDR pc;
4332 struct process_info *proc = get_thread_process (thread);
4333
 4334	  /* GDBserver is skipping the extra traps from the wrapper program;
 4335	     no step-over is needed. */
4336 if (proc->tdesc == NULL)
4337 return 0;
4338
4339 /* LWPs which will not be resumed are not interesting, because we
4340 might not wait for them next time through linux_wait. */
4341
4342 if (!lwp->stopped)
4343 {
4344 if (debug_threads)
4345 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4346 lwpid_of (thread));
4347 return 0;
4348 }
4349
4350 if (thread->last_resume_kind == resume_stop)
4351 {
4352 if (debug_threads)
4353 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4354 " stopped\n",
4355 lwpid_of (thread));
4356 return 0;
4357 }
4358
4359 gdb_assert (lwp->suspended >= 0);
4360
4361 if (lwp->suspended)
4362 {
4363 if (debug_threads)
4364 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4365 lwpid_of (thread));
4366 return 0;
4367 }
4368
4369 if (!lwp->need_step_over)
4370 {
4371 if (debug_threads)
4372 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4373 }
4374
4375 if (lwp->status_pending_p)
4376 {
4377 if (debug_threads)
4378 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4379 " status.\n",
4380 lwpid_of (thread));
4381 return 0;
4382 }
4383
4384 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4385 or we have. */
4386 pc = get_pc (lwp);
4387
4388 /* If the PC has changed since we stopped, then don't do anything,
4389 and let the breakpoint/tracepoint be hit. This happens if, for
4390 instance, GDB handled the decr_pc_after_break subtraction itself,
4391 GDB is OOL stepping this thread, or the user has issued a "jump"
4392 command, or poked thread's registers herself. */
4393 if (pc != lwp->stop_pc)
4394 {
4395 if (debug_threads)
4396 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4397 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4398 lwpid_of (thread),
4399 paddress (lwp->stop_pc), paddress (pc));
4400
4401 lwp->need_step_over = 0;
4402 return 0;
4403 }
4404
4405 saved_thread = current_thread;
4406 current_thread = thread;
4407
4408 /* We can only step over breakpoints we know about. */
4409 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4410 {
 4411	      /* Don't step over a breakpoint that GDB expects to hit,
 4412		 though.  If the condition is being evaluated on the target's side
 4413		 and it evaluates to false, step over this breakpoint as well. */
4414 if (gdb_breakpoint_here (pc)
4415 && gdb_condition_true_at_breakpoint (pc)
4416 && gdb_no_commands_at_breakpoint (pc))
4417 {
4418 if (debug_threads)
4419 debug_printf ("Need step over [LWP %ld]? yes, but found"
4420 " GDB breakpoint at 0x%s; skipping step over\n",
4421 lwpid_of (thread), paddress (pc));
4422
4423 current_thread = saved_thread;
4424 return 0;
4425 }
4426 else
4427 {
4428 if (debug_threads)
4429 debug_printf ("Need step over [LWP %ld]? yes, "
4430 "found breakpoint at 0x%s\n",
4431 lwpid_of (thread), paddress (pc));
4432
4433 /* We've found an lwp that needs stepping over --- return 1 so
4434 that find_inferior stops looking. */
4435 current_thread = saved_thread;
4436
4437 /* If the step over is cancelled, this is set again. */
4438 lwp->need_step_over = 0;
4439 return 1;
4440 }
4441 }
4442
4443 current_thread = saved_thread;
4444
4445 if (debug_threads)
4446 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4447 " at 0x%s\n",
4448 lwpid_of (thread), paddress (pc));
4449
4450 return 0;
4451 }
4452
 4453	/* Start a step-over operation on LWP.  When LWP is stopped at a
 4454	   breakpoint, to make progress, we need to move the breakpoint out
 4455	   of the way.  If we let other threads run while we do that, they may
 4456	   pass by the breakpoint location and miss hitting it.  To avoid
 4457	   that, a step-over momentarily stops all threads while LWP is
 4458	   single-stepped with the breakpoint temporarily uninserted from
 4459	   the inferior.  When the single-step finishes, we reinsert the
 4460	   breakpoint, and let all threads that are supposed to be running,
 4461	   run again.
4462
4463 On targets that don't support hardware single-step, we don't
4464 currently support full software single-stepping. Instead, we only
4465 support stepping over the thread event breakpoint, by asking the
4466 low target where to place a reinsert breakpoint. Since this
4467 routine assumes the breakpoint being stepped over is a thread event
 4468	   breakpoint, the return address of the current function is usually
 4469	   a good enough place to set the reinsert breakpoint. */
4470
4471 static int
4472 start_step_over (struct lwp_info *lwp)
4473 {
4474 struct thread_info *thread = get_lwp_thread (lwp);
4475 struct thread_info *saved_thread;
4476 CORE_ADDR pc;
4477 int step;
4478
4479 if (debug_threads)
4480 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4481 lwpid_of (thread));
4482
4483 stop_all_lwps (1, lwp);
4484
4485 if (lwp->suspended != 0)
4486 {
4487 internal_error (__FILE__, __LINE__,
4488 "LWP %ld suspended=%d\n", lwpid_of (thread),
4489 lwp->suspended);
4490 }
4491
4492 if (debug_threads)
4493 debug_printf ("Done stopping all threads for step-over.\n");
4494
4495 /* Note, we should always reach here with an already adjusted PC,
4496 either by GDB (if we're resuming due to GDB's request), or by our
4497 caller, if we just finished handling an internal breakpoint GDB
4498 shouldn't care about. */
4499 pc = get_pc (lwp);
4500
4501 saved_thread = current_thread;
4502 current_thread = thread;
4503
4504 lwp->bp_reinsert = pc;
4505 uninsert_breakpoints_at (pc);
4506 uninsert_fast_tracepoint_jumps_at (pc);
4507
4508 if (can_hardware_single_step ())
4509 {
4510 step = 1;
4511 }
4512 else
4513 {
4514 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4515 set_reinsert_breakpoint (raddr);
4516 step = 0;
4517 }
4518
4519 current_thread = saved_thread;
4520
4521 linux_resume_one_lwp (lwp, step, 0, NULL);
4522
4523 /* Require next event from this LWP. */
4524 step_over_bkpt = thread->entry.id;
4525 return 1;
4526 }
4527
4528 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4529 start_step_over, if still there, and delete any reinsert
4530 breakpoints we've set, on non hardware single-step targets. */
4531
4532 static int
4533 finish_step_over (struct lwp_info *lwp)
4534 {
4535 if (lwp->bp_reinsert != 0)
4536 {
4537 if (debug_threads)
4538 debug_printf ("Finished step over.\n");
4539
4540 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4541 may be no breakpoint to reinsert there by now. */
4542 reinsert_breakpoints_at (lwp->bp_reinsert);
4543 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4544
4545 lwp->bp_reinsert = 0;
4546
4547 /* Delete any software-single-step reinsert breakpoints. No
4548 longer needed. We don't have to worry about other threads
4549 hitting this trap, and later not being able to explain it,
4550 because we were stepping over a breakpoint, and we hold all
4551 threads but LWP stopped while doing that. */
4552 if (!can_hardware_single_step ())
4553 delete_reinsert_breakpoints ();
4554
4555 step_over_bkpt = null_ptid;
4556 return 1;
4557 }
4558 else
4559 return 0;
4560 }
4561
4562 /* If there's a step over in progress, wait until all threads stop
4563 (that is, until the stepping thread finishes its step), and
4564 unsuspend all lwps. The stepping thread ends with its status
4565 pending, which is processed later when we get back to processing
4566 events. */
4567
4568 static void
4569 complete_ongoing_step_over (void)
4570 {
4571 if (!ptid_equal (step_over_bkpt, null_ptid))
4572 {
4573 struct lwp_info *lwp;
4574 int wstat;
4575 int ret;
4576
4577 if (debug_threads)
4578 debug_printf ("detach: step over in progress, finish it first\n");
4579
4580 /* Passing NULL_PTID as filter indicates we want all events to
4581 be left pending. Eventually this returns when there are no
4582 unwaited-for children left. */
4583 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4584 &wstat, __WALL);
4585 gdb_assert (ret == -1);
4586
4587 lwp = find_lwp_pid (step_over_bkpt);
4588 if (lwp != NULL)
4589 finish_step_over (lwp);
4590 step_over_bkpt = null_ptid;
4591 unsuspend_all_lwps (lwp);
4592 }
4593 }
4594
4595 /* This function is called once per thread. We check the thread's resume
4596 request, which will tell us whether to resume, step, or leave the thread
4597 stopped; and what signal, if any, it should be sent.
4598
4599 For threads which we aren't explicitly told otherwise, we preserve
4600 the stepping flag; this is used for stepping over gdbserver-placed
4601 breakpoints.
4602
 4603	   If a pending status was set in any thread, we queue any needed
4604 signals, since we won't actually resume. We already have a pending
4605 event to report, so we don't need to preserve any step requests;
4606 they should be re-issued if necessary. */
4607
4608 static int
4609 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4610 {
4611 struct thread_info *thread = (struct thread_info *) entry;
4612 struct lwp_info *lwp = get_thread_lwp (thread);
4613 int step;
4614 int leave_all_stopped = * (int *) arg;
4615 int leave_pending;
4616
4617 if (lwp->resume == NULL)
4618 return 0;
4619
4620 if (lwp->resume->kind == resume_stop)
4621 {
4622 if (debug_threads)
4623 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4624
4625 if (!lwp->stopped)
4626 {
4627 if (debug_threads)
4628 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4629
4630 /* Stop the thread, and wait for the event asynchronously,
4631 through the event loop. */
4632 send_sigstop (lwp);
4633 }
4634 else
4635 {
4636 if (debug_threads)
4637 debug_printf ("already stopped LWP %ld\n",
4638 lwpid_of (thread));
4639
4640 /* The LWP may have been stopped in an internal event that
4641 was not meant to be notified back to GDB (e.g., gdbserver
4642 breakpoint), so we should be reporting a stop event in
4643 this case too. */
4644
4645 /* If the thread already has a pending SIGSTOP, this is a
4646 no-op. Otherwise, something later will presumably resume
4647 the thread and this will cause it to cancel any pending
4648 operation, due to last_resume_kind == resume_stop. If
4649 the thread already has a pending status to report, we
4650 will still report it the next time we wait - see
4651 status_pending_p_callback. */
4652
4653 /* If we already have a pending signal to report, then
4654 there's no need to queue a SIGSTOP, as this means we're
4655 midway through moving the LWP out of the jumppad, and we
4656 will report the pending signal as soon as that is
4657 finished. */
4658 if (lwp->pending_signals_to_report == NULL)
4659 send_sigstop (lwp);
4660 }
4661
4662 /* For stop requests, we're done. */
4663 lwp->resume = NULL;
4664 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4665 return 0;
4666 }
4667
4668 /* If this thread which is about to be resumed has a pending status,
4669 then don't resume it - we can just report the pending status.
4670 Likewise if it is suspended, because e.g., another thread is
4671 stepping past a breakpoint. Make sure to queue any signals that
4672 would otherwise be sent. In all-stop mode, we do this decision
4673 based on if *any* thread has a pending status. If there's a
4674 thread that needs the step-over-breakpoint dance, then don't
4675 resume any other thread but that particular one. */
4676 leave_pending = (lwp->suspended
4677 || lwp->status_pending_p
4678 || leave_all_stopped);
4679
4680 if (!leave_pending)
4681 {
4682 if (debug_threads)
4683 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4684
4685 step = (lwp->resume->kind == resume_step);
4686 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4687 }
4688 else
4689 {
4690 if (debug_threads)
4691 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4692
4693 /* If we have a new signal, enqueue the signal. */
4694 if (lwp->resume->sig != 0)
4695 {
4696 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4697
4698 p_sig->prev = lwp->pending_signals;
4699 p_sig->signal = lwp->resume->sig;
4700
4701 /* If this is the same signal we were previously stopped by,
4702 make sure to queue its siginfo. We can ignore the return
4703 value of ptrace; if it fails, we'll skip
4704 PTRACE_SETSIGINFO. */
4705 if (WIFSTOPPED (lwp->last_status)
4706 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4707 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4708 &p_sig->info);
4709
4710 lwp->pending_signals = p_sig;
4711 }
4712 }
4713
4714 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4715 lwp->resume = NULL;
4716 return 0;
4717 }
4718
4719 static void
4720 linux_resume (struct thread_resume *resume_info, size_t n)
4721 {
4722 struct thread_resume_array array = { resume_info, n };
4723 struct thread_info *need_step_over = NULL;
4724 int any_pending;
4725 int leave_all_stopped;
4726
4727 if (debug_threads)
4728 {
4729 debug_enter ();
4730 debug_printf ("linux_resume:\n");
4731 }
4732
4733 find_inferior (&all_threads, linux_set_resume_request, &array);
4734
4735 /* If there is a thread which would otherwise be resumed, which has
4736 a pending status, then don't resume any threads - we can just
4737 report the pending status. Make sure to queue any signals that
4738 would otherwise be sent. In non-stop mode, we'll apply this
4739 logic to each thread individually. We consume all pending events
4740 before considering to start a step-over (in all-stop). */
4741 any_pending = 0;
4742 if (!non_stop)
4743 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4744
4745 /* If there is a thread which would otherwise be resumed, which is
4746 stopped at a breakpoint that needs stepping over, then don't
4747 resume any threads - have it step over the breakpoint with all
4748 other threads stopped, then resume all threads again. Make sure
4749 to queue any signals that would otherwise be delivered or
4750 queued. */
4751 if (!any_pending && supports_breakpoints ())
4752 need_step_over
4753 = (struct thread_info *) find_inferior (&all_threads,
4754 need_step_over_p, NULL);
4755
4756 leave_all_stopped = (need_step_over != NULL || any_pending);
4757
4758 if (debug_threads)
4759 {
4760 if (need_step_over != NULL)
4761 debug_printf ("Not resuming all, need step over\n");
4762 else if (any_pending)
4763 debug_printf ("Not resuming, all-stop and found "
4764 "an LWP with pending status\n");
4765 else
4766 debug_printf ("Resuming, no pending status or step over needed\n");
4767 }
4768
4769 /* Even if we're leaving threads stopped, queue all signals we'd
4770 otherwise deliver. */
4771 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4772
4773 if (need_step_over)
4774 start_step_over (get_thread_lwp (need_step_over));
4775
4776 if (debug_threads)
4777 {
4778 debug_printf ("linux_resume done\n");
4779 debug_exit ();
4780 }
4781
4782 /* We may have events that were pending that can/should be sent to
4783 the client now. Trigger a linux_wait call. */
4784 if (target_is_async_p ())
4785 async_file_mark ();
4786 }
4787
4788 /* This function is called once per thread. We check the thread's
4789 last resume request, which will tell us whether to resume, step, or
4790 leave the thread stopped. Any signal the client requested to be
4791 delivered has already been enqueued at this point.
4792
4793 If any thread that GDB wants running is stopped at an internal
4794 breakpoint that needs stepping over, we start a step-over operation
4795 on that particular thread, and leave all others stopped. */
4796
4797 static int
4798 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4799 {
4800 struct thread_info *thread = (struct thread_info *) entry;
4801 struct lwp_info *lwp = get_thread_lwp (thread);
4802 int step;
4803
4804 if (lwp == except)
4805 return 0;
4806
4807 if (debug_threads)
4808 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4809
4810 if (!lwp->stopped)
4811 {
4812 if (debug_threads)
4813 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4814 return 0;
4815 }
4816
4817 if (thread->last_resume_kind == resume_stop
4818 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4819 {
4820 if (debug_threads)
 4821		debug_printf ("   client wants LWP %ld to remain stopped\n",
4822 lwpid_of (thread));
4823 return 0;
4824 }
4825
4826 if (lwp->status_pending_p)
4827 {
4828 if (debug_threads)
4829 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4830 lwpid_of (thread));
4831 return 0;
4832 }
4833
4834 gdb_assert (lwp->suspended >= 0);
4835
4836 if (lwp->suspended)
4837 {
4838 if (debug_threads)
4839 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4840 return 0;
4841 }
4842
4843 if (thread->last_resume_kind == resume_stop
4844 && lwp->pending_signals_to_report == NULL
4845 && lwp->collecting_fast_tracepoint == 0)
4846 {
4847 /* We haven't reported this LWP as stopped yet (otherwise, the
4848 last_status.kind check above would catch it, and we wouldn't
 4849	 reach here).  This LWP may have been momentarily paused by a
 4850	 stop_all_lwps call while handling, for example, another LWP's
4851 step-over. In that case, the pending expected SIGSTOP signal
4852 that was queued at vCont;t handling time will have already
4853 been consumed by wait_for_sigstop, and so we need to requeue
4854 another one here. Note that if the LWP already has a SIGSTOP
4855 pending, this is a no-op. */
4856
4857 if (debug_threads)
4858 debug_printf ("Client wants LWP %ld to stop. "
4859 "Making sure it has a SIGSTOP pending\n",
4860 lwpid_of (thread));
4861
4862 send_sigstop (lwp);
4863 }
4864
4865 if (thread->last_resume_kind == resume_step)
4866 {
4867 if (debug_threads)
4868 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4869 lwpid_of (thread));
4870 step = 1;
4871 }
4872 else if (lwp->bp_reinsert != 0)
4873 {
4874 if (debug_threads)
4875 debug_printf (" stepping LWP %ld, reinsert set\n",
4876 lwpid_of (thread));
4877 step = 1;
4878 }
4879 else
4880 step = 0;
4881
4882 linux_resume_one_lwp (lwp, step, 0, NULL);
4883 return 0;
4884 }
4885
4886 static int
4887 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4888 {
4889 struct thread_info *thread = (struct thread_info *) entry;
4890 struct lwp_info *lwp = get_thread_lwp (thread);
4891
4892 if (lwp == except)
4893 return 0;
4894
4895 lwp_suspended_decr (lwp);
4896
4897 return proceed_one_lwp (entry, except);
4898 }
4899
4900 /* When we finish a step-over, set threads running again. If there's
4901 another thread that may need a step-over, now's the time to start
4902 it. Eventually, we'll move all threads past their breakpoints. */
4903
4904 static void
4905 proceed_all_lwps (void)
4906 {
4907 struct thread_info *need_step_over;
4908
4909 /* If there is a thread which would otherwise be resumed, which is
4910 stopped at a breakpoint that needs stepping over, then don't
4911 resume any threads - have it step over the breakpoint with all
4912 other threads stopped, then resume all threads again. */
4913
4914 if (supports_breakpoints ())
4915 {
4916 need_step_over
4917 = (struct thread_info *) find_inferior (&all_threads,
4918 need_step_over_p, NULL);
4919
4920 if (need_step_over != NULL)
4921 {
4922 if (debug_threads)
4923 debug_printf ("proceed_all_lwps: found "
4924 "thread %ld needing a step-over\n",
4925 lwpid_of (need_step_over));
4926
4927 start_step_over (get_thread_lwp (need_step_over));
4928 return;
4929 }
4930 }
4931
4932 if (debug_threads)
4933 debug_printf ("Proceeding, no step-over needed\n");
4934
4935 find_inferior (&all_threads, proceed_one_lwp, NULL);
4936 }
4937
4938 /* Stopped LWPs that the client wanted to be running, that don't have
4939 pending statuses, are set to run again, except for EXCEPT, if not
4940 NULL. This undoes a stop_all_lwps call. */
4941
4942 static void
4943 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4944 {
4945 if (debug_threads)
4946 {
4947 debug_enter ();
4948 if (except)
4949 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4950 lwpid_of (get_lwp_thread (except)));
4951 else
4952 debug_printf ("unstopping all lwps\n");
4953 }
4954
4955 if (unsuspend)
4956 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4957 else
4958 find_inferior (&all_threads, proceed_one_lwp, except);
4959
4960 if (debug_threads)
4961 {
4962 debug_printf ("unstop_all_lwps done\n");
4963 debug_exit ();
4964 }
4965 }
4966
4967
4968 #ifdef HAVE_LINUX_REGSETS
4969
4970 #define use_linux_regsets 1
4971
4972 /* Returns true if REGSET has been disabled. */
4973
4974 static int
4975 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4976 {
4977 return (info->disabled_regsets != NULL
4978 && info->disabled_regsets[regset - info->regsets]);
4979 }
4980
4981 /* Disable REGSET. */
4982
4983 static void
4984 disable_regset (struct regsets_info *info, struct regset_info *regset)
4985 {
4986 int dr_offset;
4987
4988 dr_offset = regset - info->regsets;
4989 if (info->disabled_regsets == NULL)
4990 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4991 info->disabled_regsets[dr_offset] = 1;
4992 }
4993
4994 static int
4995 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4996 struct regcache *regcache)
4997 {
4998 struct regset_info *regset;
4999 int saw_general_regs = 0;
5000 int pid;
5001 struct iovec iov;
5002
5003 pid = lwpid_of (current_thread);
5004 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5005 {
5006 void *buf, *data;
5007 int nt_type, res;
5008
5009 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5010 continue;
5011
5012 buf = xmalloc (regset->size);
5013
5014 nt_type = regset->nt_type;
5015 if (nt_type)
5016 {
5017 iov.iov_base = buf;
5018 iov.iov_len = regset->size;
5019 data = (void *) &iov;
5020 }
5021 else
5022 data = buf;
5023
5024 #ifndef __sparc__
5025 res = ptrace (regset->get_request, pid,
5026 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5027 #else
5028 res = ptrace (regset->get_request, pid, data, nt_type);
5029 #endif
5030 if (res < 0)
5031 {
5032 if (errno == EIO)
5033 {
5034 /* If we get EIO on a regset, do not try it again for
5035 this process mode. */
5036 disable_regset (regsets_info, regset);
5037 }
5038 else if (errno == ENODATA)
5039 {
5040 /* ENODATA may be returned if the regset is currently
5041 not "active". This can happen in normal operation,
5042 so suppress the warning in this case. */
5043 }
5044 else
5045 {
5046 char s[256];
5047 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5048 pid);
5049 perror (s);
5050 }
5051 }
5052 else
5053 {
5054 if (regset->type == GENERAL_REGS)
5055 saw_general_regs = 1;
5056 regset->store_function (regcache, buf);
5057 }
5058 free (buf);
5059 }
5060 if (saw_general_regs)
5061 return 0;
5062 else
5063 return 1;
5064 }
5065
5066 static int
5067 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5068 struct regcache *regcache)
5069 {
5070 struct regset_info *regset;
5071 int saw_general_regs = 0;
5072 int pid;
5073 struct iovec iov;
5074
5075 pid = lwpid_of (current_thread);
5076 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5077 {
5078 void *buf, *data;
5079 int nt_type, res;
5080
5081 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5082 || regset->fill_function == NULL)
5083 continue;
5084
5085 buf = xmalloc (regset->size);
5086
5087 /* First fill the buffer with the current register set contents,
5088 in case there are any items in the kernel's regset that are
5089 not in gdbserver's regcache. */
5090
5091 nt_type = regset->nt_type;
5092 if (nt_type)
5093 {
5094 iov.iov_base = buf;
5095 iov.iov_len = regset->size;
5096 data = (void *) &iov;
5097 }
5098 else
5099 data = buf;
5100
5101 #ifndef __sparc__
5102 res = ptrace (regset->get_request, pid,
5103 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5104 #else
5105 res = ptrace (regset->get_request, pid, data, nt_type);
5106 #endif
5107
5108 if (res == 0)
5109 {
5110 /* Then overlay our cached registers on that. */
5111 regset->fill_function (regcache, buf);
5112
5113 /* Only now do we write the register set. */
5114 #ifndef __sparc__
5115 res = ptrace (regset->set_request, pid,
5116 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5117 #else
5118 res = ptrace (regset->set_request, pid, data, nt_type);
5119 #endif
5120 }
5121
5122 if (res < 0)
5123 {
5124 if (errno == EIO)
5125 {
5126 /* If we get EIO on a regset, do not try it again for
5127 this process mode. */
5128 disable_regset (regsets_info, regset);
5129 }
5130 else if (errno == ESRCH)
5131 {
5132 /* At this point, ESRCH should mean the process is
5133 already gone, in which case we simply ignore attempts
5134 to change its registers. See also the related
5135 comment in linux_resume_one_lwp. */
5136 free (buf);
5137 return 0;
5138 }
5139 else
5140 {
5141 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5142 }
5143 }
5144 else if (regset->type == GENERAL_REGS)
5145 saw_general_regs = 1;
5146 free (buf);
5147 }
5148 if (saw_general_regs)
5149 return 0;
5150 else
5151 return 1;
5152 }
5153
5154 #else /* !HAVE_LINUX_REGSETS */
5155
5156 #define use_linux_regsets 0
5157 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5158 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5159
5160 #endif
5161
5162 /* Return 1 if register REGNO is supported by one of the regset ptrace
5163 calls or 0 if it has to be transferred individually. */
5164
5165 static int
5166 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5167 {
5168 unsigned char mask = 1 << (regno % 8);
5169 size_t index = regno / 8;
5170
5171 return (use_linux_regsets
5172 && (regs_info->regset_bitmap == NULL
5173 || (regs_info->regset_bitmap[index] & mask) != 0));
5174 }
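/* Worked example of the bitmap test above (illustrative): for
   REGNO == 10, INDEX is 10 / 8 == 1 and MASK is 1 << (10 % 8) ==
   0x04, so bit 2 of regset_bitmap[1] decides whether register 10 is
   transferred via the regset path or individually.  */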
5175
5176 #ifdef HAVE_LINUX_USRREGS
5177
5178 int
5179 register_addr (const struct usrregs_info *usrregs, int regnum)
5180 {
5181 int addr;
5182
5183 if (regnum < 0 || regnum >= usrregs->num_regs)
5184 error ("Invalid register number %d.", regnum);
5185
5186 addr = usrregs->regmap[regnum];
5187
5188 return addr;
5189 }
5190
5191 /* Fetch one register. */
5192 static void
5193 fetch_register (const struct usrregs_info *usrregs,
5194 struct regcache *regcache, int regno)
5195 {
5196 CORE_ADDR regaddr;
5197 int i, size;
5198 char *buf;
5199 int pid;
5200
5201 if (regno >= usrregs->num_regs)
5202 return;
5203 if ((*the_low_target.cannot_fetch_register) (regno))
5204 return;
5205
5206 regaddr = register_addr (usrregs, regno);
5207 if (regaddr == -1)
5208 return;
5209
5210 size = ((register_size (regcache->tdesc, regno)
5211 + sizeof (PTRACE_XFER_TYPE) - 1)
5212 & -sizeof (PTRACE_XFER_TYPE));
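  /* Worked example of the rounding above (illustrative): with an
     8-byte PTRACE_XFER_TYPE, a 10-byte register rounds up to
     (10 + 7) & -8 == 16 bytes, i.e. two full transfer words.  */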
5213 buf = (char *) alloca (size);
5214
5215 pid = lwpid_of (current_thread);
5216 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5217 {
5218 errno = 0;
5219 *(PTRACE_XFER_TYPE *) (buf + i) =
5220 ptrace (PTRACE_PEEKUSER, pid,
5221 /* Coerce to a uintptr_t first to avoid potential gcc warning
5222 of coercing an 8 byte integer to a 4 byte pointer. */
5223 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5224 regaddr += sizeof (PTRACE_XFER_TYPE);
5225 if (errno != 0)
5226 error ("reading register %d: %s", regno, strerror (errno));
5227 }
5228
5229 if (the_low_target.supply_ptrace_register)
5230 the_low_target.supply_ptrace_register (regcache, regno, buf);
5231 else
5232 supply_register (regcache, regno, buf);
5233 }
5234
5235 /* Store one register. */
5236 static void
5237 store_register (const struct usrregs_info *usrregs,
5238 struct regcache *regcache, int regno)
5239 {
5240 CORE_ADDR regaddr;
5241 int i, size;
5242 char *buf;
5243 int pid;
5244
5245 if (regno >= usrregs->num_regs)
5246 return;
5247 if ((*the_low_target.cannot_store_register) (regno))
5248 return;
5249
5250 regaddr = register_addr (usrregs, regno);
5251 if (regaddr == -1)
5252 return;
5253
5254 size = ((register_size (regcache->tdesc, regno)
5255 + sizeof (PTRACE_XFER_TYPE) - 1)
5256 & -sizeof (PTRACE_XFER_TYPE));
5257 buf = (char *) alloca (size);
5258 memset (buf, 0, size);
5259
5260 if (the_low_target.collect_ptrace_register)
5261 the_low_target.collect_ptrace_register (regcache, regno, buf);
5262 else
5263 collect_register (regcache, regno, buf);
5264
5265 pid = lwpid_of (current_thread);
5266 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5267 {
5268 errno = 0;
5269 ptrace (PTRACE_POKEUSER, pid,
5270 /* Coerce to a uintptr_t first to avoid potential gcc warning
5271 about coercing an 8 byte integer to a 4 byte pointer. */
5272 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5273 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5274 if (errno != 0)
5275 {
5276 /* At this point, ESRCH should mean the process is
5277 already gone, in which case we simply ignore attempts
5278 to change its registers. See also the related
5279 comment in linux_resume_one_lwp. */
5280 if (errno == ESRCH)
5281 return;
5282
5283 if ((*the_low_target.cannot_store_register) (regno) == 0)
5284 error ("writing register %d: %s", regno, strerror (errno));
5285 }
5286 regaddr += sizeof (PTRACE_XFER_TYPE);
5287 }
5288 }
5289
5290 /* Fetch all registers, or just one, from the child process.
5291 If REGNO is -1, do this for all registers, skipping any that are
5292 assumed to have been retrieved by regsets_fetch_inferior_registers,
5293 unless ALL is non-zero.
5294 Otherwise, REGNO specifies which register (so we can save time). */
5295 static void
5296 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5297 struct regcache *regcache, int regno, int all)
5298 {
5299 struct usrregs_info *usr = regs_info->usrregs;
5300
5301 if (regno == -1)
5302 {
5303 for (regno = 0; regno < usr->num_regs; regno++)
5304 if (all || !linux_register_in_regsets (regs_info, regno))
5305 fetch_register (usr, regcache, regno);
5306 }
5307 else
5308 fetch_register (usr, regcache, regno);
5309 }
5310
5311 /* Store our register values back into the inferior.
5312 If REGNO is -1, do this for all registers, skipping any that are
5313 assumed to have been saved by regsets_store_inferior_registers,
5314 unless ALL is non-zero.
5315 Otherwise, REGNO specifies which register (so we can save time). */
5316 static void
5317 usr_store_inferior_registers (const struct regs_info *regs_info,
5318 struct regcache *regcache, int regno, int all)
5319 {
5320 struct usrregs_info *usr = regs_info->usrregs;
5321
5322 if (regno == -1)
5323 {
5324 for (regno = 0; regno < usr->num_regs; regno++)
5325 if (all || !linux_register_in_regsets (regs_info, regno))
5326 store_register (usr, regcache, regno);
5327 }
5328 else
5329 store_register (usr, regcache, regno);
5330 }
5331
5332 #else /* !HAVE_LINUX_USRREGS */
5333
5334 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5335 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5336
5337 #endif
5338
5339
5340 void
5341 linux_fetch_registers (struct regcache *regcache, int regno)
5342 {
5343 int use_regsets;
5344 int all = 0;
5345 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5346
5347 if (regno == -1)
5348 {
5349 if (the_low_target.fetch_register != NULL
5350 && regs_info->usrregs != NULL)
5351 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5352 (*the_low_target.fetch_register) (regcache, regno);
5353
5354 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5355 if (regs_info->usrregs != NULL)
5356 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5357 }
5358 else
5359 {
5360 if (the_low_target.fetch_register != NULL
5361 && (*the_low_target.fetch_register) (regcache, regno))
5362 return;
5363
5364 use_regsets = linux_register_in_regsets (regs_info, regno);
5365 if (use_regsets)
5366 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5367 regcache);
5368 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5369 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5370 }
5371 }
5372
5373 void
5374 linux_store_registers (struct regcache *regcache, int regno)
5375 {
5376 int use_regsets;
5377 int all = 0;
5378 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5379
5380 if (regno == -1)
5381 {
5382 all = regsets_store_inferior_registers (regs_info->regsets_info,
5383 regcache);
5384 if (regs_info->usrregs != NULL)
5385 usr_store_inferior_registers (regs_info, regcache, regno, all);
5386 }
5387 else
5388 {
5389 use_regsets = linux_register_in_regsets (regs_info, regno);
5390 if (use_regsets)
5391 all = regsets_store_inferior_registers (regs_info->regsets_info,
5392 regcache);
5393 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5394 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5395 }
5396 }
5397
5398
5399 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5400 to debugger memory starting at MYADDR. */
5401
5402 static int
5403 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5404 {
5405 int pid = lwpid_of (current_thread);
5406 register PTRACE_XFER_TYPE *buffer;
5407 register CORE_ADDR addr;
5408 register int count;
5409 char filename[64];
5410 register int i;
5411 int ret;
5412 int fd;
5413
5414 /* Try using /proc. Don't bother for one word. */
5415 if (len >= 3 * sizeof (long))
5416 {
5417 int bytes;
5418
5419 /* We could keep this file open and cache it - possibly one per
5420 thread. That requires some juggling, but is even faster. */
5421 sprintf (filename, "/proc/%d/mem", pid);
5422 fd = open (filename, O_RDONLY | O_LARGEFILE);
5423 if (fd == -1)
5424 goto no_proc;
5425
5426 /* If pread64 is available, use it. It's faster if the kernel
5427 supports it (only one syscall), and it's 64-bit safe even on
5428 32-bit platforms (for instance, SPARC debugging a SPARC64
5429 application). */
5430 #ifdef HAVE_PREAD64
5431 bytes = pread64 (fd, myaddr, len, memaddr);
5432 #else
5433 bytes = -1;
5434 if (lseek (fd, memaddr, SEEK_SET) != -1)
5435 bytes = read (fd, myaddr, len);
5436 #endif
5437
5438 close (fd);
5439 if (bytes == len)
5440 return 0;
5441
 5442	      /* Some data was read; we'll try to get the rest with ptrace.  */
5443 if (bytes > 0)
5444 {
5445 memaddr += bytes;
5446 myaddr += bytes;
5447 len -= bytes;
5448 }
5449 }
5450
5451 no_proc:
5452 /* Round starting address down to longword boundary. */
5453 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5454 /* Round ending address up; get number of longwords that makes. */
5455 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5456 / sizeof (PTRACE_XFER_TYPE));
5457 /* Allocate buffer of that many longwords. */
5458 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5459
 5460	  /* Read all the longwords.  */
5461 errno = 0;
5462 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5463 {
5464 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5465 about coercing an 8 byte integer to a 4 byte pointer. */
5466 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5467 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5468 (PTRACE_TYPE_ARG4) 0);
5469 if (errno)
5470 break;
5471 }
5472 ret = errno;
5473
5474 /* Copy appropriate bytes out of the buffer. */
5475 if (i > 0)
5476 {
5477 i *= sizeof (PTRACE_XFER_TYPE);
5478 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5479 memcpy (myaddr,
5480 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5481 i < len ? i : len);
5482 }
5483
5484 return ret;
5485 }
5486
5487 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5488 memory at MEMADDR. On failure (cannot write to the inferior)
5489 returns the value of errno. Always succeeds if LEN is zero. */
5490
5491 static int
5492 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5493 {
5494 register int i;
5495 /* Round starting address down to longword boundary. */
5496 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5497 /* Round ending address up; get number of longwords that makes. */
5498 register int count
5499 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5500 / sizeof (PTRACE_XFER_TYPE);
5501
5502 /* Allocate buffer of that many longwords. */
5503 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5504
5505 int pid = lwpid_of (current_thread);
5506
5507 if (len == 0)
5508 {
5509 /* Zero length write always succeeds. */
5510 return 0;
5511 }
5512
5513 if (debug_threads)
5514 {
5515 /* Dump up to four bytes. */
5516 char str[4 * 2 + 1];
5517 char *p = str;
5518 int dump = len < 4 ? len : 4;
5519
5520 for (i = 0; i < dump; i++)
5521 {
5522 sprintf (p, "%02x", myaddr[i]);
5523 p += 2;
5524 }
5525 *p = '\0';
5526
5527 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5528 str, (long) memaddr, pid);
5529 }
5530
5531   /* Fill the start and end extra bytes of the buffer with existing memory data, so a partial-word write preserves the surrounding bytes.  */
5532
5533 errno = 0;
5534 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5535 about coercing an 8 byte integer to a 4 byte pointer. */
5536 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5537 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5538 (PTRACE_TYPE_ARG4) 0);
5539 if (errno)
5540 return errno;
5541
5542 if (count > 1)
5543 {
5544 errno = 0;
5545 buffer[count - 1]
5546 = ptrace (PTRACE_PEEKTEXT, pid,
5547 /* Coerce to a uintptr_t first to avoid potential gcc warning
5548 about coercing an 8 byte integer to a 4 byte pointer. */
5549 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5550 * sizeof (PTRACE_XFER_TYPE)),
5551 (PTRACE_TYPE_ARG4) 0);
5552 if (errno)
5553 return errno;
5554 }
5555
5556 /* Copy data to be written over corresponding part of buffer. */
5557
5558 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5559 myaddr, len);
5560
5561 /* Write the entire buffer. */
5562
5563 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5564 {
5565 errno = 0;
5566 ptrace (PTRACE_POKETEXT, pid,
5567 /* Coerce to a uintptr_t first to avoid potential gcc warning
5568 about coercing an 8 byte integer to a 4 byte pointer. */
5569 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5570 (PTRACE_TYPE_ARG4) buffer[i]);
5571 if (errno)
5572 return errno;
5573 }
5574
5575 return 0;
5576 }
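/* A hedged sketch of the read-modify-write pattern implemented above: peek
   the first and last words so bytes outside the unaligned target range are
   preserved, splice in the new data, then poke every word back.  The names
   are hypothetical and error handling is trimmed for brevity.  */
#if 0
#include <string.h>
#include <sys/ptrace.h>

static int
demo_poke_bytes (int tracee_pid, unsigned long memaddr,
		 const unsigned char *src, size_t len)
{
  unsigned long addr = memaddr & -(unsigned long) sizeof (long);
  size_t count = (memaddr + len - addr + sizeof (long) - 1) / sizeof (long);
  long buffer[64];	/* Assume COUNT <= 64 for this demo.  */
  size_t i;

  /* Preserve the bytes around the target range.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, tracee_pid, (void *) addr, (void *) 0);
  if (count > 1)
    buffer[count - 1]
      = ptrace (PTRACE_PEEKTEXT, tracee_pid,
		(void *) (addr + (count - 1) * sizeof (long)), (void *) 0);

  /* Splice the new data over the corresponding bytes.  */
  memcpy ((char *) buffer + (memaddr & (sizeof (long) - 1)), src, len);

  /* Write the whole word-aligned buffer back.  */
  for (i = 0; i < count; i++)
    if (ptrace (PTRACE_POKETEXT, tracee_pid,
		(void *) (addr + i * sizeof (long)), (void *) buffer[i]) != 0)
      return -1;

  return 0;
}
#endif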
5577
5578 static void
5579 linux_look_up_symbols (void)
5580 {
5581 #ifdef USE_THREAD_DB
5582 struct process_info *proc = current_process ();
5583
5584 if (proc->priv->thread_db != NULL)
5585 return;
5586
5587 /* If the kernel supports tracing clones, then we don't need to
5588 use the magic thread event breakpoint to learn about
5589 threads. */
5590 thread_db_init (!linux_supports_traceclone ());
5591 #endif
5592 }
5593
5594 static void
5595 linux_request_interrupt (void)
5596 {
5597 extern unsigned long signal_pid;
5598
5599   /* Send a SIGINT to the process group.  This acts just as if the user
5600      had typed a ^C on the controlling terminal.  */
5601 kill (-signal_pid, SIGINT);
5602 }
5603
5604 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5605 to debugger memory starting at MYADDR. */
5606
5607 static int
5608 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5609 {
5610 char filename[PATH_MAX];
5611 int fd, n;
5612 int pid = lwpid_of (current_thread);
5613
5614 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5615
5616 fd = open (filename, O_RDONLY);
5617 if (fd < 0)
5618 return -1;
5619
5620 if (offset != (CORE_ADDR) 0
5621 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5622 n = -1;
5623 else
5624 n = read (fd, myaddr, len);
5625
5626 close (fd);
5627
5628 return n;
5629 }
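/* A small hedged sketch of how a consumer might walk the raw bytes that
   linux_read_auxv returns, assuming a 64-bit inferior; compare
   get_phdr_phnum_from_proc_auxv further below, which scans the same data
   straight from the file.  The helper name is hypothetical.  */
#if 0
#include <elf.h>
#include <stddef.h>

static unsigned long
demo_find_auxv_entry (const unsigned char *auxv, size_t len,
		      unsigned long type)
{
  const Elf64_auxv_t *aux = (const Elf64_auxv_t *) auxv;
  const Elf64_auxv_t *end = (const Elf64_auxv_t *) (auxv + len);

  /* The vector is a sequence of (type, value) pairs ended by AT_NULL.  */
  for (; aux < end && aux->a_type != AT_NULL; aux++)
    if (aux->a_type == type)
      return aux->a_un.a_val;

  return 0;	/* Entry not present.  */
}
#endif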
5630
5631 /* These breakpoint and watchpoint related wrapper functions simply
5632 pass on the function call if the target has registered a
5633 corresponding function. */
5634
5635 static int
5636 linux_supports_z_point_type (char z_type)
5637 {
5638 return (the_low_target.supports_z_point_type != NULL
5639 && the_low_target.supports_z_point_type (z_type));
5640 }
5641
5642 static int
5643 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5644 int size, struct raw_breakpoint *bp)
5645 {
5646 if (type == raw_bkpt_type_sw)
5647 return insert_memory_breakpoint (bp);
5648 else if (the_low_target.insert_point != NULL)
5649 return the_low_target.insert_point (type, addr, size, bp);
5650 else
5651 /* Unsupported (see target.h). */
5652 return 1;
5653 }
5654
5655 static int
5656 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5657 int size, struct raw_breakpoint *bp)
5658 {
5659 if (type == raw_bkpt_type_sw)
5660 return remove_memory_breakpoint (bp);
5661 else if (the_low_target.remove_point != NULL)
5662 return the_low_target.remove_point (type, addr, size, bp);
5663 else
5664 /* Unsupported (see target.h). */
5665 return 1;
5666 }
5667
5668 /* Implement the to_stopped_by_sw_breakpoint target_ops
5669 method. */
5670
5671 static int
5672 linux_stopped_by_sw_breakpoint (void)
5673 {
5674 struct lwp_info *lwp = get_thread_lwp (current_thread);
5675
5676 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5677 }
5678
5679 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5680 method. */
5681
5682 static int
5683 linux_supports_stopped_by_sw_breakpoint (void)
5684 {
5685 return USE_SIGTRAP_SIGINFO;
5686 }
5687
5688 /* Implement the to_stopped_by_hw_breakpoint target_ops
5689 method. */
5690
5691 static int
5692 linux_stopped_by_hw_breakpoint (void)
5693 {
5694 struct lwp_info *lwp = get_thread_lwp (current_thread);
5695
5696 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5697 }
5698
5699 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5700 method. */
5701
5702 static int
5703 linux_supports_stopped_by_hw_breakpoint (void)
5704 {
5705 return USE_SIGTRAP_SIGINFO;
5706 }
5707
5708 /* Implement the supports_hardware_single_step target_ops method. */
5709
5710 static int
5711 linux_supports_hardware_single_step (void)
5712 {
5713 return can_hardware_single_step ();
5714 }
5715
5716 static int
5717 linux_stopped_by_watchpoint (void)
5718 {
5719 struct lwp_info *lwp = get_thread_lwp (current_thread);
5720
5721 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5722 }
5723
5724 static CORE_ADDR
5725 linux_stopped_data_address (void)
5726 {
5727 struct lwp_info *lwp = get_thread_lwp (current_thread);
5728
5729 return lwp->stopped_data_address;
5730 }
5731
5732 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5733 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5734 && defined(PT_TEXT_END_ADDR)
5735
5736 /* This is only used for targets that define PT_TEXT_ADDR,
5737    PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
5738    target presumably has other ways of acquiring this information,
5739    such as loadmaps.  */
5740
5741 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5742 to tell gdb about. */
5743
5744 static int
5745 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5746 {
5747 unsigned long text, text_end, data;
5748 int pid = lwpid_of (current_thread);
5749
5750 errno = 0;
5751
5752 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5753 (PTRACE_TYPE_ARG4) 0);
5754 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5755 (PTRACE_TYPE_ARG4) 0);
5756 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5757 (PTRACE_TYPE_ARG4) 0);
5758
5759 if (errno == 0)
5760 {
5761 /* Both text and data offsets produced at compile-time (and so
5762 used by gdb) are relative to the beginning of the program,
5763 with the data segment immediately following the text segment.
5764 However, the actual runtime layout in memory may put the data
5765 somewhere else, so when we send gdb a data base-address, we
5766 use the real data base address and subtract the compile-time
5767 data base-address from it (which is just the length of the
5768 text segment). BSS immediately follows data in both
5769 cases. */
5770 *text_p = text;
5771 *data_p = data - (text_end - text);
5772
5773 return 1;
5774 }
5775 return 0;
5776 }
5777 #endif
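/* A worked example of the offset computation above, with made-up numbers:
   if PT_TEXT_ADDR reads back 0x8000000, PT_TEXT_END_ADDR 0x8004000 and
   PT_DATA_ADDR 0x9000000, the text segment is 0x4000 bytes long, so we
   report *text_p = 0x8000000 and *data_p = 0x9000000 - 0x4000
   = 0x8ffc000.  */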
5778
5779 static int
5780 linux_qxfer_osdata (const char *annex,
5781 unsigned char *readbuf, unsigned const char *writebuf,
5782 CORE_ADDR offset, int len)
5783 {
5784 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5785 }
5786
5787 /* Convert a native/host siginfo object into/from the siginfo in the
5788    layout of the inferior's architecture.  */
5789
5790 static void
5791 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5792 {
5793 int done = 0;
5794
5795 if (the_low_target.siginfo_fixup != NULL)
5796 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5797
5798 /* If there was no callback, or the callback didn't do anything,
5799 then just do a straight memcpy. */
5800 if (!done)
5801 {
5802 if (direction == 1)
5803 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5804 else
5805 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5806 }
5807 }
5808
5809 static int
5810 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5811 unsigned const char *writebuf, CORE_ADDR offset, int len)
5812 {
5813 int pid;
5814 siginfo_t siginfo;
5815 char inf_siginfo[sizeof (siginfo_t)];
5816
5817 if (current_thread == NULL)
5818 return -1;
5819
5820 pid = lwpid_of (current_thread);
5821
5822 if (debug_threads)
5823 debug_printf ("%s siginfo for lwp %d.\n",
5824 readbuf != NULL ? "Reading" : "Writing",
5825 pid);
5826
5827 if (offset >= sizeof (siginfo))
5828 return -1;
5829
5830 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5831 return -1;
5832
5833 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5834 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5835 inferior with a 64-bit GDBSERVER should look the same as debugging it
5836 with a 32-bit GDBSERVER, we need to convert it. */
5837 siginfo_fixup (&siginfo, inf_siginfo, 0);
5838
5839 if (offset + len > sizeof (siginfo))
5840 len = sizeof (siginfo) - offset;
5841
5842 if (readbuf != NULL)
5843 memcpy (readbuf, inf_siginfo + offset, len);
5844 else
5845 {
5846 memcpy (inf_siginfo + offset, writebuf, len);
5847
5848 /* Convert back to ptrace layout before flushing it out. */
5849 siginfo_fixup (&siginfo, inf_siginfo, 1);
5850
5851 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5852 return -1;
5853 }
5854
5855 return len;
5856 }
5857
5858 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5859    it lets us notice when children change state; and it acts as the
5860    handler for the sigsuspend in my_waitpid.  */
5861
5862 static void
5863 sigchld_handler (int signo)
5864 {
5865 int old_errno = errno;
5866
5867 if (debug_threads)
5868 {
5869 do
5870 {
5871 /* fprintf is not async-signal-safe, so call write
5872 directly. */
5873 if (write (2, "sigchld_handler\n",
5874 sizeof ("sigchld_handler\n") - 1) < 0)
5875 break; /* just ignore */
5876 } while (0);
5877 }
5878
5879 if (target_is_async_p ())
5880 async_file_mark (); /* trigger a linux_wait */
5881
5882 errno = old_errno;
5883 }
5884
5885 static int
5886 linux_supports_non_stop (void)
5887 {
5888 return 1;
5889 }
5890
5891 static int
5892 linux_async (int enable)
5893 {
5894 int previous = target_is_async_p ();
5895
5896 if (debug_threads)
5897 debug_printf ("linux_async (%d), previous=%d\n",
5898 enable, previous);
5899
5900 if (previous != enable)
5901 {
5902 sigset_t mask;
5903 sigemptyset (&mask);
5904 sigaddset (&mask, SIGCHLD);
5905
5906 sigprocmask (SIG_BLOCK, &mask, NULL);
5907
5908 if (enable)
5909 {
5910 if (pipe (linux_event_pipe) == -1)
5911 {
5912 linux_event_pipe[0] = -1;
5913 linux_event_pipe[1] = -1;
5914 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5915
5916 warning ("creating event pipe failed.");
5917 return previous;
5918 }
5919
5920 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5921 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5922
5923 /* Register the event loop handler. */
5924 add_file_handler (linux_event_pipe[0],
5925 handle_target_event, NULL);
5926
5927 /* Always trigger a linux_wait. */
5928 async_file_mark ();
5929 }
5930 else
5931 {
5932 delete_file_handler (linux_event_pipe[0]);
5933
5934 close (linux_event_pipe[0]);
5935 close (linux_event_pipe[1]);
5936 linux_event_pipe[0] = -1;
5937 linux_event_pipe[1] = -1;
5938 }
5939
5940 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5941 }
5942
5943 return previous;
5944 }
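/* The SIGCHLD handler together with the event pipe above is an instance of
   the classic self-pipe trick: the async-signal-safe handler writes a byte,
   and the event loop's file handler later drains it.  A minimal hedged
   sketch of that pattern, with hypothetical names:  */
#if 0
#include <signal.h>
#include <unistd.h>

static int demo_pipe[2];	/* Made with pipe (); both ends O_NONBLOCK.  */

static void
demo_sigchld (int signo)
{
  /* write is async-signal-safe; a full pipe only means a wakeup is
     already pending, so the result can be ignored.  */
  (void) signo;
  (void) write (demo_pipe[1], "+", 1);
}

static void
demo_drain (void)
{
  char c;

  /* Called from the event loop once demo_pipe[0] polls readable.  */
  while (read (demo_pipe[0], &c, 1) > 0)
    ;
}
#endif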
5945
5946 static int
5947 linux_start_non_stop (int nonstop)
5948 {
5949 /* Register or unregister from event-loop accordingly. */
5950 linux_async (nonstop);
5951
5952 if (target_is_async_p () != (nonstop != 0))
5953 return -1;
5954
5955 return 0;
5956 }
5957
5958 static int
5959 linux_supports_multi_process (void)
5960 {
5961 return 1;
5962 }
5963
5964 /* Check if fork events are supported. */
5965
5966 static int
5967 linux_supports_fork_events (void)
5968 {
5969 return linux_supports_tracefork ();
5970 }
5971
5972 /* Check if vfork events are supported. */
5973
5974 static int
5975 linux_supports_vfork_events (void)
5976 {
5977 return linux_supports_tracefork ();
5978 }
5979
5980 /* Check if exec events are supported. */
5981
5982 static int
5983 linux_supports_exec_events (void)
5984 {
5985 return linux_supports_traceexec ();
5986 }
5987
5988 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5989 options for the specified lwp. */
5990
5991 static int
5992 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5993 void *args)
5994 {
5995 struct thread_info *thread = (struct thread_info *) entry;
5996 struct lwp_info *lwp = get_thread_lwp (thread);
5997
5998 if (!lwp->stopped)
5999 {
6000 /* Stop the lwp so we can modify its ptrace options. */
6001 lwp->must_set_ptrace_flags = 1;
6002 linux_stop_lwp (lwp);
6003 }
6004 else
6005 {
6006 /* Already stopped; go ahead and set the ptrace options. */
6007 struct process_info *proc = find_process_pid (pid_of (thread));
6008 int options = linux_low_ptrace_options (proc->attached);
6009
6010 linux_enable_event_reporting (lwpid_of (thread), options);
6011 lwp->must_set_ptrace_flags = 0;
6012 }
6013
6014 return 0;
6015 }
6016
6017 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6018 ptrace flags for all inferiors. This is in case the new GDB connection
6019 doesn't support the same set of events that the previous one did. */
6020
6021 static void
6022 linux_handle_new_gdb_connection (void)
6023 {
6024 pid_t pid;
6025
6026 /* Request that all the lwps reset their ptrace options. */
6027   find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6028 }
6029
6030 static int
6031 linux_supports_disable_randomization (void)
6032 {
6033 #ifdef HAVE_PERSONALITY
6034 return 1;
6035 #else
6036 return 0;
6037 #endif
6038 }
6039
6040 static int
6041 linux_supports_agent (void)
6042 {
6043 return 1;
6044 }
6045
6046 static int
6047 linux_supports_range_stepping (void)
6048 {
6049 if (*the_low_target.supports_range_stepping == NULL)
6050 return 0;
6051
6052 return (*the_low_target.supports_range_stepping) ();
6053 }
6054
6055 /* Enumerate spufs IDs for process PID. */
6056 static int
6057 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6058 {
6059 int pos = 0;
6060 int written = 0;
6061 char path[128];
6062 DIR *dir;
6063 struct dirent *entry;
6064
6065 sprintf (path, "/proc/%ld/fd", pid);
6066 dir = opendir (path);
6067 if (!dir)
6068 return -1;
6069
6070 rewinddir (dir);
6071 while ((entry = readdir (dir)) != NULL)
6072 {
6073 struct stat st;
6074 struct statfs stfs;
6075 int fd;
6076
6077 fd = atoi (entry->d_name);
6078 if (!fd)
6079 continue;
6080
6081 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6082 if (stat (path, &st) != 0)
6083 continue;
6084 if (!S_ISDIR (st.st_mode))
6085 continue;
6086
6087 if (statfs (path, &stfs) != 0)
6088 continue;
6089 if (stfs.f_type != SPUFS_MAGIC)
6090 continue;
6091
6092 if (pos >= offset && pos + 4 <= offset + len)
6093 {
6094 *(unsigned int *)(buf + pos - offset) = fd;
6095 written += 4;
6096 }
6097 pos += 4;
6098 }
6099
6100 closedir (dir);
6101 return written;
6102 }
6103
6104 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6105 object type, using the /proc file system. */
6106 static int
6107 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6108 unsigned const char *writebuf,
6109 CORE_ADDR offset, int len)
6110 {
6111 long pid = lwpid_of (current_thread);
6112 char buf[128];
6113 int fd = 0;
6114 int ret = 0;
6115
6116 if (!writebuf && !readbuf)
6117 return -1;
6118
6119 if (!*annex)
6120 {
6121 if (!readbuf)
6122 return -1;
6123 else
6124 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6125 }
6126
6127 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6128   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6129 if (fd <= 0)
6130 return -1;
6131
6132 if (offset != 0
6133 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6134 {
6135 close (fd);
6136 return 0;
6137 }
6138
6139 if (writebuf)
6140 ret = write (fd, writebuf, (size_t) len);
6141 else
6142 ret = read (fd, readbuf, (size_t) len);
6143
6144 close (fd);
6145 return ret;
6146 }
6147
6148 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6149 struct target_loadseg
6150 {
6151 /* Core address to which the segment is mapped. */
6152 Elf32_Addr addr;
6153 /* VMA recorded in the program header. */
6154 Elf32_Addr p_vaddr;
6155 /* Size of this segment in memory. */
6156 Elf32_Word p_memsz;
6157 };
6158
6159 # if defined PT_GETDSBT
6160 struct target_loadmap
6161 {
6162 /* Protocol version number, must be zero. */
6163 Elf32_Word version;
6164 /* Pointer to the DSBT table, its size, and the DSBT index. */
6165 unsigned *dsbt_table;
6166 unsigned dsbt_size, dsbt_index;
6167 /* Number of segments in this map. */
6168 Elf32_Word nsegs;
6169 /* The actual memory map. */
6170 struct target_loadseg segs[/*nsegs*/];
6171 };
6172 # define LINUX_LOADMAP PT_GETDSBT
6173 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6174 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6175 # else
6176 struct target_loadmap
6177 {
6178 /* Protocol version number, must be zero. */
6179 Elf32_Half version;
6180 /* Number of segments in this map. */
6181 Elf32_Half nsegs;
6182 /* The actual memory map. */
6183 struct target_loadseg segs[/*nsegs*/];
6184 };
6185 # define LINUX_LOADMAP PTRACE_GETFDPIC
6186 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6187 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6188 # endif
6189
6190 static int
6191 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6192 unsigned char *myaddr, unsigned int len)
6193 {
6194 int pid = lwpid_of (current_thread);
6195 int addr = -1;
6196 struct target_loadmap *data = NULL;
6197 unsigned int actual_length, copy_length;
6198
6199 if (strcmp (annex, "exec") == 0)
6200 addr = (int) LINUX_LOADMAP_EXEC;
6201 else if (strcmp (annex, "interp") == 0)
6202 addr = (int) LINUX_LOADMAP_INTERP;
6203 else
6204 return -1;
6205
6206 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6207 return -1;
6208
6209 if (data == NULL)
6210 return -1;
6211
6212 actual_length = sizeof (struct target_loadmap)
6213 + sizeof (struct target_loadseg) * data->nsegs;
6214
6215 if (offset < 0 || offset > actual_length)
6216 return -1;
6217
6218 copy_length = actual_length - offset < len ? actual_length - offset : len;
6219 memcpy (myaddr, (char *) data + offset, copy_length);
6220 return copy_length;
6221 }
6222 #else
6223 # define linux_read_loadmap NULL
6224 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6225
6226 static void
6227 linux_process_qsupported (char **features, int count)
6228 {
6229 if (the_low_target.process_qsupported != NULL)
6230 the_low_target.process_qsupported (features, count);
6231 }
6232
6233 static int
6234 linux_supports_tracepoints (void)
6235 {
6236 if (*the_low_target.supports_tracepoints == NULL)
6237 return 0;
6238
6239 return (*the_low_target.supports_tracepoints) ();
6240 }
6241
6242 static CORE_ADDR
6243 linux_read_pc (struct regcache *regcache)
6244 {
6245 if (the_low_target.get_pc == NULL)
6246 return 0;
6247
6248 return (*the_low_target.get_pc) (regcache);
6249 }
6250
6251 static void
6252 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6253 {
6254 gdb_assert (the_low_target.set_pc != NULL);
6255
6256 (*the_low_target.set_pc) (regcache, pc);
6257 }
6258
6259 static int
6260 linux_thread_stopped (struct thread_info *thread)
6261 {
6262 return get_thread_lwp (thread)->stopped;
6263 }
6264
6265 /* This exposes stop-all-threads functionality to other modules. */
6266
6267 static void
6268 linux_pause_all (int freeze)
6269 {
6270 stop_all_lwps (freeze, NULL);
6271 }
6272
6273 /* This exposes unstop-all-threads functionality to other gdbserver
6274 modules. */
6275
6276 static void
6277 linux_unpause_all (int unfreeze)
6278 {
6279 unstop_all_lwps (unfreeze, NULL);
6280 }
6281
6282 static int
6283 linux_prepare_to_access_memory (void)
6284 {
6285   /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6286 running LWP. */
6287 if (non_stop)
6288 linux_pause_all (1);
6289 return 0;
6290 }
6291
6292 static void
6293 linux_done_accessing_memory (void)
6294 {
6295   /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6296 running LWP. */
6297 if (non_stop)
6298 linux_unpause_all (1);
6299 }
6300
6301 static int
6302 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6303 CORE_ADDR collector,
6304 CORE_ADDR lockaddr,
6305 ULONGEST orig_size,
6306 CORE_ADDR *jump_entry,
6307 CORE_ADDR *trampoline,
6308 ULONGEST *trampoline_size,
6309 unsigned char *jjump_pad_insn,
6310 ULONGEST *jjump_pad_insn_size,
6311 CORE_ADDR *adjusted_insn_addr,
6312 CORE_ADDR *adjusted_insn_addr_end,
6313 char *err)
6314 {
6315 return (*the_low_target.install_fast_tracepoint_jump_pad)
6316 (tpoint, tpaddr, collector, lockaddr, orig_size,
6317 jump_entry, trampoline, trampoline_size,
6318 jjump_pad_insn, jjump_pad_insn_size,
6319 adjusted_insn_addr, adjusted_insn_addr_end,
6320 err);
6321 }
6322
6323 static struct emit_ops *
6324 linux_emit_ops (void)
6325 {
6326 if (the_low_target.emit_ops != NULL)
6327 return (*the_low_target.emit_ops) ();
6328 else
6329 return NULL;
6330 }
6331
6332 static int
6333 linux_get_min_fast_tracepoint_insn_len (void)
6334 {
6335 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6336 }
6337
6338 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6339
6340 static int
6341 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6342 CORE_ADDR *phdr_memaddr, int *num_phdr)
6343 {
6344 char filename[PATH_MAX];
6345 int fd;
6346 const int auxv_size = is_elf64
6347 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6348 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6349
6350 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6351
6352 fd = open (filename, O_RDONLY);
6353 if (fd < 0)
6354 return 1;
6355
6356 *phdr_memaddr = 0;
6357 *num_phdr = 0;
6358 while (read (fd, buf, auxv_size) == auxv_size
6359 && (*phdr_memaddr == 0 || *num_phdr == 0))
6360 {
6361 if (is_elf64)
6362 {
6363 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6364
6365 switch (aux->a_type)
6366 {
6367 case AT_PHDR:
6368 *phdr_memaddr = aux->a_un.a_val;
6369 break;
6370 case AT_PHNUM:
6371 *num_phdr = aux->a_un.a_val;
6372 break;
6373 }
6374 }
6375 else
6376 {
6377 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6378
6379 switch (aux->a_type)
6380 {
6381 case AT_PHDR:
6382 *phdr_memaddr = aux->a_un.a_val;
6383 break;
6384 case AT_PHNUM:
6385 *num_phdr = aux->a_un.a_val;
6386 break;
6387 }
6388 }
6389 }
6390
6391 close (fd);
6392
6393 if (*phdr_memaddr == 0 || *num_phdr == 0)
6394 {
6395 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6396 "phdr_memaddr = %ld, phdr_num = %d",
6397 (long) *phdr_memaddr, *num_phdr);
6398 return 2;
6399 }
6400
6401 return 0;
6402 }
6403
6404 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6405
6406 static CORE_ADDR
6407 get_dynamic (const int pid, const int is_elf64)
6408 {
6409 CORE_ADDR phdr_memaddr, relocation;
6410 int num_phdr, i;
6411 unsigned char *phdr_buf;
6412 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6413
6414 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6415 return 0;
6416
6417 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6418 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6419
6420 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6421 return 0;
6422
6423 /* Compute relocation: it is expected to be 0 for "regular" executables,
6424 non-zero for PIE ones. */
6425 relocation = -1;
6426 for (i = 0; relocation == -1 && i < num_phdr; i++)
6427 if (is_elf64)
6428 {
6429 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6430
6431 if (p->p_type == PT_PHDR)
6432 relocation = phdr_memaddr - p->p_vaddr;
6433 }
6434 else
6435 {
6436 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6437
6438 if (p->p_type == PT_PHDR)
6439 relocation = phdr_memaddr - p->p_vaddr;
6440 }
6441
6442 if (relocation == -1)
6443 {
6444      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6445         real world executables, including PIE executables, always have
6446         PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6447         from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6448         provides DT_DEBUG anyway (fpc binaries are statically linked).
6449
6450 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6451
6452 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6453
6454 return 0;
6455 }
6456
6457 for (i = 0; i < num_phdr; i++)
6458 {
6459 if (is_elf64)
6460 {
6461 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6462
6463 if (p->p_type == PT_DYNAMIC)
6464 return p->p_vaddr + relocation;
6465 }
6466 else
6467 {
6468 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6469
6470 if (p->p_type == PT_DYNAMIC)
6471 return p->p_vaddr + relocation;
6472 }
6473 }
6474
6475 return 0;
6476 }
6477
6478 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6479 can be 0 if the inferior does not yet have the library list initialized.
6480 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6481 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6482
6483 static CORE_ADDR
6484 get_r_debug (const int pid, const int is_elf64)
6485 {
6486 CORE_ADDR dynamic_memaddr;
6487 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6488 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6489 CORE_ADDR map = -1;
6490
6491 dynamic_memaddr = get_dynamic (pid, is_elf64);
6492 if (dynamic_memaddr == 0)
6493 return map;
6494
6495 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6496 {
6497 if (is_elf64)
6498 {
6499 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6500 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6501 union
6502 {
6503 Elf64_Xword map;
6504 unsigned char buf[sizeof (Elf64_Xword)];
6505 }
6506 rld_map;
6507 #endif
6508 #ifdef DT_MIPS_RLD_MAP
6509 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6510 {
6511 if (linux_read_memory (dyn->d_un.d_val,
6512 rld_map.buf, sizeof (rld_map.buf)) == 0)
6513 return rld_map.map;
6514 else
6515 break;
6516 }
6517 #endif /* DT_MIPS_RLD_MAP */
6518 #ifdef DT_MIPS_RLD_MAP_REL
6519 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6520 {
6521 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6522 rld_map.buf, sizeof (rld_map.buf)) == 0)
6523 return rld_map.map;
6524 else
6525 break;
6526 }
6527 #endif /* DT_MIPS_RLD_MAP_REL */
6528
6529 if (dyn->d_tag == DT_DEBUG && map == -1)
6530 map = dyn->d_un.d_val;
6531
6532 if (dyn->d_tag == DT_NULL)
6533 break;
6534 }
6535 else
6536 {
6537 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6538 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6539 union
6540 {
6541 Elf32_Word map;
6542 unsigned char buf[sizeof (Elf32_Word)];
6543 }
6544 rld_map;
6545 #endif
6546 #ifdef DT_MIPS_RLD_MAP
6547 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6548 {
6549 if (linux_read_memory (dyn->d_un.d_val,
6550 rld_map.buf, sizeof (rld_map.buf)) == 0)
6551 return rld_map.map;
6552 else
6553 break;
6554 }
6555 #endif /* DT_MIPS_RLD_MAP */
6556 #ifdef DT_MIPS_RLD_MAP_REL
6557 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6558 {
6559 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6560 rld_map.buf, sizeof (rld_map.buf)) == 0)
6561 return rld_map.map;
6562 else
6563 break;
6564 }
6565 #endif /* DT_MIPS_RLD_MAP_REL */
6566
6567 if (dyn->d_tag == DT_DEBUG && map == -1)
6568 map = dyn->d_un.d_val;
6569
6570 if (dyn->d_tag == DT_NULL)
6571 break;
6572 }
6573
6574 dynamic_memaddr += dyn_size;
6575 }
6576
6577 return map;
6578 }
6579
6580 /* Read one pointer from MEMADDR in the inferior. */
6581
6582 static int
6583 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6584 {
6585 int ret;
6586
6587 /* Go through a union so this works on either big or little endian
6588 hosts, when the inferior's pointer size is smaller than the size
6589 of CORE_ADDR. It is assumed the inferior's endianness is the
6590      same as the superior's.  */
6591 union
6592 {
6593 CORE_ADDR core_addr;
6594 unsigned int ui;
6595 unsigned char uc;
6596 } addr;
6597
6598 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6599 if (ret == 0)
6600 {
6601 if (ptr_size == sizeof (CORE_ADDR))
6602 *ptr = addr.core_addr;
6603 else if (ptr_size == sizeof (unsigned int))
6604 *ptr = addr.ui;
6605 else
6606 gdb_assert_not_reached ("unhandled pointer size");
6607 }
6608 return ret;
6609 }
6610
6611 struct link_map_offsets
6612 {
6613 /* Offset and size of r_debug.r_version. */
6614 int r_version_offset;
6615
6616 /* Offset and size of r_debug.r_map. */
6617 int r_map_offset;
6618
6619 /* Offset to l_addr field in struct link_map. */
6620 int l_addr_offset;
6621
6622 /* Offset to l_name field in struct link_map. */
6623 int l_name_offset;
6624
6625 /* Offset to l_ld field in struct link_map. */
6626 int l_ld_offset;
6627
6628 /* Offset to l_next field in struct link_map. */
6629 int l_next_offset;
6630
6631 /* Offset to l_prev field in struct link_map. */
6632 int l_prev_offset;
6633 };
6634
6635 /* Construct qXfer:libraries-svr4:read reply. */
6636
6637 static int
6638 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6639 unsigned const char *writebuf,
6640 CORE_ADDR offset, int len)
6641 {
6642 char *document;
6643 unsigned document_len;
6644 struct process_info_private *const priv = current_process ()->priv;
6645 char filename[PATH_MAX];
6646 int pid, is_elf64;
6647
6648 static const struct link_map_offsets lmo_32bit_offsets =
6649 {
6650 0, /* r_version offset. */
6651 4, /* r_debug.r_map offset. */
6652 0, /* l_addr offset in link_map. */
6653 4, /* l_name offset in link_map. */
6654 8, /* l_ld offset in link_map. */
6655 12, /* l_next offset in link_map. */
6656 16 /* l_prev offset in link_map. */
6657 };
6658
6659 static const struct link_map_offsets lmo_64bit_offsets =
6660 {
6661 0, /* r_version offset. */
6662 8, /* r_debug.r_map offset. */
6663 0, /* l_addr offset in link_map. */
6664 8, /* l_name offset in link_map. */
6665 16, /* l_ld offset in link_map. */
6666 24, /* l_next offset in link_map. */
6667 32 /* l_prev offset in link_map. */
6668 };
6669 const struct link_map_offsets *lmo;
6670 unsigned int machine;
6671 int ptr_size;
6672 CORE_ADDR lm_addr = 0, lm_prev = 0;
6673 int allocated = 1024;
6674 char *p;
6675 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6676 int header_done = 0;
6677
6678 if (writebuf != NULL)
6679 return -2;
6680 if (readbuf == NULL)
6681 return -1;
6682
6683 pid = lwpid_of (current_thread);
6684 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6685 is_elf64 = elf_64_file_p (filename, &machine);
6686 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6687 ptr_size = is_elf64 ? 8 : 4;
6688
6689 while (annex[0] != '\0')
6690 {
6691 const char *sep;
6692 CORE_ADDR *addrp;
6693 int len;
6694
6695 sep = strchr (annex, '=');
6696 if (sep == NULL)
6697 break;
6698
6699 len = sep - annex;
6700 if (len == 5 && startswith (annex, "start"))
6701 addrp = &lm_addr;
6702 else if (len == 4 && startswith (annex, "prev"))
6703 addrp = &lm_prev;
6704 else
6705 {
6706 annex = strchr (sep, ';');
6707 if (annex == NULL)
6708 break;
6709 annex++;
6710 continue;
6711 }
6712
6713 annex = decode_address_to_semicolon (addrp, sep + 1);
6714 }
6715
6716 if (lm_addr == 0)
6717 {
6718 int r_version = 0;
6719
6720 if (priv->r_debug == 0)
6721 priv->r_debug = get_r_debug (pid, is_elf64);
6722
6723      /* We failed to find DT_DEBUG.  That situation will not change
6724         for this inferior, so do not retry.  Report it to GDB as
6725         E01; see GDB's solib-svr4.c for the reasons.  */
6726 if (priv->r_debug == (CORE_ADDR) -1)
6727 return -1;
6728
6729 if (priv->r_debug != 0)
6730 {
6731 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6732 (unsigned char *) &r_version,
6733 sizeof (r_version)) != 0
6734 || r_version != 1)
6735 {
6736 warning ("unexpected r_debug version %d", r_version);
6737 }
6738 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6739 &lm_addr, ptr_size) != 0)
6740 {
6741 warning ("unable to read r_map from 0x%lx",
6742 (long) priv->r_debug + lmo->r_map_offset);
6743 }
6744 }
6745 }
6746
6747 document = (char *) xmalloc (allocated);
6748 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6749 p = document + strlen (document);
6750
6751 while (lm_addr
6752 && read_one_ptr (lm_addr + lmo->l_name_offset,
6753 &l_name, ptr_size) == 0
6754 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6755 &l_addr, ptr_size) == 0
6756 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6757 &l_ld, ptr_size) == 0
6758 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6759 &l_prev, ptr_size) == 0
6760 && read_one_ptr (lm_addr + lmo->l_next_offset,
6761 &l_next, ptr_size) == 0)
6762 {
6763 unsigned char libname[PATH_MAX];
6764
6765 if (lm_prev != l_prev)
6766 {
6767 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6768 (long) lm_prev, (long) l_prev);
6769 break;
6770 }
6771
6772      /* Ignore the first entry even if it has a valid name, as it
6773         corresponds to the main executable.  The first entry should not be
6774         skipped if the dynamic loader was loaded late by a static executable
6775         (see the solib-svr4.c parameter ignore_first), but in that case the
6776         main executable has no PT_DYNAMIC present and this function has
6777         already returned above because get_r_debug failed.  */
6778 if (lm_prev == 0)
6779 {
6780 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6781 p = p + strlen (p);
6782 }
6783 else
6784 {
6785 /* Not checking for error because reading may stop before
6786 we've got PATH_MAX worth of characters. */
6787 libname[0] = '\0';
6788 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6789 libname[sizeof (libname) - 1] = '\0';
6790 if (libname[0] != '\0')
6791 {
6792 /* 6x the size for xml_escape_text below. */
6793 size_t len = 6 * strlen ((char *) libname);
6794 char *name;
6795
6796 if (!header_done)
6797 {
6798 /* Terminate `<library-list-svr4'. */
6799 *p++ = '>';
6800 header_done = 1;
6801 }
6802
6803 while (allocated < p - document + len + 200)
6804 {
6805 /* Expand to guarantee sufficient storage. */
6806 uintptr_t document_len = p - document;
6807
6808 document = (char *) xrealloc (document, 2 * allocated);
6809 allocated *= 2;
6810 p = document + document_len;
6811 }
6812
6813 name = xml_escape_text ((char *) libname);
6814 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6815 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6816 name, (unsigned long) lm_addr,
6817 (unsigned long) l_addr, (unsigned long) l_ld);
6818 free (name);
6819 }
6820 }
6821
6822 lm_prev = lm_addr;
6823 lm_addr = l_next;
6824 }
6825
6826 if (!header_done)
6827 {
6828 /* Empty list; terminate `<library-list-svr4'. */
6829 strcpy (p, "/>");
6830 }
6831 else
6832 strcpy (p, "</library-list-svr4>");
6833
6834 document_len = strlen (document);
6835 if (offset < document_len)
6836 document_len -= offset;
6837 else
6838 document_len = 0;
6839 if (len > document_len)
6840 len = document_len;
6841
6842 memcpy (readbuf, document + offset, len);
6843 xfree (document);
6844
6845 return len;
6846 }
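/* For reference, the qXfer:libraries-svr4:read document built above looks
   roughly like the following (a hand-written illustration with made-up
   addresses, not captured output):

   <library-list-svr4 version="1.0" main-lm="0x7f00deadbe00">
     <library name="/lib/libc.so.6" lm="0x7f00deadc000"
	      l_addr="0x7f00dea00000" l_ld="0x7f00debcde80"/>
   </library-list-svr4>  */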
6847
6848 #ifdef HAVE_LINUX_BTRACE
6849
6850 /* See to_disable_btrace target method. */
6851
6852 static int
6853 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6854 {
6855 enum btrace_error err;
6856
6857 err = linux_disable_btrace (tinfo);
6858 return (err == BTRACE_ERR_NONE ? 0 : -1);
6859 }
6860
6861 /* Encode an Intel(R) Processor Trace configuration. */
6862
6863 static void
6864 linux_low_encode_pt_config (struct buffer *buffer,
6865 const struct btrace_data_pt_config *config)
6866 {
6867 buffer_grow_str (buffer, "<pt-config>\n");
6868
6869 switch (config->cpu.vendor)
6870 {
6871 case CV_INTEL:
6872 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6873 "model=\"%u\" stepping=\"%u\"/>\n",
6874 config->cpu.family, config->cpu.model,
6875 config->cpu.stepping);
6876 break;
6877
6878 default:
6879 break;
6880 }
6881
6882 buffer_grow_str (buffer, "</pt-config>\n");
6883 }
6884
6885 /* Encode a raw buffer. */
6886
6887 static void
6888 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6889 unsigned int size)
6890 {
6891 if (size == 0)
6892 return;
6893
6894 /* We use hex encoding - see common/rsp-low.h. */
6895 buffer_grow_str (buffer, "<raw>\n");
6896
6897 while (size-- > 0)
6898 {
6899 char elem[2];
6900
6901 elem[0] = tohex ((*data >> 4) & 0xf);
6902 elem[1] = tohex (*data++ & 0xf);
6903
6904 buffer_grow (buffer, elem, 2);
6905 }
6906
6907 buffer_grow_str (buffer, "</raw>\n");
6908 }
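/* A tiny hedged sketch of the nibble-at-a-time hex encoding performed
   above, written against a local lookup table instead of tohex; the
   helper name is hypothetical.  */
#if 0
static void
demo_encode_hex (const unsigned char *data, unsigned int size, char *out)
{
  /* OUT must have room for 2 * SIZE + 1 bytes.  */
  static const char hexchars[] = "0123456789abcdef";

  while (size-- > 0)
    {
      *out++ = hexchars[(*data >> 4) & 0xf];
      *out++ = hexchars[*data++ & 0xf];
    }
  *out = '\0';
}
#endif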
6909
6910 /* See to_read_btrace target method. */
6911
6912 static int
6913 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6914 enum btrace_read_type type)
6915 {
6916 struct btrace_data btrace;
6917 struct btrace_block *block;
6918 enum btrace_error err;
6919 int i;
6920
6921 btrace_data_init (&btrace);
6922
6923 err = linux_read_btrace (&btrace, tinfo, type);
6924 if (err != BTRACE_ERR_NONE)
6925 {
6926 if (err == BTRACE_ERR_OVERFLOW)
6927 buffer_grow_str0 (buffer, "E.Overflow.");
6928 else
6929 buffer_grow_str0 (buffer, "E.Generic Error.");
6930
6931 goto err;
6932 }
6933
6934 switch (btrace.format)
6935 {
6936 case BTRACE_FORMAT_NONE:
6937 buffer_grow_str0 (buffer, "E.No Trace.");
6938 goto err;
6939
6940 case BTRACE_FORMAT_BTS:
6941 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6942 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6943
6944 for (i = 0;
6945 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6946 i++)
6947 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6948 paddress (block->begin), paddress (block->end));
6949
6950 buffer_grow_str0 (buffer, "</btrace>\n");
6951 break;
6952
6953 case BTRACE_FORMAT_PT:
6954 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6955 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6956 buffer_grow_str (buffer, "<pt>\n");
6957
6958 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6959
6960 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6961 btrace.variant.pt.size);
6962
6963 buffer_grow_str (buffer, "</pt>\n");
6964 buffer_grow_str0 (buffer, "</btrace>\n");
6965 break;
6966
6967 default:
6968 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6969 goto err;
6970 }
6971
6972 btrace_data_fini (&btrace);
6973 return 0;
6974
6975 err:
6976 btrace_data_fini (&btrace);
6977 return -1;
6978 }
6979
6980 /* See to_btrace_conf target method. */
6981
6982 static int
6983 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6984 struct buffer *buffer)
6985 {
6986 const struct btrace_config *conf;
6987
6988 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6989 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6990
6991 conf = linux_btrace_conf (tinfo);
6992 if (conf != NULL)
6993 {
6994 switch (conf->format)
6995 {
6996 case BTRACE_FORMAT_NONE:
6997 break;
6998
6999 case BTRACE_FORMAT_BTS:
7000 buffer_xml_printf (buffer, "<bts");
7001 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7002 buffer_xml_printf (buffer, " />\n");
7003 break;
7004
7005 case BTRACE_FORMAT_PT:
7006 buffer_xml_printf (buffer, "<pt");
7007 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7008 buffer_xml_printf (buffer, "/>\n");
7009 break;
7010 }
7011 }
7012
7013 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7014 return 0;
7015 }
7016 #endif /* HAVE_LINUX_BTRACE */
7017
7018 /* See nat/linux-nat.h. */
7019
7020 ptid_t
7021 current_lwp_ptid (void)
7022 {
7023 return ptid_of (current_thread);
7024 }
7025
7026 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7027
7028 static int
7029 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7030 {
7031 if (the_low_target.breakpoint_kind_from_pc != NULL)
7032 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7033 else
7034 return default_breakpoint_kind_from_pc (pcptr);
7035 }
7036
7037 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7038
7039 static const gdb_byte *
7040 linux_sw_breakpoint_from_kind (int kind, int *size)
7041 {
7042 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7043
7044 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7045 }
7046
7047 /* Implementation of the target_ops method
7048 "breakpoint_kind_from_current_state". */
7049
7050 static int
7051 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7052 {
7053 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7054 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7055 else
7056 return linux_breakpoint_kind_from_pc (pcptr);
7057 }
7058
7059 static struct target_ops linux_target_ops = {
7060 linux_create_inferior,
7061 linux_arch_setup,
7062 linux_attach,
7063 linux_kill,
7064 linux_detach,
7065 linux_mourn,
7066 linux_join,
7067 linux_thread_alive,
7068 linux_resume,
7069 linux_wait,
7070 linux_fetch_registers,
7071 linux_store_registers,
7072 linux_prepare_to_access_memory,
7073 linux_done_accessing_memory,
7074 linux_read_memory,
7075 linux_write_memory,
7076 linux_look_up_symbols,
7077 linux_request_interrupt,
7078 linux_read_auxv,
7079 linux_supports_z_point_type,
7080 linux_insert_point,
7081 linux_remove_point,
7082 linux_stopped_by_sw_breakpoint,
7083 linux_supports_stopped_by_sw_breakpoint,
7084 linux_stopped_by_hw_breakpoint,
7085 linux_supports_stopped_by_hw_breakpoint,
7086 linux_supports_hardware_single_step,
7087 linux_stopped_by_watchpoint,
7088 linux_stopped_data_address,
7089 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7090 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7091 && defined(PT_TEXT_END_ADDR)
7092 linux_read_offsets,
7093 #else
7094 NULL,
7095 #endif
7096 #ifdef USE_THREAD_DB
7097 thread_db_get_tls_address,
7098 #else
7099 NULL,
7100 #endif
7101 linux_qxfer_spu,
7102 hostio_last_error_from_errno,
7103 linux_qxfer_osdata,
7104 linux_xfer_siginfo,
7105 linux_supports_non_stop,
7106 linux_async,
7107 linux_start_non_stop,
7108 linux_supports_multi_process,
7109 linux_supports_fork_events,
7110 linux_supports_vfork_events,
7111 linux_supports_exec_events,
7112 linux_handle_new_gdb_connection,
7113 #ifdef USE_THREAD_DB
7114 thread_db_handle_monitor_command,
7115 #else
7116 NULL,
7117 #endif
7118 linux_common_core_of_thread,
7119 linux_read_loadmap,
7120 linux_process_qsupported,
7121 linux_supports_tracepoints,
7122 linux_read_pc,
7123 linux_write_pc,
7124 linux_thread_stopped,
7125 NULL,
7126 linux_pause_all,
7127 linux_unpause_all,
7128 linux_stabilize_threads,
7129 linux_install_fast_tracepoint_jump_pad,
7130 linux_emit_ops,
7131 linux_supports_disable_randomization,
7132 linux_get_min_fast_tracepoint_insn_len,
7133 linux_qxfer_libraries_svr4,
7134 linux_supports_agent,
7135 #ifdef HAVE_LINUX_BTRACE
7136 linux_supports_btrace,
7137 linux_enable_btrace,
7138 linux_low_disable_btrace,
7139 linux_low_read_btrace,
7140 linux_low_btrace_conf,
7141 #else
7142 NULL,
7143 NULL,
7144 NULL,
7145 NULL,
7146 NULL,
7147 #endif
7148 linux_supports_range_stepping,
7149 linux_proc_pid_to_exec_file,
7150 linux_mntns_open_cloexec,
7151 linux_mntns_unlink,
7152 linux_mntns_readlink,
7153 linux_breakpoint_kind_from_pc,
7154 linux_sw_breakpoint_from_kind,
7155 linux_proc_tid_get_name,
7156 linux_breakpoint_kind_from_current_state
7157 };
7158
7159 static void
7160 linux_init_signals (void)
7161 {
7162 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
7163 to find what the cancel signal actually is. */
7164 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
7165 signal (__SIGRTMIN+1, SIG_IGN);
7166 #endif
7167 }
7168
7169 #ifdef HAVE_LINUX_REGSETS
7170 void
7171 initialize_regsets_info (struct regsets_info *info)
7172 {
7173 for (info->num_regsets = 0;
7174 info->regsets[info->num_regsets].size >= 0;
7175 info->num_regsets++)
7176 ;
7177 }
7178 #endif
7179
7180 void
7181 initialize_low (void)
7182 {
7183 struct sigaction sigchld_action;
7184
7185 memset (&sigchld_action, 0, sizeof (sigchld_action));
7186 set_target_ops (&linux_target_ops);
7187
7188 linux_init_signals ();
7189 linux_ptrace_init_warnings ();
7190
7191 sigchld_action.sa_handler = sigchld_handler;
7192 sigemptyset (&sigchld_action.sa_mask);
7193 sigchld_action.sa_flags = SA_RESTART;
7194 sigaction (SIGCHLD, &sigchld_action, NULL);
7195
7196 initialize_low_arch ();
7197
7198 linux_check_ptrace_features ();
7199 }