1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "linux-osdata.h"
22 #include "agent.h"
23
24 #include <sys/wait.h>
25 #include <stdio.h>
26 #include <sys/param.h>
27 #include <sys/ptrace.h>
28 #include "linux-ptrace.h"
29 #include "linux-procfs.h"
30 #include <signal.h>
31 #include <sys/ioctl.h>
32 #include <fcntl.h>
33 #include <string.h>
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #ifndef ELFMAG0
47 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
51 #include <elf.h>
52 #endif
53
54 #ifndef SPUFS_MAGIC
55 #define SPUFS_MAGIC 0x23c9b64e
56 #endif
57
58 #ifdef HAVE_PERSONALITY
59 # include <sys/personality.h>
60 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
61 # define ADDR_NO_RANDOMIZE 0x0040000
62 # endif
63 #endif
64
65 #ifndef O_LARGEFILE
66 #define O_LARGEFILE 0
67 #endif
68
69 #ifndef W_STOPCODE
70 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
71 #endif
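/* Editorial sketch, guarded out of the build: a sanity check of the
   W_STOPCODE fallback above.  A status built this way should satisfy
   the standard wait macros.  The function name is hypothetical.  */
#if 0
static void
w_stopcode_sketch (void)
{
  int status = W_STOPCODE (SIGSTOP);

  gdb_assert (WIFSTOPPED (status));
  gdb_assert (WSTOPSIG (status) == SIGSTOP);
}
#endif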
72
73 /* This is the kernel's hard limit. Not to be confused with
74 SIGRTMIN. */
75 #ifndef __SIGRTMIN
76 #define __SIGRTMIN 32
77 #endif
78
79 #ifdef __UCLIBC__
80 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
81 /* PTRACE_TEXT_ADDR and friends. */
82 #include <asm/ptrace.h>
83 #define HAS_NOMMU
84 #endif
85 #endif
86
87 #ifndef HAVE_ELF32_AUXV_T
88 /* Copied from glibc's elf.h. */
89 typedef struct
90 {
91 uint32_t a_type; /* Entry type */
92 union
93 {
94 uint32_t a_val; /* Integer value */
 95         /* We used to have pointer elements added here.  We cannot do that,
96 though, since it does not work when using 32-bit definitions
97 on 64-bit platforms and vice versa. */
98 } a_un;
99 } Elf32_auxv_t;
100 #endif
101
102 #ifndef HAVE_ELF64_AUXV_T
103 /* Copied from glibc's elf.h. */
104 typedef struct
105 {
106 uint64_t a_type; /* Entry type */
107 union
108 {
109 uint64_t a_val; /* Integer value */
 110       /* We used to have pointer elements added here.  We cannot do that,
111 though, since it does not work when using 32-bit definitions
112 on 64-bit platforms and vice versa. */
113 } a_un;
114 } Elf64_auxv_t;
115 #endif
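/* Editorial sketch, guarded out of the build: walking an inferior's
   /proc/PID/auxv with the fixed-width record type above, assuming a
   64-bit inferior.  Because a_un has no pointer member, the record
   layout is the same no matter which word size gdbserver itself was
   built for.  The function name is hypothetical.  */
#if 0
static void
auxv_dump_sketch (int pid)
{
  char filename[64];
  Elf64_auxv_t entry;
  FILE *f;

  sprintf (filename, "/proc/%d/auxv", pid);
  f = fopen (filename, "r");
  if (f == NULL)
    return;

  /* An a_type of zero (AT_NULL) terminates the vector.  */
  while (fread (&entry, sizeof (entry), 1, f) == 1 && entry.a_type != 0)
    fprintf (stderr, "type %llu value 0x%llx\n",
	     (unsigned long long) entry.a_type,
	     (unsigned long long) entry.a_un.a_val);

  fclose (f);
}
#endif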
116
117 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
118 representation of the thread ID.
119
120 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
121 the same as the LWP ID.
122
123 ``all_processes'' is keyed by the "overall process ID", which
124 GNU/Linux calls tgid, "thread group ID". */
125
126 struct inferior_list all_lwps;
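/* Editorial sketch, guarded out of the build: how the keying
   described above shows up in the ptids this file builds.  The first
   slot of ptid_build holds the tgid and the second the kernel LWP
   id, so the thread-group leader of process 1234 carries the same
   number in both.  The function name is hypothetical.  */
#if 0
static void
ptid_keying_sketch (void)
{
  ptid_t leader = ptid_build (1234, 1234, 0);
  ptid_t worker = ptid_build (1234, 1235, 0);

  gdb_assert (ptid_get_pid (leader) == ptid_get_pid (worker));
  gdb_assert (ptid_get_lwp (worker) == 1235);
}
#endif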
127
128 /* A list of all unknown processes which receive stop signals. Some
129 other process will presumably claim each of these as forked
130 children momentarily. */
131
132 struct simple_pid_list
133 {
134 /* The process ID. */
135 int pid;
136
137 /* The status as reported by waitpid. */
138 int status;
139
140 /* Next in chain. */
141 struct simple_pid_list *next;
142 };
143 struct simple_pid_list *stopped_pids;
144
145 /* Trivial list manipulation functions to keep track of a list of new
146 stopped processes. */
147
148 static void
149 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
150 {
151 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
152
153 new_pid->pid = pid;
154 new_pid->status = status;
155 new_pid->next = *listp;
156 *listp = new_pid;
157 }
158
159 static int
160 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
161 {
162 struct simple_pid_list **p;
163
164 for (p = listp; *p != NULL; p = &(*p)->next)
165 if ((*p)->pid == pid)
166 {
167 struct simple_pid_list *next = (*p)->next;
168
169 *statusp = (*p)->status;
170 xfree (*p);
171 *p = next;
172 return 1;
173 }
174 return 0;
175 }
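/* Editorial sketch, guarded out of the build: the intended round
   trip through the helpers above.  handle_extended_wait and
   linux_wait_for_lwp below use exactly this pattern for clone
   children whose initial stop arrives before the clone event.  The
   function name is hypothetical.  */
#if 0
static void
pid_list_sketch (void)
{
  int status;

  add_to_pid_list (&stopped_pids, 4321, W_STOPCODE (SIGSTOP));

  if (pull_pid_from_list (&stopped_pids, 4321, &status))
    gdb_assert (WSTOPSIG (status) == SIGSTOP);
}
#endif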
176
177 enum stopping_threads_kind
178 {
179 /* Not stopping threads presently. */
180 NOT_STOPPING_THREADS,
181
182 /* Stopping threads. */
183 STOPPING_THREADS,
184
185 /* Stopping and suspending threads. */
186 STOPPING_AND_SUSPENDING_THREADS
187 };
188
189 /* This is set while stop_all_lwps is in effect. */
190 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
191
192 /* FIXME make into a target method? */
193 int using_threads = 1;
194
195 /* True if we're presently stabilizing threads (moving them out of
196 jump pads). */
197 static int stabilizing_threads;
198
199 /* This flag is true iff we've just created or attached to our first
200 inferior but it has not stopped yet. As soon as it does, we need
201 to call the low target's arch_setup callback. Doing this only on
 202    the first inferior avoids reinitializing the architecture on every
203 inferior, and avoids messing with the register caches of the
204 already running inferiors. NOTE: this assumes all inferiors under
205 control of gdbserver have the same architecture. */
206 static int new_inferior;
207
208 static void linux_resume_one_lwp (struct lwp_info *lwp,
209 int step, int signal, siginfo_t *info);
210 static void linux_resume (struct thread_resume *resume_info, size_t n);
211 static void stop_all_lwps (int suspend, struct lwp_info *except);
212 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
213 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
214 static void *add_lwp (ptid_t ptid);
215 static int linux_stopped_by_watchpoint (void);
216 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
217 static void proceed_all_lwps (void);
218 static int finish_step_over (struct lwp_info *lwp);
219 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
220 static int kill_lwp (unsigned long lwpid, int signo);
221 static void linux_enable_event_reporting (int pid);
222
223 /* True if the low target can hardware single-step. Such targets
224 don't need a BREAKPOINT_REINSERT_ADDR callback. */
225
226 static int
227 can_hardware_single_step (void)
228 {
229 return (the_low_target.breakpoint_reinsert_addr == NULL);
230 }
231
232 /* True if the low target supports memory breakpoints. If so, we'll
233 have a GET_PC implementation. */
234
235 static int
236 supports_breakpoints (void)
237 {
238 return (the_low_target.get_pc != NULL);
239 }
240
241 /* Returns true if this target can support fast tracepoints. This
242 does not mean that the in-process agent has been loaded in the
243 inferior. */
244
245 static int
246 supports_fast_tracepoints (void)
247 {
248 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
249 }
250
251 struct pending_signals
252 {
253 int signal;
254 siginfo_t info;
255 struct pending_signals *prev;
256 };
257
258 #ifdef HAVE_LINUX_REGSETS
259 static char *disabled_regsets;
260 static int num_regsets;
261 #endif
262
263 /* The read/write ends of the pipe registered as waitable file in the
264 event loop. */
265 static int linux_event_pipe[2] = { -1, -1 };
266
267 /* True if we're currently in async mode. */
268 #define target_is_async_p() (linux_event_pipe[0] != -1)
269
270 static void send_sigstop (struct lwp_info *lwp);
271 static void wait_for_sigstop (struct inferior_list_entry *entry);
272
 273 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit, and -1 if it is not ELF at all; set *MACHINE from e_machine (EM_NONE if not ELF).  */
274
275 static int
276 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
277 {
278 if (header->e_ident[EI_MAG0] == ELFMAG0
279 && header->e_ident[EI_MAG1] == ELFMAG1
280 && header->e_ident[EI_MAG2] == ELFMAG2
281 && header->e_ident[EI_MAG3] == ELFMAG3)
282 {
283 *machine = header->e_machine;
284 return header->e_ident[EI_CLASS] == ELFCLASS64;
285
286 }
287 *machine = EM_NONE;
288 return -1;
289 }
290
291 /* Return non-zero if FILE is a 64-bit ELF file,
292 zero if the file is not a 64-bit ELF file,
293 and -1 if the file is not accessible or doesn't exist. */
294
295 static int
296 elf_64_file_p (const char *file, unsigned int *machine)
297 {
298 Elf64_Ehdr header;
299 int fd;
300
301 fd = open (file, O_RDONLY);
302 if (fd < 0)
303 return -1;
304
305 if (read (fd, &header, sizeof (header)) != sizeof (header))
306 {
307 close (fd);
308 return 0;
309 }
310 close (fd);
311
312 return elf_64_header_p (&header, machine);
313 }
314
 315 /* Accepts an integer PID; returns true if the executable that PID
 316    is running is a 64-bit ELF file.  */
317
318 int
319 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
320 {
321 char file[MAXPATHLEN];
322
323 sprintf (file, "/proc/%d/exe", pid);
324 return elf_64_file_p (file, machine);
325 }
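/* Editorial sketch, guarded out of the build: querying the ELF class
   of a live process through the helpers above.  A -1 result means
   the file could not be opened or did not carry the ELF magic.  The
   function name is hypothetical.  */
#if 0
static void
elf_class_sketch (int pid)
{
  unsigned int machine;
  int is_64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (is_64 < 0)
    fprintf (stderr, "pid %d: executable is not a readable ELF file\n", pid);
  else
    fprintf (stderr, "pid %d: %d-bit ELF, e_machine %u\n",
	     pid, is_64 ? 64 : 32, machine);
}
#endif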
326
327 static void
328 delete_lwp (struct lwp_info *lwp)
329 {
330 remove_thread (get_lwp_thread (lwp));
331 remove_inferior (&all_lwps, &lwp->head);
332 free (lwp->arch_private);
333 free (lwp);
334 }
335
336 /* Add a process to the common process list, and set its private
337 data. */
338
339 static struct process_info *
340 linux_add_process (int pid, int attached)
341 {
342 struct process_info *proc;
343
344 /* Is this the first process? If so, then set the arch. */
345 if (all_processes.head == NULL)
346 new_inferior = 1;
347
348 proc = add_process (pid, attached);
349 proc->private = xcalloc (1, sizeof (*proc->private));
350
351 if (the_low_target.new_process != NULL)
352 proc->private->arch_private = the_low_target.new_process ();
353
354 return proc;
355 }
356
357 /* Wrapper function for waitpid which handles EINTR, and emulates
358 __WALL for systems where that is not available. */
359
360 static int
361 my_waitpid (int pid, int *status, int flags)
362 {
363 int ret, out_errno;
364
365 if (debug_threads)
366 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
367
368 if (flags & __WALL)
369 {
370 sigset_t block_mask, org_mask, wake_mask;
371 int wnohang;
372
373 wnohang = (flags & WNOHANG) != 0;
374 flags &= ~(__WALL | __WCLONE);
375 flags |= WNOHANG;
376
377 /* Block all signals while here. This avoids knowing about
 378          LinuxThreads' signals.  */
379 sigfillset (&block_mask);
380 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
381
382 /* ... except during the sigsuspend below. */
383 sigemptyset (&wake_mask);
384
385 while (1)
386 {
387 /* Since all signals are blocked, there's no need to check
388 for EINTR here. */
389 ret = waitpid (pid, status, flags);
390 out_errno = errno;
391
392 if (ret == -1 && out_errno != ECHILD)
393 break;
394 else if (ret > 0)
395 break;
396
397 if (flags & __WCLONE)
398 {
399 /* We've tried both flavors now. If WNOHANG is set,
400 there's nothing else to do, just bail out. */
401 if (wnohang)
402 break;
403
404 if (debug_threads)
405 fprintf (stderr, "blocking\n");
406
407 /* Block waiting for signals. */
408 sigsuspend (&wake_mask);
409 }
410
411 flags ^= __WCLONE;
412 }
413
414 sigprocmask (SIG_SETMASK, &org_mask, NULL);
415 }
416 else
417 {
418 do
419 ret = waitpid (pid, status, flags);
420 while (ret == -1 && errno == EINTR);
421 out_errno = errno;
422 }
423
424 if (debug_threads)
425 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
426 pid, flags, status ? *status : -1, ret);
427
428 errno = out_errno;
429 return ret;
430 }
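/* Editorial sketch, guarded out of the build: non-blocking use of
   the wrapper above.  __WALL | WNOHANG is the combination the
   emulation path works hardest for, alternating between the cloned
   and non-cloned waitpid flavors without ever blocking.  The
   function name is hypothetical.  */
#if 0
static int
poll_lwp_sketch (int lwpid)
{
  int status;
  int ret = my_waitpid (lwpid, &status, __WALL | WNOHANG);

  if (ret == 0)
    return 0;			/* No event pending yet.  */
  if (ret == -1)
    return -1;			/* ECHILD or another error.  */

  return WIFSTOPPED (status) ? WSTOPSIG (status) : 0;
}
#endif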
431
432 /* Handle a GNU/Linux extended wait response. If we see a clone
433 event, we need to add the new LWP to our list (and not report the
434 trap to higher layers). */
435
436 static void
437 handle_extended_wait (struct lwp_info *event_child, int wstat)
438 {
439 int event = wstat >> 16;
440 struct lwp_info *new_lwp;
441
442 if (event == PTRACE_EVENT_CLONE)
443 {
444 ptid_t ptid;
445 unsigned long new_pid;
446 int ret, status;
447
448 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
449
450 /* If we haven't already seen the new PID stop, wait for it now. */
451 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
452 {
453 /* The new child has a pending SIGSTOP. We can't affect it until it
454 hits the SIGSTOP, but we're already attached. */
455
456 ret = my_waitpid (new_pid, &status, __WALL);
457
458 if (ret == -1)
459 perror_with_name ("waiting for new child");
460 else if (ret != new_pid)
461 warning ("wait returned unexpected PID %d", ret);
462 else if (!WIFSTOPPED (status))
463 warning ("wait returned unexpected status 0x%x", status);
464 }
465
466 linux_enable_event_reporting (new_pid);
467
468 ptid = ptid_build (pid_of (event_child), new_pid, 0);
469 new_lwp = (struct lwp_info *) add_lwp (ptid);
470 add_thread (ptid, new_lwp);
471
472 /* Either we're going to immediately resume the new thread
473 or leave it stopped. linux_resume_one_lwp is a nop if it
474 thinks the thread is currently running, so set this first
475 before calling linux_resume_one_lwp. */
476 new_lwp->stopped = 1;
477
478 /* If we're suspending all threads, leave this one suspended
479 too. */
480 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
481 new_lwp->suspended = 1;
482
483 /* Normally we will get the pending SIGSTOP. But in some cases
484 we might get another signal delivered to the group first.
485 If we do get another signal, be sure not to lose it. */
486 if (WSTOPSIG (status) == SIGSTOP)
487 {
488 if (stopping_threads != NOT_STOPPING_THREADS)
489 new_lwp->stop_pc = get_stop_pc (new_lwp);
490 else
491 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
492 }
493 else
494 {
495 new_lwp->stop_expected = 1;
496
497 if (stopping_threads != NOT_STOPPING_THREADS)
498 {
499 new_lwp->stop_pc = get_stop_pc (new_lwp);
500 new_lwp->status_pending_p = 1;
501 new_lwp->status_pending = status;
502 }
503 else
504 /* Pass the signal on. This is what GDB does - except
505 shouldn't we really report it instead? */
506 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
507 }
508
509 /* Always resume the current thread. If we are stopping
510 threads, it will have a pending SIGSTOP; we may as well
511 collect it now. */
512 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
513 }
514 }
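/* Editorial sketch, guarded out of the build: the other half of the
   clone protocol handled above.  Extended events only arrive if the
   tracer opted in; linux_enable_event_reporting (declared above,
   defined further down in this file) amounts to something like this,
   assuming PTRACE_O_TRACECLONE is visible (linux-ptrace.h supplies a
   fallback definition on old headers).  */
#if 0
static void
enable_clone_events_sketch (int pid)
{
  /* After this, a clone () in the tracee reports SIGTRAP with
     PTRACE_EVENT_CLONE in bits 16..23 of the wait status - the
     `wstat >> 16' value that handle_extended_wait inspects.  */
  ptrace (PTRACE_SETOPTIONS, pid, 0, (void *) PTRACE_O_TRACECLONE);
}
#endif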
515
516 /* Return the PC as read from the regcache of LWP, without any
517 adjustment. */
518
519 static CORE_ADDR
520 get_pc (struct lwp_info *lwp)
521 {
522 struct thread_info *saved_inferior;
523 struct regcache *regcache;
524 CORE_ADDR pc;
525
526 if (the_low_target.get_pc == NULL)
527 return 0;
528
529 saved_inferior = current_inferior;
530 current_inferior = get_lwp_thread (lwp);
531
532 regcache = get_thread_regcache (current_inferior, 1);
533 pc = (*the_low_target.get_pc) (regcache);
534
535 if (debug_threads)
536 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
537
538 current_inferior = saved_inferior;
539 return pc;
540 }
541
542 /* This function should only be called if LWP got a SIGTRAP.
543 The SIGTRAP could mean several things.
544
545 On i386, where decr_pc_after_break is non-zero:
546 If we were single-stepping this process using PTRACE_SINGLESTEP,
547 we will get only the one SIGTRAP (even if the instruction we
548 stepped over was a breakpoint). The value of $eip will be the
549 next instruction.
550 If we continue the process using PTRACE_CONT, we will get a
551 SIGTRAP when we hit a breakpoint. The value of $eip will be
552 the instruction after the breakpoint (i.e. needs to be
553 decremented). If we report the SIGTRAP to GDB, we must also
554 report the undecremented PC. If we cancel the SIGTRAP, we
555 must resume at the decremented PC.
556
557 (Presumably, not yet tested) On a non-decr_pc_after_break machine
558 with hardware or kernel single-step:
559 If we single-step over a breakpoint instruction, our PC will
560 point at the following instruction. If we continue and hit a
561 breakpoint instruction, our PC will point at the breakpoint
562 instruction. */
563
564 static CORE_ADDR
565 get_stop_pc (struct lwp_info *lwp)
566 {
567 CORE_ADDR stop_pc;
568
569 if (the_low_target.get_pc == NULL)
570 return 0;
571
572 stop_pc = get_pc (lwp);
573
574 if (WSTOPSIG (lwp->last_status) == SIGTRAP
575 && !lwp->stepping
576 && !lwp->stopped_by_watchpoint
577 && lwp->last_status >> 16 == 0)
578 stop_pc -= the_low_target.decr_pc_after_break;
579
580 if (debug_threads)
581 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
582
583 return stop_pc;
584 }
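/* Editorial sketch, guarded out of the build: the i386 case from the
   comment above in concrete numbers.  int3 is one byte, so
   decr_pc_after_break is 1 there: a breakpoint planted at 0x1000
   reports $eip == 0x1001, and the adjustment recovers the
   breakpoint address.  The function name is hypothetical.  */
#if 0
static void
decr_pc_sketch (void)
{
  CORE_ADDR bkpt_addr = 0x1000;	  /* Where the breakpoint was set.  */
  CORE_ADDR reported_pc = 0x1001; /* PC after the trap fires.  */

  gdb_assert (reported_pc - 1 == bkpt_addr);
}
#endif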
585
586 static void *
587 add_lwp (ptid_t ptid)
588 {
589 struct lwp_info *lwp;
590
591 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
592 memset (lwp, 0, sizeof (*lwp));
593
594 lwp->head.id = ptid;
595
596 if (the_low_target.new_thread != NULL)
597 lwp->arch_private = the_low_target.new_thread ();
598
599 add_inferior_to_list (&all_lwps, &lwp->head);
600
601 return lwp;
602 }
603
 604 /* Start an inferior process and return its pid.
605 ALLARGS is a vector of program-name and args. */
606
607 static int
608 linux_create_inferior (char *program, char **allargs)
609 {
610 #ifdef HAVE_PERSONALITY
611 int personality_orig = 0, personality_set = 0;
612 #endif
613 struct lwp_info *new_lwp;
614 int pid;
615 ptid_t ptid;
616
617 #ifdef HAVE_PERSONALITY
618 if (disable_randomization)
619 {
620 errno = 0;
621 personality_orig = personality (0xffffffff);
622 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
623 {
624 personality_set = 1;
625 personality (personality_orig | ADDR_NO_RANDOMIZE);
626 }
627 if (errno != 0 || (personality_set
628 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
629 warning ("Error disabling address space randomization: %s",
630 strerror (errno));
631 }
632 #endif
633
634 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
635 pid = vfork ();
636 #else
637 pid = fork ();
638 #endif
639 if (pid < 0)
640 perror_with_name ("fork");
641
642 if (pid == 0)
643 {
644 ptrace (PTRACE_TRACEME, 0, 0, 0);
645
646 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
647 signal (__SIGRTMIN + 1, SIG_DFL);
648 #endif
649
650 setpgid (0, 0);
651
652 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
653 stdout to stderr so that inferior i/o doesn't corrupt the connection.
654 Also, redirect stdin to /dev/null. */
655 if (remote_connection_is_stdio ())
656 {
657 close (0);
658 open ("/dev/null", O_RDONLY);
659 dup2 (2, 1);
660 if (write (2, "stdin/stdout redirected\n",
661 sizeof ("stdin/stdout redirected\n") - 1) < 0)
662 /* Errors ignored. */;
663 }
664
665 execv (program, allargs);
666 if (errno == ENOENT)
667 execvp (program, allargs);
668
669 fprintf (stderr, "Cannot exec %s: %s.\n", program,
670 strerror (errno));
671 fflush (stderr);
672 _exit (0177);
673 }
674
675 #ifdef HAVE_PERSONALITY
676 if (personality_set)
677 {
678 errno = 0;
679 personality (personality_orig);
680 if (errno != 0)
681 warning ("Error restoring address space randomization: %s",
682 strerror (errno));
683 }
684 #endif
685
686 linux_add_process (pid, 0);
687
688 ptid = ptid_build (pid, pid, 0);
689 new_lwp = add_lwp (ptid);
690 add_thread (ptid, new_lwp);
691 new_lwp->must_set_ptrace_flags = 1;
692
693 return pid;
694 }
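/* Editorial sketch, guarded out of the build: the ASLR dance from
   linux_create_inferior above, reduced to its core.  Querying with
   personality (0xffffffff) is the documented way to read the current
   personality without changing it; the child inherits the modified
   personality across fork, after which the parent restores its own.
   The function name is hypothetical.  */
#if 0
#ifdef HAVE_PERSONALITY
static int
spawn_without_aslr_sketch (const char *program, char **argv)
{
  int orig = personality (0xffffffff);
  int pid;

  personality (orig | ADDR_NO_RANDOMIZE);

  pid = fork ();
  if (pid == 0)
    {
      execv (program, argv);
      _exit (0177);
    }

  personality (orig);
  return pid;
}
#endif
#endif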
695
696 /* Attach to an inferior process. */
697
698 static void
699 linux_attach_lwp_1 (unsigned long lwpid, int initial)
700 {
701 ptid_t ptid;
702 struct lwp_info *new_lwp;
703
704 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
705 {
706 struct buffer buffer;
707
708 if (!initial)
709 {
710 /* If we fail to attach to an LWP, just warn. */
711 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
712 strerror (errno), errno);
713 fflush (stderr);
714 return;
715 }
716
717 /* If we fail to attach to a process, report an error. */
718 buffer_init (&buffer);
719 linux_ptrace_attach_warnings (lwpid, &buffer);
720 buffer_grow_str0 (&buffer, "");
721 error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
722 lwpid, strerror (errno), errno);
723 }
724
725 if (initial)
726 /* If lwp is the tgid, we handle adding existing threads later.
727 Otherwise we just add lwp without bothering about any other
728 threads. */
729 ptid = ptid_build (lwpid, lwpid, 0);
730 else
731 {
732 /* Note that extracting the pid from the current inferior is
733 safe, since we're always called in the context of the same
734 process as this new thread. */
735 int pid = pid_of (get_thread_lwp (current_inferior));
736 ptid = ptid_build (pid, lwpid, 0);
737 }
738
739 new_lwp = (struct lwp_info *) add_lwp (ptid);
740 add_thread (ptid, new_lwp);
741
742 /* We need to wait for SIGSTOP before being able to make the next
743 ptrace call on this LWP. */
744 new_lwp->must_set_ptrace_flags = 1;
745
746 if (linux_proc_pid_is_stopped (lwpid))
747 {
748 if (debug_threads)
749 fprintf (stderr,
750 "Attached to a stopped process\n");
751
752 /* The process is definitely stopped. It is in a job control
753 stop, unless the kernel predates the TASK_STOPPED /
754 TASK_TRACED distinction, in which case it might be in a
755 ptrace stop. Make sure it is in a ptrace stop; from there we
756 can kill it, signal it, et cetera.
757
758 First make sure there is a pending SIGSTOP. Since we are
759 already attached, the process can not transition from stopped
760 to running without a PTRACE_CONT; so we know this signal will
761 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
762 probably already in the queue (unless this kernel is old
763 enough to use TASK_STOPPED for ptrace stops); but since
764 SIGSTOP is not an RT signal, it can only be queued once. */
765 kill_lwp (lwpid, SIGSTOP);
766
767 /* Finally, resume the stopped process. This will deliver the
768 SIGSTOP (or a higher priority signal, just like normal
769 PTRACE_ATTACH), which we'll catch later on. */
770 ptrace (PTRACE_CONT, lwpid, 0, 0);
771 }
772
773 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
774 brings it to a halt.
775
776 There are several cases to consider here:
777
778 1) gdbserver has already attached to the process and is being notified
779 of a new thread that is being created.
780 In this case we should ignore that SIGSTOP and resume the
781 process. This is handled below by setting stop_expected = 1,
782 and the fact that add_thread sets last_resume_kind ==
783 resume_continue.
784
785 2) This is the first thread (the process thread), and we're attaching
786 to it via attach_inferior.
787 In this case we want the process thread to stop.
788 This is handled by having linux_attach set last_resume_kind ==
789 resume_stop after we return.
790
791 If the pid we are attaching to is also the tgid, we attach to and
792 stop all the existing threads. Otherwise, we attach to pid and
793 ignore any other threads in the same group as this pid.
794
795 3) GDB is connecting to gdbserver and is requesting an enumeration of all
796 existing threads.
797 In this case we want the thread to stop.
798 FIXME: This case is currently not properly handled.
799 We should wait for the SIGSTOP but don't. Things work apparently
800 because enough time passes between when we ptrace (ATTACH) and when
801 gdb makes the next ptrace call on the thread.
802
803 On the other hand, if we are currently trying to stop all threads, we
804 should treat the new thread as if we had sent it a SIGSTOP. This works
805 because we are guaranteed that the add_lwp call above added us to the
806 end of the list, and so the new thread has not yet reached
807 wait_for_sigstop (but will). */
808 new_lwp->stop_expected = 1;
809 }
810
811 void
812 linux_attach_lwp (unsigned long lwpid)
813 {
814 linux_attach_lwp_1 (lwpid, 0);
815 }
816
817 /* Attach to PID. If PID is the tgid, attach to it and all
818 of its threads. */
819
820 static int
821 linux_attach (unsigned long pid)
822 {
823 /* Attach to PID. We will check for other threads
824 soon. */
825 linux_attach_lwp_1 (pid, 1);
826 linux_add_process (pid, 1);
827
828 if (!non_stop)
829 {
830 struct thread_info *thread;
831
832 /* Don't ignore the initial SIGSTOP if we just attached to this
833 process. It will be collected by wait shortly. */
834 thread = find_thread_ptid (ptid_build (pid, pid, 0));
835 thread->last_resume_kind = resume_stop;
836 }
837
838 if (linux_proc_get_tgid (pid) == pid)
839 {
840 DIR *dir;
841 char pathname[128];
842
843 sprintf (pathname, "/proc/%ld/task", pid);
844
845 dir = opendir (pathname);
846
847 if (!dir)
848 {
849 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
850 fflush (stderr);
851 }
852 else
853 {
854 /* At this point we attached to the tgid. Scan the task for
855 existing threads. */
856 unsigned long lwp;
857 int new_threads_found;
858 int iterations = 0;
859 struct dirent *dp;
860
861 while (iterations < 2)
862 {
863 new_threads_found = 0;
864 /* Add all the other threads. While we go through the
865 threads, new threads may be spawned. Cycle through
866 the list of threads until we have done two iterations without
867 finding new threads. */
868 while ((dp = readdir (dir)) != NULL)
869 {
870 /* Fetch one lwp. */
871 lwp = strtoul (dp->d_name, NULL, 10);
872
873 /* Is this a new thread? */
874 if (lwp
875 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
876 {
877 linux_attach_lwp_1 (lwp, 0);
878 new_threads_found++;
879
880 if (debug_threads)
881 fprintf (stderr, "\
882 Found and attached to new lwp %ld\n", lwp);
883 }
884 }
885
886 if (!new_threads_found)
887 iterations++;
888 else
889 iterations = 0;
890
891 rewinddir (dir);
892 }
893 closedir (dir);
894 }
895 }
896
897 return 0;
898 }
899
900 struct counter
901 {
902 int pid;
903 int count;
904 };
905
906 static int
907 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
908 {
909 struct counter *counter = args;
910
911 if (ptid_get_pid (entry->id) == counter->pid)
912 {
913 if (++counter->count > 1)
914 return 1;
915 }
916
917 return 0;
918 }
919
920 static int
921 last_thread_of_process_p (struct thread_info *thread)
922 {
923 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
924 int pid = ptid_get_pid (ptid);
 925   struct counter counter = { pid, 0 };
926
927 return (find_inferior (&all_threads,
928 second_thread_of_pid_p, &counter) == NULL);
929 }
930
931 /* Kill LWP. */
932
933 static void
934 linux_kill_one_lwp (struct lwp_info *lwp)
935 {
936 int pid = lwpid_of (lwp);
937
938 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
939 there is no signal context, and ptrace(PTRACE_KILL) (or
940 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
941 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
942 alternative is to kill with SIGKILL. We only need one SIGKILL
943 per process, not one for each thread. But since we still support
944 linuxthreads, and we also support debugging programs using raw
945 clone without CLONE_THREAD, we send one for each thread. For
946 years, we used PTRACE_KILL only, so we're being a bit paranoid
947 about some old kernels where PTRACE_KILL might work better
948 (dubious if there are any such, but that's why it's paranoia), so
949 we try SIGKILL first, PTRACE_KILL second, and so we're fine
950 everywhere. */
951
952 errno = 0;
953 kill (pid, SIGKILL);
954 if (debug_threads)
955 fprintf (stderr,
956 "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
957 target_pid_to_str (ptid_of (lwp)),
958 errno ? strerror (errno) : "OK");
959
960 errno = 0;
961 ptrace (PTRACE_KILL, pid, 0, 0);
962 if (debug_threads)
963 fprintf (stderr,
964 "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
965 target_pid_to_str (ptid_of (lwp)),
966 errno ? strerror (errno) : "OK");
967 }
968
969 /* Callback for `find_inferior'. Kills an lwp of a given process,
970 except the leader. */
971
972 static int
973 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
974 {
975 struct thread_info *thread = (struct thread_info *) entry;
976 struct lwp_info *lwp = get_thread_lwp (thread);
977 int wstat;
978 int pid = * (int *) args;
979
980 if (ptid_get_pid (entry->id) != pid)
981 return 0;
982
983 /* We avoid killing the first thread here, because of a Linux kernel (at
984 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
985 the children get a chance to be reaped, it will remain a zombie
986 forever. */
987
988 if (lwpid_of (lwp) == pid)
989 {
990 if (debug_threads)
991 fprintf (stderr, "lkop: is last of process %s\n",
992 target_pid_to_str (entry->id));
993 return 0;
994 }
995
996 do
997 {
998 linux_kill_one_lwp (lwp);
999
1000 /* Make sure it died. The loop is most likely unnecessary. */
1001 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
1002 } while (pid > 0 && WIFSTOPPED (wstat));
1003
1004 return 0;
1005 }
1006
1007 static int
1008 linux_kill (int pid)
1009 {
1010 struct process_info *process;
1011 struct lwp_info *lwp;
1012 int wstat;
1013 int lwpid;
1014
1015 process = find_process_pid (pid);
1016 if (process == NULL)
1017 return -1;
1018
1019 /* If we're killing a running inferior, make sure it is stopped
1020 first, as PTRACE_KILL will not work otherwise. */
1021 stop_all_lwps (0, NULL);
1022
 1023   find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1024
1025 /* See the comment in linux_kill_one_lwp. We did not kill the first
1026 thread in the list, so do so now. */
1027 lwp = find_lwp_pid (pid_to_ptid (pid));
1028
1029 if (lwp == NULL)
1030 {
1031 if (debug_threads)
1032 fprintf (stderr, "lk_1: cannot find lwp %ld, for pid: %d\n",
1033 lwpid_of (lwp), pid);
1034 }
1035 else
1036 {
1037 if (debug_threads)
1038 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
1039 lwpid_of (lwp), pid);
1040
1041 do
1042 {
1043 linux_kill_one_lwp (lwp);
1044
1045 /* Make sure it died. The loop is most likely unnecessary. */
1046 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
1047 } while (lwpid > 0 && WIFSTOPPED (wstat));
1048 }
1049
1050 the_target->mourn (process);
1051
1052 /* Since we presently can only stop all lwps of all processes, we
1053 need to unstop lwps of other processes. */
1054 unstop_all_lwps (0, NULL);
1055 return 0;
1056 }
1057
1058 /* Get pending signal of THREAD, for detaching purposes. This is the
1059 signal the thread last stopped for, which we need to deliver to the
 1060    thread when detaching; otherwise it'd be suppressed/lost.  */
1061
1062 static int
1063 get_detach_signal (struct thread_info *thread)
1064 {
1065 enum gdb_signal signo = GDB_SIGNAL_0;
1066 int status;
1067 struct lwp_info *lp = get_thread_lwp (thread);
1068
1069 if (lp->status_pending_p)
1070 status = lp->status_pending;
1071 else
1072 {
1073 /* If the thread had been suspended by gdbserver, and it stopped
1074 cleanly, then it'll have stopped with SIGSTOP. But we don't
1075 want to deliver that SIGSTOP. */
1076 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1077 || thread->last_status.value.sig == GDB_SIGNAL_0)
1078 return 0;
1079
1080 /* Otherwise, we may need to deliver the signal we
1081 intercepted. */
1082 status = lp->last_status;
1083 }
1084
1085 if (!WIFSTOPPED (status))
1086 {
1087 if (debug_threads)
1088 fprintf (stderr,
1089 "GPS: lwp %s hasn't stopped: no pending signal\n",
1090 target_pid_to_str (ptid_of (lp)));
1091 return 0;
1092 }
1093
1094 /* Extended wait statuses aren't real SIGTRAPs. */
1095 if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1096 {
1097 if (debug_threads)
1098 fprintf (stderr,
1099 "GPS: lwp %s had stopped with extended "
1100 "status: no pending signal\n",
1101 target_pid_to_str (ptid_of (lp)));
1102 return 0;
1103 }
1104
1105 signo = gdb_signal_from_host (WSTOPSIG (status));
1106
1107 if (program_signals_p && !program_signals[signo])
1108 {
1109 if (debug_threads)
1110 fprintf (stderr,
1111 "GPS: lwp %s had signal %s, but it is in nopass state\n",
1112 target_pid_to_str (ptid_of (lp)),
1113 gdb_signal_to_string (signo));
1114 return 0;
1115 }
1116 else if (!program_signals_p
1117 /* If we have no way to know which signals GDB does not
1118 want to have passed to the program, assume
1119 SIGTRAP/SIGINT, which is GDB's default. */
1120 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1121 {
1122 if (debug_threads)
1123 fprintf (stderr,
1124 "GPS: lwp %s had signal %s, "
1125 "but we don't know if we should pass it. Default to not.\n",
1126 target_pid_to_str (ptid_of (lp)),
1127 gdb_signal_to_string (signo));
1128 return 0;
1129 }
1130 else
1131 {
1132 if (debug_threads)
1133 fprintf (stderr,
1134 "GPS: lwp %s has pending signal %s: delivering it.\n",
1135 target_pid_to_str (ptid_of (lp)),
1136 gdb_signal_to_string (signo));
1137
1138 return WSTOPSIG (status);
1139 }
1140 }
1141
1142 static int
1143 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1144 {
1145 struct thread_info *thread = (struct thread_info *) entry;
1146 struct lwp_info *lwp = get_thread_lwp (thread);
1147 int pid = * (int *) args;
1148 int sig;
1149
1150 if (ptid_get_pid (entry->id) != pid)
1151 return 0;
1152
1153 /* If there is a pending SIGSTOP, get rid of it. */
1154 if (lwp->stop_expected)
1155 {
1156 if (debug_threads)
1157 fprintf (stderr,
1158 "Sending SIGCONT to %s\n",
1159 target_pid_to_str (ptid_of (lwp)));
1160
1161 kill_lwp (lwpid_of (lwp), SIGCONT);
1162 lwp->stop_expected = 0;
1163 }
1164
1165 /* Flush any pending changes to the process's registers. */
1166 regcache_invalidate_one ((struct inferior_list_entry *)
1167 get_lwp_thread (lwp));
1168
1169 /* Pass on any pending signal for this thread. */
1170 sig = get_detach_signal (thread);
1171
1172 /* Finally, let it resume. */
1173 if (the_low_target.prepare_to_resume != NULL)
1174 the_low_target.prepare_to_resume (lwp);
1175 if (ptrace (PTRACE_DETACH, lwpid_of (lwp), 0,
1176 (PTRACE_ARG4_TYPE) (long) sig) < 0)
1177 error (_("Can't detach %s: %s"),
1178 target_pid_to_str (ptid_of (lwp)),
1179 strerror (errno));
1180
1181 delete_lwp (lwp);
1182 return 0;
1183 }
1184
1185 static int
1186 linux_detach (int pid)
1187 {
1188 struct process_info *process;
1189
1190 process = find_process_pid (pid);
1191 if (process == NULL)
1192 return -1;
1193
1194 /* Stop all threads before detaching. First, ptrace requires that
 1195      the thread is stopped to successfully detach.  Second, thread_db
1196 may need to uninstall thread event breakpoints from memory, which
1197 only works with a stopped process anyway. */
1198 stop_all_lwps (0, NULL);
1199
1200 #ifdef USE_THREAD_DB
1201 thread_db_detach (process);
1202 #endif
1203
1204 /* Stabilize threads (move out of jump pads). */
1205 stabilize_threads ();
1206
1207 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1208
1209 the_target->mourn (process);
1210
1211 /* Since we presently can only stop all lwps of all processes, we
1212 need to unstop lwps of other processes. */
1213 unstop_all_lwps (0, NULL);
1214 return 0;
1215 }
1216
1217 /* Remove all LWPs that belong to process PROC from the lwp list. */
1218
1219 static int
1220 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1221 {
1222 struct lwp_info *lwp = (struct lwp_info *) entry;
1223 struct process_info *process = proc;
1224
1225 if (pid_of (lwp) == pid_of (process))
1226 delete_lwp (lwp);
1227
1228 return 0;
1229 }
1230
1231 static void
1232 linux_mourn (struct process_info *process)
1233 {
1234 struct process_info_private *priv;
1235
1236 #ifdef USE_THREAD_DB
1237 thread_db_mourn (process);
1238 #endif
1239
1240 find_inferior (&all_lwps, delete_lwp_callback, process);
1241
 1242   /* Free all private data.  */
1243 priv = process->private;
1244 free (priv->arch_private);
1245 free (priv);
1246 process->private = NULL;
1247
1248 remove_process (process);
1249 }
1250
1251 static void
1252 linux_join (int pid)
1253 {
1254 int status, ret;
1255
1256 do {
1257 ret = my_waitpid (pid, &status, 0);
1258 if (WIFEXITED (status) || WIFSIGNALED (status))
1259 break;
1260 } while (ret != -1 || errno != ECHILD);
1261 }
1262
1263 /* Return nonzero if the given thread is still alive. */
1264 static int
1265 linux_thread_alive (ptid_t ptid)
1266 {
1267 struct lwp_info *lwp = find_lwp_pid (ptid);
1268
1269 /* We assume we always know if a thread exits. If a whole process
1270 exited but we still haven't been able to report it to GDB, we'll
1271 hold on to the last lwp of the dead process. */
1272 if (lwp != NULL)
1273 return !lwp->dead;
1274 else
1275 return 0;
1276 }
1277
1278 /* Return 1 if this lwp has an interesting status pending. */
1279 static int
1280 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1281 {
1282 struct lwp_info *lwp = (struct lwp_info *) entry;
1283 ptid_t ptid = * (ptid_t *) arg;
1284 struct thread_info *thread;
1285
1286 /* Check if we're only interested in events from a specific process
1287 or its lwps. */
1288 if (!ptid_equal (minus_one_ptid, ptid)
1289 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
1290 return 0;
1291
1292 thread = get_lwp_thread (lwp);
1293
 1294   /* If we got a `vCont;t', but we haven't reported a stop yet, do
 1295      report any pending status the LWP may have.  */
1296 if (thread->last_resume_kind == resume_stop
1297 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1298 return 0;
1299
1300 return lwp->status_pending_p;
1301 }
1302
1303 static int
1304 same_lwp (struct inferior_list_entry *entry, void *data)
1305 {
1306 ptid_t ptid = *(ptid_t *) data;
1307 int lwp;
1308
1309 if (ptid_get_lwp (ptid) != 0)
1310 lwp = ptid_get_lwp (ptid);
1311 else
1312 lwp = ptid_get_pid (ptid);
1313
1314 if (ptid_get_lwp (entry->id) == lwp)
1315 return 1;
1316
1317 return 0;
1318 }
1319
1320 struct lwp_info *
1321 find_lwp_pid (ptid_t ptid)
1322 {
1323 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1324 }
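/* Editorial sketch, guarded out of the build: both lookups accepted
   by same_lwp above.  A bare pid, as produced by pid_to_ptid, also
   matches on the LWP field, because the thread-group leader carries
   the same number in both slots.  The function name is
   hypothetical.  */
#if 0
static void
find_lwp_sketch (void)
{
  struct lwp_info *leader = find_lwp_pid (pid_to_ptid (1234));
  struct lwp_info *worker = find_lwp_pid (ptid_build (1234, 1235, 0));

  /* Either may be NULL if no such LWP is being debugged.  */
  (void) leader;
  (void) worker;
}
#endif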
1325
1326 static struct lwp_info *
1327 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1328 {
1329 int ret;
1330 int to_wait_for = -1;
1331 struct lwp_info *child = NULL;
1332
1333 if (debug_threads)
1334 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1335
1336 if (ptid_equal (ptid, minus_one_ptid))
1337 to_wait_for = -1; /* any child */
1338 else
1339 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1340
1341 options |= __WALL;
1342
1343 retry:
1344
1345 ret = my_waitpid (to_wait_for, wstatp, options);
1346 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1347 return NULL;
1348 else if (ret == -1)
1349 perror_with_name ("waitpid");
1350
1351 if (debug_threads
1352 && (!WIFSTOPPED (*wstatp)
1353 || (WSTOPSIG (*wstatp) != 32
1354 && WSTOPSIG (*wstatp) != 33)))
1355 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1356
1357 child = find_lwp_pid (pid_to_ptid (ret));
1358
1359 /* If we didn't find a process, one of two things presumably happened:
1360 - A process we started and then detached from has exited. Ignore it.
1361 - A process we are controlling has forked and the new child's stop
1362 was reported to us by the kernel. Save its PID. */
1363 if (child == NULL && WIFSTOPPED (*wstatp))
1364 {
1365 add_to_pid_list (&stopped_pids, ret, *wstatp);
1366 goto retry;
1367 }
1368 else if (child == NULL)
1369 goto retry;
1370
1371 child->stopped = 1;
1372
1373 child->last_status = *wstatp;
1374
1375 /* Architecture-specific setup after inferior is running.
1376 This needs to happen after we have attached to the inferior
1377 and it is stopped for the first time, but before we access
1378 any inferior registers. */
1379 if (new_inferior)
1380 {
1381 the_low_target.arch_setup ();
1382 #ifdef HAVE_LINUX_REGSETS
1383 memset (disabled_regsets, 0, num_regsets);
1384 #endif
1385 new_inferior = 0;
1386 }
1387
1388 /* Fetch the possibly triggered data watchpoint info and store it in
1389 CHILD.
1390
1391 On some archs, like x86, that use debug registers to set
1392 watchpoints, it's possible that the way to know which watched
1393 address trapped, is to check the register that is used to select
1394 which address to watch. Problem is, between setting the
1395 watchpoint and reading back which data address trapped, the user
1396 may change the set of watchpoints, and, as a consequence, GDB
1397 changes the debug registers in the inferior. To avoid reading
1398 back a stale stopped-data-address when that happens, we cache in
1399 LP the fact that a watchpoint trapped, and the corresponding data
1400 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1401 changes the debug registers meanwhile, we have the cached data we
1402 can rely on. */
1403
1404 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1405 {
1406 if (the_low_target.stopped_by_watchpoint == NULL)
1407 {
1408 child->stopped_by_watchpoint = 0;
1409 }
1410 else
1411 {
1412 struct thread_info *saved_inferior;
1413
1414 saved_inferior = current_inferior;
1415 current_inferior = get_lwp_thread (child);
1416
1417 child->stopped_by_watchpoint
1418 = the_low_target.stopped_by_watchpoint ();
1419
1420 if (child->stopped_by_watchpoint)
1421 {
1422 if (the_low_target.stopped_data_address != NULL)
1423 child->stopped_data_address
1424 = the_low_target.stopped_data_address ();
1425 else
1426 child->stopped_data_address = 0;
1427 }
1428
1429 current_inferior = saved_inferior;
1430 }
1431 }
1432
1433 /* Store the STOP_PC, with adjustment applied. This depends on the
1434 architecture being defined already (so that CHILD has a valid
1435 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1436 not). */
1437 if (WIFSTOPPED (*wstatp))
1438 child->stop_pc = get_stop_pc (child);
1439
1440 if (debug_threads
1441 && WIFSTOPPED (*wstatp)
1442 && the_low_target.get_pc != NULL)
1443 {
1444 struct thread_info *saved_inferior = current_inferior;
1445 struct regcache *regcache;
1446 CORE_ADDR pc;
1447
1448 current_inferior = get_lwp_thread (child);
1449 regcache = get_thread_regcache (current_inferior, 1);
1450 pc = (*the_low_target.get_pc) (regcache);
1451 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1452 current_inferior = saved_inferior;
1453 }
1454
1455 return child;
1456 }
1457
1458 /* This function should only be called if the LWP got a SIGTRAP.
1459
 1460    Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1461 event was handled, 0 otherwise. */
1462
1463 static int
1464 handle_tracepoints (struct lwp_info *lwp)
1465 {
1466 struct thread_info *tinfo = get_lwp_thread (lwp);
1467 int tpoint_related_event = 0;
1468
1469 /* If this tracepoint hit causes a tracing stop, we'll immediately
1470 uninsert tracepoints. To do this, we temporarily pause all
1471 threads, unpatch away, and then unpause threads. We need to make
1472 sure the unpausing doesn't resume LWP too. */
1473 lwp->suspended++;
1474
1475 /* And we need to be sure that any all-threads-stopping doesn't try
1476 to move threads out of the jump pads, as it could deadlock the
1477 inferior (LWP could be in the jump pad, maybe even holding the
1478 lock.) */
1479
1480 /* Do any necessary step collect actions. */
1481 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1482
1483 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1484
1485 /* See if we just hit a tracepoint and do its main collect
1486 actions. */
1487 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1488
1489 lwp->suspended--;
1490
1491 gdb_assert (lwp->suspended == 0);
1492 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1493
1494 if (tpoint_related_event)
1495 {
1496 if (debug_threads)
1497 fprintf (stderr, "got a tracepoint event\n");
1498 return 1;
1499 }
1500
1501 return 0;
1502 }
1503
1504 /* Convenience wrapper. Returns true if LWP is presently collecting a
1505 fast tracepoint. */
1506
1507 static int
1508 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1509 struct fast_tpoint_collect_status *status)
1510 {
1511 CORE_ADDR thread_area;
1512
1513 if (the_low_target.get_thread_area == NULL)
1514 return 0;
1515
1516 /* Get the thread area address. This is used to recognize which
1517 thread is which when tracing with the in-process agent library.
1518 We don't read anything from the address, and treat it as opaque;
1519 it's the address itself that we assume is unique per-thread. */
1520 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1521 return 0;
1522
1523 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1524 }
1525
 1526 /* The reason we resume in the caller is that we want to be able
1527 to pass lwp->status_pending as WSTAT, and we need to clear
1528 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1529 refuses to resume. */
1530
1531 static int
1532 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1533 {
1534 struct thread_info *saved_inferior;
1535
1536 saved_inferior = current_inferior;
1537 current_inferior = get_lwp_thread (lwp);
1538
1539 if ((wstat == NULL
1540 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1541 && supports_fast_tracepoints ()
1542 && agent_loaded_p ())
1543 {
1544 struct fast_tpoint_collect_status status;
1545 int r;
1546
1547 if (debug_threads)
1548 fprintf (stderr, "\
1549 Checking whether LWP %ld needs to move out of the jump pad.\n",
1550 lwpid_of (lwp));
1551
1552 r = linux_fast_tracepoint_collecting (lwp, &status);
1553
1554 if (wstat == NULL
1555 || (WSTOPSIG (*wstat) != SIGILL
1556 && WSTOPSIG (*wstat) != SIGFPE
1557 && WSTOPSIG (*wstat) != SIGSEGV
1558 && WSTOPSIG (*wstat) != SIGBUS))
1559 {
1560 lwp->collecting_fast_tracepoint = r;
1561
1562 if (r != 0)
1563 {
1564 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1565 {
1566 /* Haven't executed the original instruction yet.
1567 Set breakpoint there, and wait till it's hit,
1568 then single-step until exiting the jump pad. */
1569 lwp->exit_jump_pad_bkpt
1570 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1571 }
1572
1573 if (debug_threads)
1574 fprintf (stderr, "\
1575 Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1576 lwpid_of (lwp));
1577 current_inferior = saved_inferior;
1578
1579 return 1;
1580 }
1581 }
1582 else
1583 {
1584 /* If we get a synchronous signal while collecting, *and*
1585 while executing the (relocated) original instruction,
1586 reset the PC to point at the tpoint address, before
1587 reporting to GDB. Otherwise, it's an IPA lib bug: just
1588 report the signal to GDB, and pray for the best. */
1589
1590 lwp->collecting_fast_tracepoint = 0;
1591
1592 if (r != 0
1593 && (status.adjusted_insn_addr <= lwp->stop_pc
1594 && lwp->stop_pc < status.adjusted_insn_addr_end))
1595 {
1596 siginfo_t info;
1597 struct regcache *regcache;
1598
1599 /* The si_addr on a few signals references the address
1600 of the faulting instruction. Adjust that as
1601 well. */
1602 if ((WSTOPSIG (*wstat) == SIGILL
1603 || WSTOPSIG (*wstat) == SIGFPE
1604 || WSTOPSIG (*wstat) == SIGBUS
1605 || WSTOPSIG (*wstat) == SIGSEGV)
1606 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
1607 /* Final check just to make sure we don't clobber
1608 the siginfo of non-kernel-sent signals. */
1609 && (uintptr_t) info.si_addr == lwp->stop_pc)
1610 {
1611 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1612 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
1613 }
1614
1615 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1616 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1617 lwp->stop_pc = status.tpoint_addr;
1618
1619 /* Cancel any fast tracepoint lock this thread was
1620 holding. */
1621 force_unlock_trace_buffer ();
1622 }
1623
1624 if (lwp->exit_jump_pad_bkpt != NULL)
1625 {
1626 if (debug_threads)
1627 fprintf (stderr,
1628 "Cancelling fast exit-jump-pad: removing bkpt. "
1629 "stopping all threads momentarily.\n");
1630
1631 stop_all_lwps (1, lwp);
1632 cancel_breakpoints ();
1633
1634 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1635 lwp->exit_jump_pad_bkpt = NULL;
1636
1637 unstop_all_lwps (1, lwp);
1638
1639 gdb_assert (lwp->suspended >= 0);
1640 }
1641 }
1642 }
1643
1644 if (debug_threads)
1645 fprintf (stderr, "\
1646 Checking whether LWP %ld needs to move out of the jump pad...no\n",
1647 lwpid_of (lwp));
1648
1649 current_inferior = saved_inferior;
1650 return 0;
1651 }
1652
1653 /* Enqueue one signal in the "signals to report later when out of the
1654 jump pad" list. */
1655
1656 static void
1657 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1658 {
1659 struct pending_signals *p_sig;
1660
1661 if (debug_threads)
1662 fprintf (stderr, "\
1663 Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1664
1665 if (debug_threads)
1666 {
1667 struct pending_signals *sig;
1668
1669 for (sig = lwp->pending_signals_to_report;
1670 sig != NULL;
1671 sig = sig->prev)
1672 fprintf (stderr,
1673 " Already queued %d\n",
1674 sig->signal);
1675
1676 fprintf (stderr, " (no more currently queued signals)\n");
1677 }
1678
1679 /* Don't enqueue non-RT signals if they are already in the deferred
 1680      queue.  (SIGSTOP is the easiest signal to end up here
 1681      twice.)  */
1682 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1683 {
1684 struct pending_signals *sig;
1685
1686 for (sig = lwp->pending_signals_to_report;
1687 sig != NULL;
1688 sig = sig->prev)
1689 {
1690 if (sig->signal == WSTOPSIG (*wstat))
1691 {
1692 if (debug_threads)
1693 fprintf (stderr,
1694 "Not requeuing already queued non-RT signal %d"
1695 " for LWP %ld\n",
1696 sig->signal,
1697 lwpid_of (lwp));
1698 return;
1699 }
1700 }
1701 }
1702
1703 p_sig = xmalloc (sizeof (*p_sig));
1704 p_sig->prev = lwp->pending_signals_to_report;
1705 p_sig->signal = WSTOPSIG (*wstat);
1706 memset (&p_sig->info, 0, sizeof (siginfo_t));
1707 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1708
1709 lwp->pending_signals_to_report = p_sig;
1710 }
1711
1712 /* Dequeue one signal from the "signals to report later when out of
1713 the jump pad" list. */
1714
1715 static int
1716 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1717 {
1718 if (lwp->pending_signals_to_report != NULL)
1719 {
1720 struct pending_signals **p_sig;
1721
1722 p_sig = &lwp->pending_signals_to_report;
1723 while ((*p_sig)->prev != NULL)
1724 p_sig = &(*p_sig)->prev;
1725
1726 *wstat = W_STOPCODE ((*p_sig)->signal);
1727 if ((*p_sig)->info.si_signo != 0)
1728 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1729 free (*p_sig);
1730 *p_sig = NULL;
1731
1732 if (debug_threads)
1733 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1734 WSTOPSIG (*wstat), lwpid_of (lwp));
1735
1736 if (debug_threads)
1737 {
1738 struct pending_signals *sig;
1739
1740 for (sig = lwp->pending_signals_to_report;
1741 sig != NULL;
1742 sig = sig->prev)
1743 fprintf (stderr,
1744 " Still queued %d\n",
1745 sig->signal);
1746
1747 fprintf (stderr, " (no more queued signals)\n");
1748 }
1749
1750 return 1;
1751 }
1752
1753 return 0;
1754 }
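/* Editorial sketch, guarded out of the build: the life cycle of the
   deferred-signal queue above.  While an LWP sits in a jump pad,
   signals are parked with enqueue_one_deferred_signal; once the LWP
   is clear of the pad, each is replayed as a fresh wait status.  The
   function name is hypothetical.  */
#if 0
static void
deferred_signal_sketch (struct lwp_info *lwp, int wstat)
{
  /* On the way in: park the signal rather than reporting it.  */
  enqueue_one_deferred_signal (lwp, &wstat);

  /* Later, out of the jump pad: re-deliver the oldest queued signal
     as if it had just arrived.  */
  if (dequeue_one_deferred_signal (lwp, &wstat))
    linux_resume_one_lwp (lwp, 0, WSTOPSIG (wstat), NULL);
}
#endif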
1755
1756 /* Arrange for a breakpoint to be hit again later. We don't keep the
1757 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
 1758    will handle the current event; eventually we will resume this LWP,
1759 and this breakpoint will trap again. */
1760
1761 static int
1762 cancel_breakpoint (struct lwp_info *lwp)
1763 {
1764 struct thread_info *saved_inferior;
1765
1766 /* There's nothing to do if we don't support breakpoints. */
1767 if (!supports_breakpoints ())
1768 return 0;
1769
1770 /* breakpoint_at reads from current inferior. */
1771 saved_inferior = current_inferior;
1772 current_inferior = get_lwp_thread (lwp);
1773
1774 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1775 {
1776 if (debug_threads)
1777 fprintf (stderr,
1778 "CB: Push back breakpoint for %s\n",
1779 target_pid_to_str (ptid_of (lwp)));
1780
1781 /* Back up the PC if necessary. */
1782 if (the_low_target.decr_pc_after_break)
1783 {
1784 struct regcache *regcache
1785 = get_thread_regcache (current_inferior, 1);
1786 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1787 }
1788
1789 current_inferior = saved_inferior;
1790 return 1;
1791 }
1792 else
1793 {
1794 if (debug_threads)
1795 fprintf (stderr,
1796 "CB: No breakpoint found at %s for [%s]\n",
1797 paddress (lwp->stop_pc),
1798 target_pid_to_str (ptid_of (lwp)));
1799 }
1800
1801 current_inferior = saved_inferior;
1802 return 0;
1803 }
1804
1805 /* When the event-loop is doing a step-over, this points at the thread
1806 being stepped. */
1807 ptid_t step_over_bkpt;
1808
1809 /* Wait for an event from child PID. If PID is -1, wait for any
1810 child. Store the stop status through the status pointer WSTAT.
1811 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1812 event was found and OPTIONS contains WNOHANG. Return the PID of
1813 the stopped child otherwise. */
1814
1815 static int
1816 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1817 {
1818 struct lwp_info *event_child, *requested_child;
1819 ptid_t wait_ptid;
1820
1821 event_child = NULL;
1822 requested_child = NULL;
1823
1824 /* Check for a lwp with a pending status. */
1825
1826 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1827 {
1828 event_child = (struct lwp_info *)
1829 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1830 if (debug_threads && event_child)
1831 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1832 }
1833 else
1834 {
1835 requested_child = find_lwp_pid (ptid);
1836
1837 if (stopping_threads == NOT_STOPPING_THREADS
1838 && requested_child->status_pending_p
1839 && requested_child->collecting_fast_tracepoint)
1840 {
1841 enqueue_one_deferred_signal (requested_child,
1842 &requested_child->status_pending);
1843 requested_child->status_pending_p = 0;
1844 requested_child->status_pending = 0;
1845 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1846 }
1847
1848 if (requested_child->suspended
1849 && requested_child->status_pending_p)
1850 fatal ("requesting an event out of a suspended child?");
1851
1852 if (requested_child->status_pending_p)
1853 event_child = requested_child;
1854 }
1855
1856 if (event_child != NULL)
1857 {
1858 if (debug_threads)
1859 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1860 lwpid_of (event_child), event_child->status_pending);
1861 *wstat = event_child->status_pending;
1862 event_child->status_pending_p = 0;
1863 event_child->status_pending = 0;
1864 current_inferior = get_lwp_thread (event_child);
1865 return lwpid_of (event_child);
1866 }
1867
1868 if (ptid_is_pid (ptid))
1869 {
1870 /* A request to wait for a specific tgid. This is not possible
1871 with waitpid, so instead, we wait for any child, and leave
1872 children we're not interested in right now with a pending
1873 status to report later. */
1874 wait_ptid = minus_one_ptid;
1875 }
1876 else
1877 wait_ptid = ptid;
1878
1879 /* We only enter this loop if no process has a pending wait status. Thus
1880 any action taken in response to a wait status inside this loop is
1881 responding as soon as we detect the status, not after any pending
1882 events. */
1883 while (1)
1884 {
1885 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1886
1887 if ((options & WNOHANG) && event_child == NULL)
1888 {
1889 if (debug_threads)
1890 fprintf (stderr, "WNOHANG set, no event found\n");
1891 return 0;
1892 }
1893
1894 if (event_child == NULL)
1895 error ("event from unknown child");
1896
1897 if (ptid_is_pid (ptid)
1898 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1899 {
1900 if (! WIFSTOPPED (*wstat))
1901 mark_lwp_dead (event_child, *wstat);
1902 else
1903 {
1904 event_child->status_pending_p = 1;
1905 event_child->status_pending = *wstat;
1906 }
1907 continue;
1908 }
1909
1910 current_inferior = get_lwp_thread (event_child);
1911
1912 /* Check for thread exit. */
1913 if (! WIFSTOPPED (*wstat))
1914 {
1915 if (debug_threads)
1916 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1917
1918 /* If the last thread is exiting, just return. */
1919 if (last_thread_of_process_p (current_inferior))
1920 {
1921 if (debug_threads)
1922 fprintf (stderr, "LWP %ld is last lwp of process\n",
1923 lwpid_of (event_child));
1924 return lwpid_of (event_child);
1925 }
1926
1927 if (!non_stop)
1928 {
1929 current_inferior = (struct thread_info *) all_threads.head;
1930 if (debug_threads)
1931 fprintf (stderr, "Current inferior is now %ld\n",
1932 lwpid_of (get_thread_lwp (current_inferior)));
1933 }
1934 else
1935 {
1936 current_inferior = NULL;
1937 if (debug_threads)
1938 fprintf (stderr, "Current inferior is now <NULL>\n");
1939 }
1940
1941 /* If we were waiting for this particular child to do something...
1942 well, it did something. */
1943 if (requested_child != NULL)
1944 {
1945 int lwpid = lwpid_of (event_child);
1946
1947 /* Cancel the step-over operation --- the thread that
1948 started it is gone. */
1949 if (finish_step_over (event_child))
1950 unstop_all_lwps (1, event_child);
1951 delete_lwp (event_child);
1952 return lwpid;
1953 }
1954
1955 delete_lwp (event_child);
1956
1957 /* Wait for a more interesting event. */
1958 continue;
1959 }
1960
1961 if (event_child->must_set_ptrace_flags)
1962 {
1963 linux_enable_event_reporting (lwpid_of (event_child));
1964 event_child->must_set_ptrace_flags = 0;
1965 }
1966
1967 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1968 && *wstat >> 16 != 0)
1969 {
1970 handle_extended_wait (event_child, *wstat);
1971 continue;
1972 }
1973
1974 if (WIFSTOPPED (*wstat)
1975 && WSTOPSIG (*wstat) == SIGSTOP
1976 && event_child->stop_expected)
1977 {
1978 int should_stop;
1979
1980 if (debug_threads)
1981 fprintf (stderr, "Expected stop.\n");
1982 event_child->stop_expected = 0;
1983
1984 should_stop = (current_inferior->last_resume_kind == resume_stop
1985 || stopping_threads != NOT_STOPPING_THREADS);
1986
1987 if (!should_stop)
1988 {
1989 linux_resume_one_lwp (event_child,
1990 event_child->stepping, 0, NULL);
1991 continue;
1992 }
1993 }
1994
1995 return lwpid_of (event_child);
1996 }
1997
1998 /* NOTREACHED */
1999 return 0;
2000 }
2001
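/* A note on the extended-event test in linux_wait_for_event above
   (illustration only, not used by the code): with the PTRACE_O_TRACE*
   options enabled, the kernel reports events such as clone or exec
   with a wait status laid out as

     status == (PTRACE_EVENT_xxx << 16) | (SIGTRAP << 8) | 0x7f

   so WIFSTOPPED and WSTOPSIG still see an ordinary SIGTRAP stop,
   while `*wstat >> 16' extracts the event code (e.g.
   PTRACE_EVENT_CLONE == 3) for handle_extended_wait to decode.  */
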
2002 /* Count the LWPs that have had events. */
2003
2004 static int
2005 count_events_callback (struct inferior_list_entry *entry, void *data)
2006 {
2007 struct lwp_info *lp = (struct lwp_info *) entry;
2008 struct thread_info *thread = get_lwp_thread (lp);
2009 int *count = data;
2010
2011 gdb_assert (count != NULL);
2012
2013 /* Count only resumed LWPs that have a SIGTRAP event pending that
2014 should be reported to GDB. */
2015 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2016 && thread->last_resume_kind != resume_stop
2017 && lp->status_pending_p
2018 && WIFSTOPPED (lp->status_pending)
2019 && WSTOPSIG (lp->status_pending) == SIGTRAP
2020 && !breakpoint_inserted_here (lp->stop_pc))
2021 (*count)++;
2022
2023 return 0;
2024 }
2025
2026 /* Select the LWP (if any) that is currently being single-stepped. */
2027
2028 static int
2029 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2030 {
2031 struct lwp_info *lp = (struct lwp_info *) entry;
2032 struct thread_info *thread = get_lwp_thread (lp);
2033
2034 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2035 && thread->last_resume_kind == resume_step
2036 && lp->status_pending_p)
2037 return 1;
2038 else
2039 return 0;
2040 }
2041
2042 /* Select the Nth LWP that has had a SIGTRAP event that should be
2043 reported to GDB. */
2044
2045 static int
2046 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2047 {
2048 struct lwp_info *lp = (struct lwp_info *) entry;
2049 struct thread_info *thread = get_lwp_thread (lp);
2050 int *selector = data;
2051
2052 gdb_assert (selector != NULL);
2053
2054 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2055 if (thread->last_resume_kind != resume_stop
2056 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2057 && lp->status_pending_p
2058 && WIFSTOPPED (lp->status_pending)
2059 && WSTOPSIG (lp->status_pending) == SIGTRAP
2060 && !breakpoint_inserted_here (lp->stop_pc))
2061 if ((*selector)-- == 0)
2062 return 1;
2063
2064 return 0;
2065 }
2066
2067 static int
2068 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2069 {
2070 struct lwp_info *lp = (struct lwp_info *) entry;
2071 struct thread_info *thread = get_lwp_thread (lp);
2072 struct lwp_info *event_lp = data;
2073
2074 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2075 if (lp == event_lp)
2076 return 0;
2077
2078 /* If a LWP other than the LWP that we're reporting an event for has
2079 hit a GDB breakpoint (as opposed to some random trap signal),
2080 then just arrange for it to hit it again later. We don't keep
2081 the SIGTRAP status and don't forward the SIGTRAP signal to the
2082 LWP. We will handle the current event, eventually we will resume
2083 all LWPs, and this one will get its breakpoint trap again.
2084
2085 If we do not do this, then we run the risk that the user will
2086 delete or disable the breakpoint, but the LWP will have already
2087 tripped on it. */
2088
2089 if (thread->last_resume_kind != resume_stop
2090 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2091 && lp->status_pending_p
2092 && WIFSTOPPED (lp->status_pending)
2093 && WSTOPSIG (lp->status_pending) == SIGTRAP
2094 && !lp->stepping
2095 && !lp->stopped_by_watchpoint
2096 && cancel_breakpoint (lp))
2097 /* Throw away the SIGTRAP. */
2098 lp->status_pending_p = 0;
2099
2100 return 0;
2101 }
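
/* Roughly, what cancel_breakpoint amounts to (a sketch only;
   set_pc_back is a hypothetical stand-in for the target's PC-rewind
   logic, which undoes any decr_pc_after_break adjustment):

     if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
       {
         set_pc_back (lwp);   -- rewind past the trap
         return 1;            -- caller discards the SIGTRAP
       }
     return 0;

   The cancelled LWP then simply re-executes the breakpoint
   instruction the next time it is resumed.  */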
2102
2103 static void
2104 linux_cancel_breakpoints (void)
2105 {
2106 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2107 }
2108
2109 /* Select one LWP out of those that have events pending. */
2110
2111 static void
2112 select_event_lwp (struct lwp_info **orig_lp)
2113 {
2114 int num_events = 0;
2115 int random_selector;
2116 struct lwp_info *event_lp;
2117
2118 /* Give preference to any LWP that is being single-stepped. */
2119 event_lp
2120 = (struct lwp_info *) find_inferior (&all_lwps,
2121 select_singlestep_lwp_callback, NULL);
2122 if (event_lp != NULL)
2123 {
2124 if (debug_threads)
2125 fprintf (stderr,
2126 "SEL: Select single-step %s\n",
2127 target_pid_to_str (ptid_of (event_lp)));
2128 }
2129 else
2130 {
2131 /* No single-stepping LWP. Select one at random, out of those
2132 which have had SIGTRAP events. */
2133
2134 /* First see how many SIGTRAP events we have. */
2135 find_inferior (&all_lwps, count_events_callback, &num_events);
2136
2137 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2138 random_selector = (int)
2139 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2140
2141 if (debug_threads && num_events > 1)
2142 fprintf (stderr,
2143 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2144 num_events, random_selector);
2145
2146 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2147 select_event_lwp_callback,
2148 &random_selector);
2149 }
2150
2151 if (event_lp != NULL)
2152 {
2153 /* Switch the event LWP. */
2154 *orig_lp = event_lp;
2155 }
2156 }
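
/* Worked example for the selection above: with num_events == 3,
   `(num_events * (double) rand ()) / (RAND_MAX + 1.0)' maps rand ()
   uniformly onto [0.0, 3.0), so truncating to int yields 0, 1 or 2
   with (practically) equal probability; select_event_lwp_callback
   then counts RANDOM_SELECTOR down to pick the corresponding LWP.  */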
2157
2158 /* Decrement the suspend count of an LWP. */
2159
2160 static int
2161 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2162 {
2163 struct lwp_info *lwp = (struct lwp_info *) entry;
2164
2165 /* Ignore EXCEPT. */
2166 if (lwp == except)
2167 return 0;
2168
2169 lwp->suspended--;
2170
2171 gdb_assert (lwp->suspended >= 0);
2172 return 0;
2173 }
2174
2175 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2176 non-NULL. */
2177
2178 static void
2179 unsuspend_all_lwps (struct lwp_info *except)
2180 {
2181 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2182 }
2183
2184 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2185 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2186 void *data);
2187 static int lwp_running (struct inferior_list_entry *entry, void *data);
2188 static ptid_t linux_wait_1 (ptid_t ptid,
2189 struct target_waitstatus *ourstatus,
2190 int target_options);
2191
2192 /* Stabilize threads (move out of jump pads).
2193
2194 If a thread is midway collecting a fast tracepoint, we need to
2195 finish the collection and move it out of the jump pad before
2196 reporting the signal.
2197
2198 This avoids recursion while collecting (when a signal arrives
2199 midway, and the signal handler itself collects), which would trash
2200 the trace buffer. In case the user set a breakpoint in a signal
2201 handler, this avoids the backtrace showing the jump pad, etc.
2202 Most importantly, there are certain things we can't do safely if
2203 threads are stopped in a jump pad (or in one of its callees). For
2204 example:
2205
2206 - starting a new trace run. A thread still collecting from the
2207 previous run could trash the trace buffer when resumed. The trace
2208 buffer control structures would have been reset but the thread had
2209 no way to tell. The thread could even be midway through memcpy'ing
2210 to the buffer, which would mean that when resumed, it would clobber
2211 the trace buffer that had been set up for the new run.
2212
2213 - we can't rewrite/reuse the jump pads for new tracepoints
2214 safely. Say you do tstart while a thread is stopped midway
2215 through collecting. When the thread is later resumed, it finishes the
2216 collection, and returns to the jump pad, to execute the original
2217 instruction that was under the tracepoint jump at the time the
2218 older run had been started. If the jump pad had been rewritten
2219 since for something else in the new run, the thread would now
2220 execute wrong, effectively random, instructions. */
2221
2222 static void
2223 linux_stabilize_threads (void)
2224 {
2225 struct thread_info *save_inferior;
2226 struct lwp_info *lwp_stuck;
2227
2228 lwp_stuck
2229 = (struct lwp_info *) find_inferior (&all_lwps,
2230 stuck_in_jump_pad_callback, NULL);
2231 if (lwp_stuck != NULL)
2232 {
2233 if (debug_threads)
2234 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2235 lwpid_of (lwp_stuck));
2236 return;
2237 }
2238
2239 save_inferior = current_inferior;
2240
2241 stabilizing_threads = 1;
2242
2243 /* Kick 'em all. */
2244 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2245
2246 /* Loop until all are stopped out of the jump pads. */
2247 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2248 {
2249 struct target_waitstatus ourstatus;
2250 struct lwp_info *lwp;
2251 int wstat;
2252
2253 /* Note that we go through the full wait event loop. While
2254 moving threads out of the jump pad, we need to be able to step
2255 over internal breakpoints and such. */
2256 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2257
2258 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2259 {
2260 lwp = get_thread_lwp (current_inferior);
2261
2262 /* Lock it. */
2263 lwp->suspended++;
2264
2265 if (ourstatus.value.sig != GDB_SIGNAL_0
2266 || current_inferior->last_resume_kind == resume_stop)
2267 {
2268 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2269 enqueue_one_deferred_signal (lwp, &wstat);
2270 }
2271 }
2272 }
2273
2274 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2275
2276 stabilizing_threads = 0;
2277
2278 current_inferior = save_inferior;
2279
2280 if (debug_threads)
2281 {
2282 lwp_stuck
2283 = (struct lwp_info *) find_inferior (&all_lwps,
2284 stuck_in_jump_pad_callback, NULL);
2285 if (lwp_stuck != NULL)
2286 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2287 lwpid_of (lwp_stuck));
2288 }
2289 }
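
/* Shape of the stabilization loop above, in outline: each pass pulls
   one event through the full linux_wait_1 machinery, bumps the
   reporting LWP's suspend count so later resumes leave it parked,
   and re-queues any interesting signal as a deferred one; once
   lwp_running finds no running LWP, everything is out of the jump
   pads and the suspend counts are undone in one sweep.  */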
2290
2291 /* Wait for process, returns status. */
2292
2293 static ptid_t
2294 linux_wait_1 (ptid_t ptid,
2295 struct target_waitstatus *ourstatus, int target_options)
2296 {
2297 int w;
2298 struct lwp_info *event_child;
2299 int options;
2300 int pid;
2301 int step_over_finished;
2302 int bp_explains_trap;
2303 int maybe_internal_trap;
2304 int report_to_gdb;
2305 int trace_event;
2306
2307 /* Translate generic target options into linux options. */
2308 options = __WALL;
2309 if (target_options & TARGET_WNOHANG)
2310 options |= WNOHANG;
2311
2312 retry:
2313 bp_explains_trap = 0;
2314 trace_event = 0;
2315 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2316
2317 /* If we were only supposed to resume one thread, only wait for
2318 that thread - if it's still alive. If it died, however - which
2319 can happen if we're coming from the thread death case below -
2320 then we need to make sure we restart the other threads. We could
2321 pick a thread at random or restart all; restarting all is less
2322 arbitrary. */
2323 if (!non_stop
2324 && !ptid_equal (cont_thread, null_ptid)
2325 && !ptid_equal (cont_thread, minus_one_ptid))
2326 {
2327 struct thread_info *thread;
2328
2329 thread = (struct thread_info *) find_inferior_id (&all_threads,
2330 cont_thread);
2331
2332 /* No stepping, no signal - unless one is pending already, of course. */
2333 if (thread == NULL)
2334 {
2335 struct thread_resume resume_info;
2336 resume_info.thread = minus_one_ptid;
2337 resume_info.kind = resume_continue;
2338 resume_info.sig = 0;
2339 linux_resume (&resume_info, 1);
2340 }
2341 else
2342 ptid = cont_thread;
2343 }
2344
2345 if (ptid_equal (step_over_bkpt, null_ptid))
2346 pid = linux_wait_for_event (ptid, &w, options);
2347 else
2348 {
2349 if (debug_threads)
2350 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2351 target_pid_to_str (step_over_bkpt));
2352 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2353 }
2354
2355 if (pid == 0) /* only if TARGET_WNOHANG */
2356 return null_ptid;
2357
2358 event_child = get_thread_lwp (current_inferior);
2359
2360 /* If we are waiting for a particular child, and it exited,
2361 linux_wait_for_event will return its exit status. Similarly if
2362 the last child exited. If this is not the last child, however,
2363 do not report it as exited until there is a 'thread exited' response
2364 available in the remote protocol. Instead, just wait for another event.
2365 This should be safe, because if the thread crashed we will already
2366 have reported the termination signal to GDB; that should stop any
2367 in-progress stepping operations, etc.
2368
2369 Report the exit status of the last thread to exit. This matches
2370 LinuxThreads' behavior. */
2371
2372 if (last_thread_of_process_p (current_inferior))
2373 {
2374 if (WIFEXITED (w) || WIFSIGNALED (w))
2375 {
2376 if (WIFEXITED (w))
2377 {
2378 ourstatus->kind = TARGET_WAITKIND_EXITED;
2379 ourstatus->value.integer = WEXITSTATUS (w);
2380
2381 if (debug_threads)
2382 fprintf (stderr,
2383 "\nChild exited with retcode = %x \n",
2384 WEXITSTATUS (w));
2385 }
2386 else
2387 {
2388 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2389 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2390
2391 if (debug_threads)
2392 fprintf (stderr,
2393 "\nChild terminated with signal = %x \n",
2394 WTERMSIG (w));
2395
2396 }
2397
2398 return ptid_of (event_child);
2399 }
2400 }
2401 else
2402 {
2403 if (!WIFSTOPPED (w))
2404 goto retry;
2405 }
2406
2407 /* If this event was not handled before, and is not a SIGTRAP, we
2408 report it. SIGILL and SIGSEGV are also treated as traps in case
2409 a breakpoint is inserted at the current PC. If this target does
2410 not support internal breakpoints at all, we also report the
2411 SIGTRAP without further processing; it's of no concern to us. */
2412 maybe_internal_trap
2413 = (supports_breakpoints ()
2414 && (WSTOPSIG (w) == SIGTRAP
2415 || ((WSTOPSIG (w) == SIGILL
2416 || WSTOPSIG (w) == SIGSEGV)
2417 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2418
2419 if (maybe_internal_trap)
2420 {
2421 /* Handle anything that requires bookkeeping before deciding to
2422 report the event or continue waiting. */
2423
2424 /* First check if we can explain the SIGTRAP with an internal
2425 breakpoint, or if we should possibly report the event to GDB.
2426 Do this before anything that may remove or insert a
2427 breakpoint. */
2428 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2429
2430 /* We have a SIGTRAP, possibly a step-over dance has just
2431 finished. If so, tweak the state machine accordingly,
2432 reinsert breakpoints and delete any reinsert (software
2433 single-step) breakpoints. */
2434 step_over_finished = finish_step_over (event_child);
2435
2436 /* Now invoke the callbacks of any internal breakpoints there. */
2437 check_breakpoints (event_child->stop_pc);
2438
2439 /* Handle tracepoint data collecting. This may overflow the
2440 trace buffer, and cause a tracing stop, removing
2441 breakpoints. */
2442 trace_event = handle_tracepoints (event_child);
2443
2444 if (bp_explains_trap)
2445 {
2446 /* If we stepped or ran into an internal breakpoint, we've
2447 already handled it. So next time we resume (from this
2448 PC), we should step over it. */
2449 if (debug_threads)
2450 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2451
2452 if (breakpoint_here (event_child->stop_pc))
2453 event_child->need_step_over = 1;
2454 }
2455 }
2456 else
2457 {
2458 /* We have some other signal, possibly a step-over dance was in
2459 progress, and it should be cancelled too. */
2460 step_over_finished = finish_step_over (event_child);
2461 }
2462
2463 /* We have all the data we need. Either report the event to GDB, or
2464 resume threads and keep waiting for more. */
2465
2466 /* If we're collecting a fast tracepoint, finish the collection and
2467 move out of the jump pad before delivering a signal. See
2468 linux_stabilize_threads. */
2469
2470 if (WIFSTOPPED (w)
2471 && WSTOPSIG (w) != SIGTRAP
2472 && supports_fast_tracepoints ()
2473 && agent_loaded_p ())
2474 {
2475 if (debug_threads)
2476 fprintf (stderr,
2477 "Got signal %d for LWP %ld. Check if we need "
2478 "to defer or adjust it.\n",
2479 WSTOPSIG (w), lwpid_of (event_child));
2480
2481 /* Allow debugging the jump pad itself. */
2482 if (current_inferior->last_resume_kind != resume_step
2483 && maybe_move_out_of_jump_pad (event_child, &w))
2484 {
2485 enqueue_one_deferred_signal (event_child, &w);
2486
2487 if (debug_threads)
2488 fprintf (stderr,
2489 "Signal %d for LWP %ld deferred (in jump pad)\n",
2490 WSTOPSIG (w), lwpid_of (event_child));
2491
2492 linux_resume_one_lwp (event_child, 0, 0, NULL);
2493 goto retry;
2494 }
2495 }
2496
2497 if (event_child->collecting_fast_tracepoint)
2498 {
2499 if (debug_threads)
2500 fprintf (stderr, "\
2501 LWP %ld was trying to move out of the jump pad (%d). \
2502 Check if we're already there.\n",
2503 lwpid_of (event_child),
2504 event_child->collecting_fast_tracepoint);
2505
2506 trace_event = 1;
2507
2508 event_child->collecting_fast_tracepoint
2509 = linux_fast_tracepoint_collecting (event_child, NULL);
2510
2511 if (event_child->collecting_fast_tracepoint != 1)
2512 {
2513 /* No longer need this breakpoint. */
2514 if (event_child->exit_jump_pad_bkpt != NULL)
2515 {
2516 if (debug_threads)
2517 fprintf (stderr,
2518 "No longer need exit-jump-pad bkpt; removing it."
2519 "stopping all threads momentarily.\n");
2520
2521 /* Other running threads could hit this breakpoint.
2522 We don't handle moribund locations like GDB does;
2523 instead we always pause all threads when removing
2524 breakpoints, so that any step-over or
2525 decr_pc_after_break adjustment is always taken
2526 care of while the breakpoint is still
2527 inserted. */
2528 stop_all_lwps (1, event_child);
2529 cancel_breakpoints ();
2530
2531 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2532 event_child->exit_jump_pad_bkpt = NULL;
2533
2534 unstop_all_lwps (1, event_child);
2535
2536 gdb_assert (event_child->suspended >= 0);
2537 }
2538 }
2539
2540 if (event_child->collecting_fast_tracepoint == 0)
2541 {
2542 if (debug_threads)
2543 fprintf (stderr,
2544 "fast tracepoint finished "
2545 "collecting successfully.\n");
2546
2547 /* We may have a deferred signal to report. */
2548 if (dequeue_one_deferred_signal (event_child, &w))
2549 {
2550 if (debug_threads)
2551 fprintf (stderr, "dequeued one signal.\n");
2552 }
2553 else
2554 {
2555 if (debug_threads)
2556 fprintf (stderr, "no deferred signals.\n");
2557
2558 if (stabilizing_threads)
2559 {
2560 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2561 ourstatus->value.sig = GDB_SIGNAL_0;
2562 return ptid_of (event_child);
2563 }
2564 }
2565 }
2566 }
2567
2568 /* Check whether GDB would be interested in this event. */
2569
2570 /* If GDB is not interested in this signal, don't stop other
2571 threads, and don't report it to GDB. Just resume the inferior
2572 right away. We do this for threading-related signals as well as
2573 any that GDB specifically requested we ignore. But never ignore
2574 SIGSTOP if we sent it ourselves, and do not ignore signals when
2575 stepping - they may require special handling to skip the signal
2576 handler. */
2577 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2578 thread library? */
2579 if (WIFSTOPPED (w)
2580 && current_inferior->last_resume_kind != resume_step
2581 && (
2582 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2583 (current_process ()->private->thread_db != NULL
2584 && (WSTOPSIG (w) == __SIGRTMIN
2585 || WSTOPSIG (w) == __SIGRTMIN + 1))
2586 ||
2587 #endif
2588 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2589 && !(WSTOPSIG (w) == SIGSTOP
2590 && current_inferior->last_resume_kind == resume_stop))))
2591 {
2592 siginfo_t info, *info_p;
2593
2594 if (debug_threads)
2595 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2596 WSTOPSIG (w), lwpid_of (event_child));
2597
2598 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2599 info_p = &info;
2600 else
2601 info_p = NULL;
2602 linux_resume_one_lwp (event_child, event_child->stepping,
2603 WSTOPSIG (w), info_p);
2604 goto retry;
2605 }
2606
2607 /* If GDB wanted this thread to single step, we always want to
2608 report the SIGTRAP, and let GDB handle it. Watchpoints should
2609 always be reported. So should signals we can't explain. A
2610 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2611 may not support Z0 breakpoints. If we do, we're able to handle
2612 GDB breakpoints on top of internal breakpoints, by handling the
2613 internal breakpoint and still reporting the event to GDB. If we
2614 don't, we're out of luck; GDB won't see the breakpoint hit. */
2615 report_to_gdb = (!maybe_internal_trap
2616 || current_inferior->last_resume_kind == resume_step
2617 || event_child->stopped_by_watchpoint
2618 || (!step_over_finished
2619 && !bp_explains_trap && !trace_event)
2620 || (gdb_breakpoint_here (event_child->stop_pc)
2621 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2622 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
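
  /* Equivalently (just a restatement of the expression above): the
     event is swallowed, i.e. report_to_gdb is false, only when all
     of these hold:

       maybe_internal_trap                -- it looked like our trap
       && last_resume_kind != resume_step
       && !event_child->stopped_by_watchpoint
       && (step_over_finished || bp_explains_trap || trace_event)
       && not a live GDB breakpoint (condition true, no commands)

     Anything else is GDB's business and gets reported.  */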
2623
2624 run_breakpoint_commands (event_child->stop_pc);
2625
2626 /* We found no reason GDB would want us to stop. We either hit one
2627 of our own breakpoints, or finished an internal step GDB
2628 shouldn't know about. */
2629 if (!report_to_gdb)
2630 {
2631 if (debug_threads)
2632 {
2633 if (bp_explains_trap)
2634 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2635 if (step_over_finished)
2636 fprintf (stderr, "Step-over finished.\n");
2637 if (trace_event)
2638 fprintf (stderr, "Tracepoint event.\n");
2639 }
2640
2641 /* We're not reporting this breakpoint to GDB, so apply the
2642 decr_pc_after_break adjustment to the inferior's regcache
2643 ourselves. */
2644
2645 if (the_low_target.set_pc != NULL)
2646 {
2647 struct regcache *regcache
2648 = get_thread_regcache (get_lwp_thread (event_child), 1);
2649 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2650 }
2651
2652 /* We may have finished stepping over a breakpoint. If so,
2653 we've stopped and suspended all LWPs momentarily except the
2654 stepping one. This is where we resume them all again. We're
2655 going to keep waiting, so use proceed, which handles stepping
2656 over the next breakpoint. */
2657 if (debug_threads)
2658 fprintf (stderr, "proceeding all threads.\n");
2659
2660 if (step_over_finished)
2661 unsuspend_all_lwps (event_child);
2662
2663 proceed_all_lwps ();
2664 goto retry;
2665 }
2666
2667 if (debug_threads)
2668 {
2669 if (current_inferior->last_resume_kind == resume_step)
2670 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2671 if (event_child->stopped_by_watchpoint)
2672 fprintf (stderr, "Stopped by watchpoint.\n");
2673 if (gdb_breakpoint_here (event_child->stop_pc))
2674 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2676 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2677 }
2678
2679 /* Alright, we're going to report a stop. */
2680
2681 if (!non_stop && !stabilizing_threads)
2682 {
2683 /* In all-stop, stop all threads. */
2684 stop_all_lwps (0, NULL);
2685
2686 /* If we're not waiting for a specific LWP, choose an event LWP
2687 from among those that have had events. Giving equal priority
2688 to all LWPs that have had events helps prevent
2689 starvation. */
2690 if (ptid_equal (ptid, minus_one_ptid))
2691 {
2692 event_child->status_pending_p = 1;
2693 event_child->status_pending = w;
2694
2695 select_event_lwp (&event_child);
2696
2697 event_child->status_pending_p = 0;
2698 w = event_child->status_pending;
2699 }
2700
2701 /* Now that we've selected our final event LWP, cancel any
2702 breakpoints in other LWPs that have hit a GDB breakpoint.
2703 See the comment in cancel_breakpoints_callback to find out
2704 why. */
2705 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2706
2707 /* If we were doing a step-over, all other threads but the stepping one
2708 had been paused in start_step_over, with their suspend counts
2709 incremented. We don't want to do a full unstop/unpause, because we're
2710 in all-stop mode (so we want threads stopped), but we still need to
2711 unsuspend the other threads, to decrement their `suspended' count
2712 back. */
2713 if (step_over_finished)
2714 unsuspend_all_lwps (event_child);
2715
2716 /* Stabilize threads (move out of jump pads). */
2717 stabilize_threads ();
2718 }
2719 else
2720 {
2721 /* If we just finished a step-over, then all threads had been
2722 momentarily paused. In all-stop, that's fine, we want
2723 threads stopped by now anyway. In non-stop, we need to
2724 re-resume threads that GDB wanted to be running. */
2725 if (step_over_finished)
2726 unstop_all_lwps (1, event_child);
2727 }
2728
2729 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2730
2731 if (current_inferior->last_resume_kind == resume_stop
2732 && WSTOPSIG (w) == SIGSTOP)
2733 {
2734 /* A thread that has been requested to stop by GDB with vCont;t
2735 stopped cleanly, so report it as SIG0. The use of
2736 SIGSTOP is an implementation detail. */
2737 ourstatus->value.sig = GDB_SIGNAL_0;
2738 }
2739 else if (current_inferior->last_resume_kind == resume_stop
2740 && WSTOPSIG (w) != SIGSTOP)
2741 {
2742 /* A thread that has been requested to stop by GDB with vCont;t,
2743 but it stopped for some other reason. */
2744 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2745 }
2746 else
2747 {
2748 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2749 }
2750
2751 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2752
2753 if (debug_threads)
2754 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2755 target_pid_to_str (ptid_of (event_child)),
2756 ourstatus->kind,
2757 ourstatus->value.sig);
2758
2759 return ptid_of (event_child);
2760 }
2761
2762 /* Get rid of any pending event in the pipe. */
2763 static void
2764 async_file_flush (void)
2765 {
2766 int ret;
2767 char buf;
2768
2769 do
2770 ret = read (linux_event_pipe[0], &buf, 1);
2771 while (ret >= 0 || (ret == -1 && errno == EINTR));
2772 }
2773
2774 /* Put something in the pipe, so the event loop wakes up. */
2775 static void
2776 async_file_mark (void)
2777 {
2778 int ret;
2779
2780 async_file_flush ();
2781
2782 do
2783 ret = write (linux_event_pipe[1], "+", 1);
2784 while (ret == 0 || (ret == -1 && errno == EINTR));
2785
2786 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2787 be awakened anyway. */
2788 }
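
/* This pair implements the classic self-pipe trick.  Assuming both
   pipe ends are made non-blocking when async mode is enabled (as is
   done elsewhere in this file), neither loop above can block: a byte
   written here wakes up the event loop's select/poll, and
   async_file_flush drains the pipe once the events are consumed.  */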
2789
2790 static ptid_t
2791 linux_wait (ptid_t ptid,
2792 struct target_waitstatus *ourstatus, int target_options)
2793 {
2794 ptid_t event_ptid;
2795
2796 if (debug_threads)
2797 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2798
2799 /* Flush the async file first. */
2800 if (target_is_async_p ())
2801 async_file_flush ();
2802
2803 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2804
2805 /* If at least one stop was reported, there may be more. A single
2806 SIGCHLD can signal more than one child stop. */
2807 if (target_is_async_p ()
2808 && (target_options & TARGET_WNOHANG) != 0
2809 && !ptid_equal (event_ptid, null_ptid))
2810 async_file_mark ();
2811
2812 return event_ptid;
2813 }
2814
2815 /* Send a signal to an LWP. */
2816
2817 static int
2818 kill_lwp (unsigned long lwpid, int signo)
2819 {
2820 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2821 fails, then we are not using nptl threads and we should be using kill. */
2822
2823 #ifdef __NR_tkill
2824 {
2825 static int tkill_failed;
2826
2827 if (!tkill_failed)
2828 {
2829 int ret;
2830
2831 errno = 0;
2832 ret = syscall (__NR_tkill, lwpid, signo);
2833 if (errno != ENOSYS)
2834 return ret;
2835 tkill_failed = 1;
2836 }
2837 }
2838 #endif
2839
2840 return kill (lwpid, signo);
2841 }
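
/* Usage note: kill () takes a process ID, and the kernel is free to
   deliver the signal to any thread of that process; tkill () names
   one specific thread, e.g.

     syscall (__NR_tkill, lwpid, SIGSTOP);

   which is what stopping an individual LWP requires.  The ENOSYS
   probe is cached in TKILL_FAILED since kernel support cannot
   appear mid-run.  */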
2842
2843 void
2844 linux_stop_lwp (struct lwp_info *lwp)
2845 {
2846 send_sigstop (lwp);
2847 }
2848
2849 static void
2850 send_sigstop (struct lwp_info *lwp)
2851 {
2852 int pid;
2853
2854 pid = lwpid_of (lwp);
2855
2856 /* If we already have a pending stop signal for this LWP, don't
2857 send another. */
2858 if (lwp->stop_expected)
2859 {
2860 if (debug_threads)
2861 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2862
2863 return;
2864 }
2865
2866 if (debug_threads)
2867 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2868
2869 lwp->stop_expected = 1;
2870 kill_lwp (pid, SIGSTOP);
2871 }
2872
2873 static int
2874 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2875 {
2876 struct lwp_info *lwp = (struct lwp_info *) entry;
2877
2878 /* Ignore EXCEPT. */
2879 if (lwp == except)
2880 return 0;
2881
2882 if (lwp->stopped)
2883 return 0;
2884
2885 send_sigstop (lwp);
2886 return 0;
2887 }
2888
2889 /* Increment the suspend count of an LWP, and stop it if not already
2890 stopped. */
2891 static int
2892 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2893 void *except)
2894 {
2895 struct lwp_info *lwp = (struct lwp_info *) entry;
2896
2897 /* Ignore EXCEPT. */
2898 if (lwp == except)
2899 return 0;
2900
2901 lwp->suspended++;
2902
2903 return send_sigstop_callback (entry, except);
2904 }
2905
2906 static void
2907 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2908 {
2909 /* It's dead, really. */
2910 lwp->dead = 1;
2911
2912 /* Store the exit status for later. */
2913 lwp->status_pending_p = 1;
2914 lwp->status_pending = wstat;
2915
2916 /* Prevent trying to stop it. */
2917 lwp->stopped = 1;
2918
2919 /* No further stops are expected from a dead lwp. */
2920 lwp->stop_expected = 0;
2921 }
2922
2923 static void
2924 wait_for_sigstop (struct inferior_list_entry *entry)
2925 {
2926 struct lwp_info *lwp = (struct lwp_info *) entry;
2927 struct thread_info *saved_inferior;
2928 int wstat;
2929 ptid_t saved_tid;
2930 ptid_t ptid;
2931 int pid;
2932
2933 if (lwp->stopped)
2934 {
2935 if (debug_threads)
2936 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2937 lwpid_of (lwp));
2938 return;
2939 }
2940
2941 saved_inferior = current_inferior;
2942 if (saved_inferior != NULL)
2943 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2944 else
2945 saved_tid = null_ptid; /* avoid bogus unused warning */
2946
2947 ptid = lwp->head.id;
2948
2949 if (debug_threads)
2950 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2951
2952 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2953
2954 /* If we stopped with a non-SIGSTOP signal, save it for later
2955 and record the pending SIGSTOP. If the process exited, just
2956 return. */
2957 if (WIFSTOPPED (wstat))
2958 {
2959 if (debug_threads)
2960 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2961 lwpid_of (lwp), WSTOPSIG (wstat));
2962
2963 if (WSTOPSIG (wstat) != SIGSTOP)
2964 {
2965 if (debug_threads)
2966 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2967 lwpid_of (lwp), wstat);
2968
2969 lwp->status_pending_p = 1;
2970 lwp->status_pending = wstat;
2971 }
2972 }
2973 else
2974 {
2975 if (debug_threads)
2976 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2977
2978 lwp = find_lwp_pid (pid_to_ptid (pid));
2979 if (lwp)
2980 {
2981 /* Leave this status pending for the next time we're able to
2982 report it. In the mean time, we'll report this lwp as
2983 dead to GDB, so GDB doesn't try to read registers and
2984 memory from it. This can only happen if this was the
2985 last thread of the process; otherwise, PID is removed
2986 from the thread tables before linux_wait_for_event
2987 returns. */
2988 mark_lwp_dead (lwp, wstat);
2989 }
2990 }
2991
2992 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2993 current_inferior = saved_inferior;
2994 else
2995 {
2996 if (debug_threads)
2997 fprintf (stderr, "Previously current thread died.\n");
2998
2999 if (non_stop)
3000 {
3001 /* We can't change the current inferior behind GDB's back,
3002 otherwise, a subsequent command may apply to the wrong
3003 process. */
3004 current_inferior = NULL;
3005 }
3006 else
3007 {
3008 /* Set a valid thread as current. */
3009 set_desired_inferior (0);
3010 }
3011 }
3012 }
3013
3014 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3015 move it out, because we need to report the stop event to GDB. For
3016 example, if the user puts a breakpoint in the jump pad, it's
3017 because she wants to debug it. */
3018
3019 static int
3020 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3021 {
3022 struct lwp_info *lwp = (struct lwp_info *) entry;
3023 struct thread_info *thread = get_lwp_thread (lwp);
3024
3025 gdb_assert (lwp->suspended == 0);
3026 gdb_assert (lwp->stopped);
3027
3028 /* Allow debugging the jump pad, gdb_collect, etc. */
3029 return (supports_fast_tracepoints ()
3030 && agent_loaded_p ()
3031 && (gdb_breakpoint_here (lwp->stop_pc)
3032 || lwp->stopped_by_watchpoint
3033 || thread->last_resume_kind == resume_step)
3034 && linux_fast_tracepoint_collecting (lwp, NULL));
3035 }
3036
3037 static void
3038 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3039 {
3040 struct lwp_info *lwp = (struct lwp_info *) entry;
3041 struct thread_info *thread = get_lwp_thread (lwp);
3042 int *wstat;
3043
3044 gdb_assert (lwp->suspended == 0);
3045 gdb_assert (lwp->stopped);
3046
3047 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3048
3049 /* Allow debugging the jump pad, gdb_collect, etc. */
3050 if (!gdb_breakpoint_here (lwp->stop_pc)
3051 && !lwp->stopped_by_watchpoint
3052 && thread->last_resume_kind != resume_step
3053 && maybe_move_out_of_jump_pad (lwp, wstat))
3054 {
3055 if (debug_threads)
3056 fprintf (stderr,
3057 "LWP %ld needs stabilizing (in jump pad)\n",
3058 lwpid_of (lwp));
3059
3060 if (wstat)
3061 {
3062 lwp->status_pending_p = 0;
3063 enqueue_one_deferred_signal (lwp, wstat);
3064
3065 if (debug_threads)
3066 fprintf (stderr,
3067 "Signal %d for LWP %ld deferred "
3068 "(in jump pad)\n",
3069 WSTOPSIG (*wstat), lwpid_of (lwp));
3070 }
3071
3072 linux_resume_one_lwp (lwp, 0, 0, NULL);
3073 }
3074 else
3075 lwp->suspended++;
3076 }
3077
3078 static int
3079 lwp_running (struct inferior_list_entry *entry, void *data)
3080 {
3081 struct lwp_info *lwp = (struct lwp_info *) entry;
3082
3083 if (lwp->dead)
3084 return 0;
3085 if (lwp->stopped)
3086 return 0;
3087 return 1;
3088 }
3089
3090 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3091 If SUSPEND, then also increase the suspend count of every LWP,
3092 except EXCEPT. */
3093
3094 static void
3095 stop_all_lwps (int suspend, struct lwp_info *except)
3096 {
3097 /* Should not be called recursively. */
3098 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3099
3100 stopping_threads = (suspend
3101 ? STOPPING_AND_SUSPENDING_THREADS
3102 : STOPPING_THREADS);
3103
3104 if (suspend)
3105 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3106 else
3107 find_inferior (&all_lwps, send_sigstop_callback, except);
3108 for_each_inferior (&all_lwps, wait_for_sigstop);
3109 stopping_threads = NOT_STOPPING_THREADS;
3110 }
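
/* The stop is two-phased.  A typical caller pairs it with
   unstop_all_lwps, e.g. (sketch):

     stop_all_lwps (1, lwp);     -- SIGSTOP + suspend all but LWP
     ... remove or insert breakpoints safely ...
     unstop_all_lwps (1, lwp);   -- undo both

   The find_inferior pass above only *sends* SIGSTOPs; the
   for_each_inferior pass then blocks in wait_for_sigstop until every
   LWP has actually reported a stop, stashing any non-SIGSTOP status
   it sees as a pending event for later.  */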
3111
3112 /* Resume execution of the inferior process.
3113 If STEP is nonzero, single-step it.
3114 If SIGNAL is nonzero, give it that signal. */
3115
3116 static void
3117 linux_resume_one_lwp (struct lwp_info *lwp,
3118 int step, int signal, siginfo_t *info)
3119 {
3120 struct thread_info *saved_inferior;
3121 int fast_tp_collecting;
3122
3123 if (lwp->stopped == 0)
3124 return;
3125
3126 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3127
3128 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3129
3130 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3131 user used the "jump" command, or "set $pc = foo"). */
3132 if (lwp->stop_pc != get_pc (lwp))
3133 {
3134 /* Collecting 'while-stepping' actions doesn't make sense
3135 anymore. */
3136 release_while_stepping_state_list (get_lwp_thread (lwp));
3137 }
3138
3139 /* If we have pending signals or status, and a new signal, enqueue the
3140 signal. Also enqueue the signal if we are waiting to reinsert a
3141 breakpoint; it will be picked up again below. */
3142 if (signal != 0
3143 && (lwp->status_pending_p
3144 || lwp->pending_signals != NULL
3145 || lwp->bp_reinsert != 0
3146 || fast_tp_collecting))
3147 {
3148 struct pending_signals *p_sig;
3149 p_sig = xmalloc (sizeof (*p_sig));
3150 p_sig->prev = lwp->pending_signals;
3151 p_sig->signal = signal;
3152 if (info == NULL)
3153 memset (&p_sig->info, 0, sizeof (siginfo_t));
3154 else
3155 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3156 lwp->pending_signals = p_sig;
3157 }
3158
3159 if (lwp->status_pending_p)
3160 {
3161 if (debug_threads)
3162 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3163 " has pending status\n",
3164 lwpid_of (lwp), step ? "step" : "continue", signal,
3165 lwp->stop_expected ? "expected" : "not expected");
3166 return;
3167 }
3168
3169 saved_inferior = current_inferior;
3170 current_inferior = get_lwp_thread (lwp);
3171
3172 if (debug_threads)
3173 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3174 lwpid_of (lwp), step ? "step" : "continue", signal,
3175 lwp->stop_expected ? "expected" : "not expected");
3176
3177 /* This bit needs some thinking about. If we get a signal that
3178 we must report while a single-step reinsert is still pending,
3179 we often end up resuming the thread. It might be better to
3180 (ew) allow a stack of pending events; then we could be sure that
3181 the reinsert happened right away and not lose any signals.
3182
3183 Making this stack would also shrink the window in which breakpoints are
3184 uninserted (see comment in linux_wait_for_lwp) but not enough for
3185 complete correctness, so it won't solve that problem. It may be
3186 worthwhile just to solve this one, however. */
3187 if (lwp->bp_reinsert != 0)
3188 {
3189 if (debug_threads)
3190 fprintf (stderr, " pending reinsert at 0x%s\n",
3191 paddress (lwp->bp_reinsert));
3192
3193 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
3194 {
3195 if (fast_tp_collecting == 0)
3196 {
3197 if (step == 0)
3198 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3199 if (lwp->suspended)
3200 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3201 lwp->suspended);
3202 }
3203
3204 step = 1;
3205 }
3206
3207 /* Postpone any pending signal. It was enqueued above. */
3208 signal = 0;
3209 }
3210
3211 if (fast_tp_collecting == 1)
3212 {
3213 if (debug_threads)
3214 fprintf (stderr, "\
3215 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3216 lwpid_of (lwp));
3217
3218 /* Postpone any pending signal. It was enqueued above. */
3219 signal = 0;
3220 }
3221 else if (fast_tp_collecting == 2)
3222 {
3223 if (debug_threads)
3224 fprintf (stderr, "\
3225 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3226 lwpid_of (lwp));
3227
3228 if (can_hardware_single_step ())
3229 step = 1;
3230 else
3231 fatal ("moving out of jump pad single-stepping"
3232 " not implemented on this target");
3233
3234 /* Postpone any pending signal. It was enqueued above. */
3235 signal = 0;
3236 }
3237
3238 /* If we have while-stepping actions in this thread, set it stepping.
3239 If we have a signal to deliver, it may or may not be set to
3240 SIG_IGN; we don't know. Assume so, and allow collecting
3241 while-stepping into a signal handler. A possible smart thing to
3242 do would be to set an internal breakpoint at the signal return
3243 address, continue, and carry on catching this while-stepping
3244 action only when that breakpoint is hit. A future
3245 enhancement. */
3246 if (get_lwp_thread (lwp)->while_stepping != NULL
3247 && can_hardware_single_step ())
3248 {
3249 if (debug_threads)
3250 fprintf (stderr,
3251 "lwp %ld has a while-stepping action -> forcing step.\n",
3252 lwpid_of (lwp));
3253 step = 1;
3254 }
3255
3256 if (debug_threads && the_low_target.get_pc != NULL)
3257 {
3258 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3259 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3260 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3261 }
3262
3263 /* If we have pending signals, consume one unless we are trying to
3264 reinsert a breakpoint or we're trying to finish a fast tracepoint
3265 collect. */
3266 if (lwp->pending_signals != NULL
3267 && lwp->bp_reinsert == 0
3268 && fast_tp_collecting == 0)
3269 {
3270 struct pending_signals **p_sig;
3271
3272 p_sig = &lwp->pending_signals;
3273 while ((*p_sig)->prev != NULL)
3274 p_sig = &(*p_sig)->prev;
3275
3276 signal = (*p_sig)->signal;
3277 if ((*p_sig)->info.si_signo != 0)
3278 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3279
3280 free (*p_sig);
3281 *p_sig = NULL;
3282 }
3283
3284 if (the_low_target.prepare_to_resume != NULL)
3285 the_low_target.prepare_to_resume (lwp);
3286
3287 regcache_invalidate_one ((struct inferior_list_entry *)
3288 get_lwp_thread (lwp));
3289 errno = 0;
3290 lwp->stopped = 0;
3291 lwp->stopped_by_watchpoint = 0;
3292 lwp->stepping = step;
3293 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3294 /* Coerce to a uintptr_t first to avoid potential gcc warning
3295 of coercing an 8 byte integer to a 4 byte pointer. */
3296 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3297
3298 current_inferior = saved_inferior;
3299 if (errno)
3300 {
3301 /* ESRCH from ptrace either means that the thread was already
3302 running (an error) or that it is gone (a race condition). If
3303 it's gone, we will get a notification the next time we wait,
3304 so we can ignore the error. We could differentiate these
3305 two, but it's tricky without waiting; the thread still exists
3306 as a zombie, so sending it signal 0 would succeed. So just
3307 ignore ESRCH. */
3308 if (errno == ESRCH)
3309 return;
3310
3311 perror_with_name ("ptrace");
3312 }
3313 }
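
/* A note on signal ordering: pending_signals is pushed at the head,

     p_sig->prev = lwp->pending_signals;
     lwp->pending_signals = p_sig;

   but consumed above by walking to the oldest entry (prev == NULL),
   so queued signals are delivered first-in first-out, one per
   resume.  */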
3314
3315 struct thread_resume_array
3316 {
3317 struct thread_resume *resume;
3318 size_t n;
3319 };
3320
3321 /* This function is called once per thread. We look up the thread
3322 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3323 resume request.
3324
3325 This algorithm is O(threads * resume elements), but resume elements
3326 is small (and will remain small at least until GDB supports thread
3327 suspension). */
3328 static int
3329 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3330 {
3331 struct lwp_info *lwp;
3332 struct thread_info *thread;
3333 int ndx;
3334 struct thread_resume_array *r;
3335
3336 thread = (struct thread_info *) entry;
3337 lwp = get_thread_lwp (thread);
3338 r = arg;
3339
3340 for (ndx = 0; ndx < r->n; ndx++)
3341 {
3342 ptid_t ptid = r->resume[ndx].thread;
3343 if (ptid_equal (ptid, minus_one_ptid)
3344 || ptid_equal (ptid, entry->id)
3345 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3346 of PID'. */
3347 || (ptid_get_pid (ptid) == pid_of (lwp)
3348 && (ptid_is_pid (ptid)
3349 || ptid_get_lwp (ptid) == -1)))
3350 {
3351 if (r->resume[ndx].kind == resume_stop
3352 && thread->last_resume_kind == resume_stop)
3353 {
3354 if (debug_threads)
3355 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3356 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3357 ? "stopped"
3358 : "stopping",
3359 lwpid_of (lwp));
3360
3361 continue;
3362 }
3363
3364 lwp->resume = &r->resume[ndx];
3365 thread->last_resume_kind = lwp->resume->kind;
3366
3367 /* If we had a deferred signal to report, dequeue one now.
3368 This can happen if LWP gets more than one signal while
3369 trying to get out of a jump pad. */
3370 if (lwp->stopped
3371 && !lwp->status_pending_p
3372 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3373 {
3374 lwp->status_pending_p = 1;
3375
3376 if (debug_threads)
3377 fprintf (stderr,
3378 "Dequeueing deferred signal %d for LWP %ld, "
3379 "leaving status pending.\n",
3380 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3381 }
3382
3383 return 0;
3384 }
3385 }
3386
3387 /* No resume action for this thread. */
3388 lwp->resume = NULL;
3389
3390 return 0;
3391 }
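
/* Matching examples for the ptid test above (illustrative values
   only): GDB's `vCont;c:p42.-1' arrives here as ptid (42, -1), and
   `vCont;c:p42.7' as ptid (42, 7), while a bare `p42' is a pid-only
   ptid.  The first and last both mean "every thread of process 42",
   which is why pPID and pPID.-1 are folded into one case.  */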
3392
3393
3394 /* Set *FLAG_P if this lwp has an interesting status pending. */
3395 static int
3396 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3397 {
3398 struct lwp_info *lwp = (struct lwp_info *) entry;
3399
3400 /* LWPs which will not be resumed are not interesting, because
3401 we might not wait for them next time through linux_wait. */
3402 if (lwp->resume == NULL)
3403 return 0;
3404
3405 if (lwp->status_pending_p)
3406 * (int *) flag_p = 1;
3407
3408 return 0;
3409 }
3410
3411 /* Return 1 if this lwp that GDB wants running is stopped at an
3412 internal breakpoint that we need to step over. It assumes that any
3413 required STOP_PC adjustment has already been propagated to the
3414 inferior's regcache. */
3415
3416 static int
3417 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3418 {
3419 struct lwp_info *lwp = (struct lwp_info *) entry;
3420 struct thread_info *thread;
3421 struct thread_info *saved_inferior;
3422 CORE_ADDR pc;
3423
3424 /* LWPs which will not be resumed are not interesting, because we
3425 might not wait for them next time through linux_wait. */
3426
3427 if (!lwp->stopped)
3428 {
3429 if (debug_threads)
3430 fprintf (stderr,
3431 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3432 lwpid_of (lwp));
3433 return 0;
3434 }
3435
3436 thread = get_lwp_thread (lwp);
3437
3438 if (thread->last_resume_kind == resume_stop)
3439 {
3440 if (debug_threads)
3441 fprintf (stderr,
3442 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3443 lwpid_of (lwp));
3444 return 0;
3445 }
3446
3447 gdb_assert (lwp->suspended >= 0);
3448
3449 if (lwp->suspended)
3450 {
3451 if (debug_threads)
3452 fprintf (stderr,
3453 "Need step over [LWP %ld]? Ignoring, suspended\n",
3454 lwpid_of (lwp));
3455 return 0;
3456 }
3457
3458 if (!lwp->need_step_over)
3459 {
3460 if (debug_threads)
3461 fprintf (stderr,
3462 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3463 }
3464
3465 if (lwp->status_pending_p)
3466 {
3467 if (debug_threads)
3468 fprintf (stderr,
3469 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3470 lwpid_of (lwp));
3471 return 0;
3472 }
3473
3474 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3475 or we have. */
3476 pc = get_pc (lwp);
3477
3478 /* If the PC has changed since we stopped, then don't do anything,
3479 and let the breakpoint/tracepoint be hit. This happens if, for
3480 instance, GDB handled the decr_pc_after_break subtraction itself,
3481 GDB is OOL stepping this thread, or the user has issued a "jump"
3482 command, or poked the thread's registers herself. */
3483 if (pc != lwp->stop_pc)
3484 {
3485 if (debug_threads)
3486 fprintf (stderr,
3487 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3488 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3489 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3490
3491 lwp->need_step_over = 0;
3492 return 0;
3493 }
3494
3495 saved_inferior = current_inferior;
3496 current_inferior = thread;
3497
3498 /* We can only step over breakpoints we know about. */
3499 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3500 {
3501 /* Don't step over a breakpoint that GDB expects to hit
3502 though. If the condition is being evaluated on the target's side
3503 and it evaluates to false, step over this breakpoint as well. */
3504 if (gdb_breakpoint_here (pc)
3505 && gdb_condition_true_at_breakpoint (pc)
3506 && gdb_no_commands_at_breakpoint (pc))
3507 {
3508 if (debug_threads)
3509 fprintf (stderr,
3510 "Need step over [LWP %ld]? yes, but found"
3511 " GDB breakpoint at 0x%s; skipping step over\n",
3512 lwpid_of (lwp), paddress (pc));
3513
3514 current_inferior = saved_inferior;
3515 return 0;
3516 }
3517 else
3518 {
3519 if (debug_threads)
3520 fprintf (stderr,
3521 "Need step over [LWP %ld]? yes, "
3522 "found breakpoint at 0x%s\n",
3523 lwpid_of (lwp), paddress (pc));
3524
3525 /* We've found an lwp that needs stepping over --- return 1 so
3526 that find_inferior stops looking. */
3527 current_inferior = saved_inferior;
3528
3529 /* If the step over is cancelled, this is set again. */
3530 lwp->need_step_over = 0;
3531 return 1;
3532 }
3533 }
3534
3535 current_inferior = saved_inferior;
3536
3537 if (debug_threads)
3538 fprintf (stderr,
3539 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3540 lwpid_of (lwp), paddress (pc));
3541
3542 return 0;
3543 }
3544
3545 /* Start a step-over operation on LWP. When LWP is stopped at a
3546 breakpoint, to make progress, we need to move the breakpoint out
3547 of the way. If we let other threads run while we do that, they may
3548 pass by the breakpoint location and miss hitting it. To avoid
3549 that, a step-over momentarily stops all threads while LWP is
3550 single-stepped with the breakpoint temporarily uninserted from
3551 the inferior. When the single-step finishes, we reinsert the
3552 breakpoint, and let all threads that are supposed to be running,
3553 run again.
3554
3555 On targets that don't support hardware single-step, we don't
3556 currently support full software single-stepping. Instead, we only
3557 support stepping over the thread event breakpoint, by asking the
3558 low target where to place a reinsert breakpoint. Since this
3559 routine assumes the breakpoint being stepped over is a thread event
3560 breakpoint, it usually assumes the return address of the current
3561 function is a good enough place to set the reinsert breakpoint. */
3562
3563 static int
3564 start_step_over (struct lwp_info *lwp)
3565 {
3566 struct thread_info *saved_inferior;
3567 CORE_ADDR pc;
3568 int step;
3569
3570 if (debug_threads)
3571 fprintf (stderr,
3572 "Starting step-over on LWP %ld. Stopping all threads\n",
3573 lwpid_of (lwp));
3574
3575 stop_all_lwps (1, lwp);
3576 gdb_assert (lwp->suspended == 0);
3577
3578 if (debug_threads)
3579 fprintf (stderr, "Done stopping all threads for step-over.\n");
3580
3581 /* Note, we should always reach here with an already adjusted PC,
3582 either by GDB (if we're resuming due to GDB's request), or by our
3583 caller, if we just finished handling an internal breakpoint GDB
3584 shouldn't care about. */
3585 pc = get_pc (lwp);
3586
3587 saved_inferior = current_inferior;
3588 current_inferior = get_lwp_thread (lwp);
3589
3590 lwp->bp_reinsert = pc;
3591 uninsert_breakpoints_at (pc);
3592 uninsert_fast_tracepoint_jumps_at (pc);
3593
3594 if (can_hardware_single_step ())
3595 {
3596 step = 1;
3597 }
3598 else
3599 {
3600 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3601 set_reinsert_breakpoint (raddr);
3602 step = 0;
3603 }
3604
3605 current_inferior = saved_inferior;
3606
3607 linux_resume_one_lwp (lwp, step, 0, NULL);
3608
3609 /* Require next event from this LWP. */
3610 step_over_bkpt = lwp->head.id;
3611 return 1;
3612 }
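
/* The whole dance, end to end (outline):

     1. start_step_over: stop and suspend all other LWPs, uninsert
        the breakpoint at PC, remember it in BP_REINSERT, and
        single-step just this LWP (or plant a reinsert breakpoint
        if the target can't hardware single-step);
     2. the next event is demanded from STEP_OVER_BKPT only;
     3. finish_step_over: reinsert the breakpoint, clear BP_REINSERT,
        delete any reinsert breakpoints, and clear STEP_OVER_BKPT;
     4. the caller unsuspends or restarts the other LWPs.  */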
3613
3614 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3615 start_step_over, if still there, and delete any reinsert
3616 breakpoints we've set, on targets without hardware single-step. */
3617
3618 static int
3619 finish_step_over (struct lwp_info *lwp)
3620 {
3621 if (lwp->bp_reinsert != 0)
3622 {
3623 if (debug_threads)
3624 fprintf (stderr, "Finished step over.\n");
3625
3626 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3627 may be no breakpoint to reinsert there by now. */
3628 reinsert_breakpoints_at (lwp->bp_reinsert);
3629 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3630
3631 lwp->bp_reinsert = 0;
3632
3633 /* Delete any software-single-step reinsert breakpoints. No
3634 longer needed. We don't have to worry about other threads
3635 hitting this trap, and later not being able to explain it,
3636 because we were stepping over a breakpoint, and we hold all
3637 threads but LWP stopped while doing that. */
3638 if (!can_hardware_single_step ())
3639 delete_reinsert_breakpoints ();
3640
3641 step_over_bkpt = null_ptid;
3642 return 1;
3643 }
3644 else
3645 return 0;
3646 }
3647
3648 /* This function is called once per thread. We check the thread's resume
3649 request, which will tell us whether to resume, step, or leave the thread
3650 stopped; and what signal, if any, it should be sent.
3651
3652 For threads which we aren't explicitly told otherwise, we preserve
3653 the stepping flag; this is used for stepping over gdbserver-placed
3654 breakpoints.
3655
3656 If pending_flags was set in any thread, we queue any needed
3657 signals, since we won't actually resume. We already have a pending
3658 event to report, so we don't need to preserve any step requests;
3659 they should be re-issued if necessary. */
3660
3661 static int
3662 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3663 {
3664 struct lwp_info *lwp;
3665 struct thread_info *thread;
3666 int step;
3667 int leave_all_stopped = * (int *) arg;
3668 int leave_pending;
3669
3670 thread = (struct thread_info *) entry;
3671 lwp = get_thread_lwp (thread);
3672
3673 if (lwp->resume == NULL)
3674 return 0;
3675
3676 if (lwp->resume->kind == resume_stop)
3677 {
3678 if (debug_threads)
3679 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3680
3681 if (!lwp->stopped)
3682 {
3683 if (debug_threads)
3684 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3685
3686 /* Stop the thread, and wait for the event asynchronously,
3687 through the event loop. */
3688 send_sigstop (lwp);
3689 }
3690 else
3691 {
3692 if (debug_threads)
3693 fprintf (stderr, "already stopped LWP %ld\n",
3694 lwpid_of (lwp));
3695
3696 /* The LWP may have been stopped in an internal event that
3697 was not meant to be notified back to GDB (e.g., gdbserver
3698 breakpoint), so we should be reporting a stop event in
3699 this case too. */
3700
3701 /* If the thread already has a pending SIGSTOP, this is a
3702 no-op. Otherwise, something later will presumably resume
3703 the thread and this will cause it to cancel any pending
3704 operation, due to last_resume_kind == resume_stop. If
3705 the thread already has a pending status to report, we
3706 will still report it the next time we wait - see
3707 status_pending_p_callback. */
3708
3709 /* If we already have a pending signal to report, then
3710 there's no need to queue a SIGSTOP, as this means we're
3711 midway through moving the LWP out of the jump pad, and we
3712 will report the pending signal as soon as that is
3713 finished. */
3714 if (lwp->pending_signals_to_report == NULL)
3715 send_sigstop (lwp);
3716 }
3717
3718 /* For stop requests, we're done. */
3719 lwp->resume = NULL;
3720 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3721 return 0;
3722 }
3723
3724 /* If this thread which is about to be resumed has a pending status,
3725 then don't resume any threads - we can just report the pending
3726 status. Make sure to queue any signals that would otherwise be
3727 sent. In all-stop mode, we base this decision on whether *any*
3728 thread has a pending status. If there's a thread that needs the
3729 step-over-breakpoint dance, then don't resume any other thread
3730 but that particular one. */
3731 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3732
3733 if (!leave_pending)
3734 {
3735 if (debug_threads)
3736 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3737
3738 step = (lwp->resume->kind == resume_step);
3739 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3740 }
3741 else
3742 {
3743 if (debug_threads)
3744 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3745
3746 /* If we have a new signal, enqueue the signal. */
3747 if (lwp->resume->sig != 0)
3748 {
3749 struct pending_signals *p_sig;
3750 p_sig = xmalloc (sizeof (*p_sig));
3751 p_sig->prev = lwp->pending_signals;
3752 p_sig->signal = lwp->resume->sig;
3753 memset (&p_sig->info, 0, sizeof (siginfo_t));
3754
3755 /* If this is the same signal we were previously stopped by,
3756 make sure to queue its siginfo. We can ignore the return
3757 value of ptrace; if it fails, we'll skip
3758 PTRACE_SETSIGINFO. */
3759 if (WIFSTOPPED (lwp->last_status)
3760 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3761 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3762
3763 lwp->pending_signals = p_sig;
3764 }
3765 }
3766
3767 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3768 lwp->resume = NULL;
3769 return 0;
3770 }
3771
3772 static void
3773 linux_resume (struct thread_resume *resume_info, size_t n)
3774 {
3775 struct thread_resume_array array = { resume_info, n };
3776 struct lwp_info *need_step_over = NULL;
3777 int any_pending;
3778 int leave_all_stopped;
3779
3780 find_inferior (&all_threads, linux_set_resume_request, &array);
3781
3782 /* If there is a thread which would otherwise be resumed, which has
3783 a pending status, then don't resume any threads - we can just
3784 report the pending status. Make sure to queue any signals that
3785 would otherwise be sent. In non-stop mode, we'll apply this
3786 logic to each thread individually. We consume all pending events
3787 before considering whether to start a step-over (in all-stop). */
3788 any_pending = 0;
3789 if (!non_stop)
3790 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3791
3792 /* If there is a thread which would otherwise be resumed, which is
3793 stopped at a breakpoint that needs stepping over, then don't
3794 resume any threads - have it step over the breakpoint with all
3795 other threads stopped, then resume all threads again. Make sure
3796 to queue any signals that would otherwise be delivered or
3797 queued. */
3798 if (!any_pending && supports_breakpoints ())
3799 need_step_over
3800 = (struct lwp_info *) find_inferior (&all_lwps,
3801 need_step_over_p, NULL);
3802
3803 leave_all_stopped = (need_step_over != NULL || any_pending);
3804
3805 if (debug_threads)
3806 {
3807 if (need_step_over != NULL)
3808 fprintf (stderr, "Not resuming all, need step over\n");
3809 else if (any_pending)
3810 fprintf (stderr,
3811 "Not resuming, all-stop and found "
3812 "an LWP with pending status\n");
3813 else
3814 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3815 }
3816
3817 /* Even if we're leaving threads stopped, queue all signals we'd
3818 otherwise deliver. */
3819 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3820
3821 if (need_step_over)
3822 start_step_over (need_step_over);
3823 }
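/* For reference, an illustrative (made-up) example of how a request
   arrives here: a GDB "vCont;s:p1.42;c" packet is translated by the
   server into two thread_resume entries - a resume_step request
   matching LWP 42 of process 1, plus a resume_continue default for
   every other thread.  linux_set_resume_request above then attaches
   the best matching entry to each LWP's RESUME field, which
   linux_resume_one_thread acts on as described.  */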
3824
3825 /* This function is called once per thread. We check the thread's
3826 last resume request, which will tell us whether to resume, step, or
3827 leave the thread stopped. Any signal the client requested to be
3828 delivered has already been enqueued at this point.
3829
3830 If any thread that GDB wants running is stopped at an internal
3831 breakpoint that needs stepping over, we start a step-over operation
3832 on that particular thread, and leave all others stopped. */
3833
3834 static int
3835 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3836 {
3837 struct lwp_info *lwp = (struct lwp_info *) entry;
3838 struct thread_info *thread;
3839 int step;
3840
3841 if (lwp == except)
3842 return 0;
3843
3844 if (debug_threads)
3845 fprintf (stderr,
3846 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3847
3848 if (!lwp->stopped)
3849 {
3850 if (debug_threads)
3851 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3852 return 0;
3853 }
3854
3855 thread = get_lwp_thread (lwp);
3856
3857 if (thread->last_resume_kind == resume_stop
3858 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3859 {
3860 if (debug_threads)
3861 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3862 lwpid_of (lwp));
3863 return 0;
3864 }
3865
3866 if (lwp->status_pending_p)
3867 {
3868 if (debug_threads)
3869 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3870 lwpid_of (lwp));
3871 return 0;
3872 }
3873
3874 gdb_assert (lwp->suspended >= 0);
3875
3876 if (lwp->suspended)
3877 {
3878 if (debug_threads)
3879 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3880 return 0;
3881 }
3882
3883 if (thread->last_resume_kind == resume_stop
3884 && lwp->pending_signals_to_report == NULL
3885 && lwp->collecting_fast_tracepoint == 0)
3886 {
3887 /* We haven't reported this LWP as stopped yet (otherwise, the
3888 last_status.kind check above would catch it, and we wouldn't
3889 reach here). This LWP may have been momentarily paused by a
3890 stop_all_lwps call while handling, for example, another LWP's
3891 step-over. In that case, the pending expected SIGSTOP signal
3892 that was queued at vCont;t handling time will have already
3893 been consumed by wait_for_sigstop, and so we need to requeue
3894 another one here. Note that if the LWP already has a SIGSTOP
3895 pending, this is a no-op. */
3896
3897 if (debug_threads)
3898 fprintf (stderr,
3899 "Client wants LWP %ld to stop. "
3900 "Making sure it has a SIGSTOP pending\n",
3901 lwpid_of (lwp));
3902
3903 send_sigstop (lwp);
3904 }
3905
3906 step = thread->last_resume_kind == resume_step;
3907 linux_resume_one_lwp (lwp, step, 0, NULL);
3908 return 0;
3909 }
3910
3911 static int
3912 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3913 {
3914 struct lwp_info *lwp = (struct lwp_info *) entry;
3915
3916 if (lwp == except)
3917 return 0;
3918
3919 lwp->suspended--;
3920 gdb_assert (lwp->suspended >= 0);
3921
3922 return proceed_one_lwp (entry, except);
3923 }
3924
3925 /* When we finish a step-over, set threads running again. If there's
3926 another thread that may need a step-over, now's the time to start
3927 it. Eventually, we'll move all threads past their breakpoints. */
3928
3929 static void
3930 proceed_all_lwps (void)
3931 {
3932 struct lwp_info *need_step_over;
3933
3934 /* If there is a thread which would otherwise be resumed, which is
3935 stopped at a breakpoint that needs stepping over, then don't
3936 resume any threads - have it step over the breakpoint with all
3937 other threads stopped, then resume all threads again. */
3938
3939 if (supports_breakpoints ())
3940 {
3941 need_step_over
3942 = (struct lwp_info *) find_inferior (&all_lwps,
3943 need_step_over_p, NULL);
3944
3945 if (need_step_over != NULL)
3946 {
3947 if (debug_threads)
3948 fprintf (stderr, "proceed_all_lwps: found "
3949 "thread %ld needing a step-over\n",
3950 lwpid_of (need_step_over));
3951
3952 start_step_over (need_step_over);
3953 return;
3954 }
3955 }
3956
3957 if (debug_threads)
3958 fprintf (stderr, "Proceeding, no step-over needed\n");
3959
3960 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3961 }
3962
3963 /* Stopped LWPs that the client wanted to be running, that don't have
3964 pending statuses, are set to run again, except for EXCEPT, if not
3965 NULL. This undoes a stop_all_lwps call. */
3966
3967 static void
3968 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3969 {
3970 if (debug_threads)
3971 {
3972 if (except)
3973 fprintf (stderr,
3974 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3975 else
3976 fprintf (stderr,
3977 "unstopping all lwps\n");
3978 }
3979
3980 if (unsuspend)
3981 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3982 else
3983 find_inferior (&all_lwps, proceed_one_lwp, except);
3984 }
3985
3986
3987 #ifdef HAVE_LINUX_REGSETS
3988
3989 #define use_linux_regsets 1
3990
3991 static int
3992 regsets_fetch_inferior_registers (struct regcache *regcache)
3993 {
3994 struct regset_info *regset;
3995 int saw_general_regs = 0;
3996 int pid;
3997 struct iovec iov;
3998
3999 regset = target_regsets;
4000
4001 pid = lwpid_of (get_thread_lwp (current_inferior));
4002 while (regset->size >= 0)
4003 {
4004 void *buf, *data;
4005 int nt_type, res;
4006
4007 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4008 {
4009 regset ++;
4010 continue;
4011 }
4012
4013 buf = xmalloc (regset->size);
4014
4015 nt_type = regset->nt_type;
4016 if (nt_type)
4017 {
4018 iov.iov_base = buf;
4019 iov.iov_len = regset->size;
4020 data = (void *) &iov;
4021 }
4022 else
4023 data = buf;
4024
4025 #ifndef __sparc__
4026 res = ptrace (regset->get_request, pid,
4027 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4028 #else
4029 res = ptrace (regset->get_request, pid, data, nt_type);
4030 #endif
4031 if (res < 0)
4032 {
4033 if (errno == EIO)
4034 {
4035 /* If we get EIO on a regset, do not try it again for
4036 this process. */
4037 disabled_regsets[regset - target_regsets] = 1;
4038 free (buf);
4039 continue;
4040 }
4041 else
4042 {
4043 char s[256];
4044 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4045 pid);
4046 perror (s);
4047 }
4048 }
4049 else if (regset->type == GENERAL_REGS)
4050 saw_general_regs = 1;
4051 regset->store_function (regcache, buf);
4052 regset ++;
4053 free (buf);
4054 }
4055 if (saw_general_regs)
4056 return 0;
4057 else
4058 return 1;
4059 }
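/* Illustrative sketch only, not part of gdbserver: the iovec-based
   regset transfer prepared above, reduced to one standalone call.
   It assumes a kernel and libc that define PTRACE_GETREGSET (the
   generic request typically behind REGSET->GET_REQUEST when NT_TYPE
   is non-zero), a tracee that is already ptrace-stopped, and the
   headers already included at the top of this file.  */

static int
example_fetch_prstatus (pid_t pid)
{
  unsigned char regbuf[1024];	/* Generously sized raw buffer.  */
  struct iovec iov;

  iov.iov_base = regbuf;
  iov.iov_len = sizeof (regbuf);

  /* NT_PRSTATUS selects the general-purpose register set; on
     success the kernel shrinks IOV.IOV_LEN to the bytes written.  */
  if (ptrace (PTRACE_GETREGSET, pid, (void *) (long) NT_PRSTATUS, &iov) < 0)
    return -1;

  fprintf (stderr, "got %u register bytes\n", (unsigned int) iov.iov_len);
  return 0;
}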
4060
4061 static int
4062 regsets_store_inferior_registers (struct regcache *regcache)
4063 {
4064 struct regset_info *regset;
4065 int saw_general_regs = 0;
4066 int pid;
4067 struct iovec iov;
4068
4069 regset = target_regsets;
4070
4071 pid = lwpid_of (get_thread_lwp (current_inferior));
4072 while (regset->size >= 0)
4073 {
4074 void *buf, *data;
4075 int nt_type, res;
4076
4077 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4078 {
4079 regset ++;
4080 continue;
4081 }
4082
4083 buf = xmalloc (regset->size);
4084
4085 /* First fill the buffer with the current register set contents,
4086 in case there are any items in the kernel's regset that are
4087 not in gdbserver's regcache. */
4088
4089 nt_type = regset->nt_type;
4090 if (nt_type)
4091 {
4092 iov.iov_base = buf;
4093 iov.iov_len = regset->size;
4094 data = (void *) &iov;
4095 }
4096 else
4097 data = buf;
4098
4099 #ifndef __sparc__
4100 res = ptrace (regset->get_request, pid,
4101 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4102 #else
4103 res = ptrace (regset->get_request, pid, data, nt_type);
4104 #endif
4105
4106 if (res == 0)
4107 {
4108 /* Then overlay our cached registers on that. */
4109 regset->fill_function (regcache, buf);
4110
4111 /* Only now do we write the register set. */
4112 #ifndef __sparc__
4113 res = ptrace (regset->set_request, pid,
4114 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4115 #else
4116 res = ptrace (regset->set_request, pid, data, nt_type);
4117 #endif
4118 }
4119
4120 if (res < 0)
4121 {
4122 if (errno == EIO)
4123 {
4124 /* If we get EIO on a regset, do not try it again for
4125 this process. */
4126 disabled_regsets[regset - target_regsets] = 1;
4127 free (buf);
4128 continue;
4129 }
4130 else if (errno == ESRCH)
4131 {
4132 /* At this point, ESRCH should mean the process is
4133 already gone, in which case we simply ignore attempts
4134 to change its registers. See also the related
4135 comment in linux_resume_one_lwp. */
4136 free (buf);
4137 return 0;
4138 }
4139 else
4140 {
4141 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4142 }
4143 }
4144 else if (regset->type == GENERAL_REGS)
4145 saw_general_regs = 1;
4146 regset ++;
4147 free (buf);
4148 }
4149 if (saw_general_regs)
4150 return 0;
4151 else
4152 return 1;
4153 }
4154
4155 #else /* !HAVE_LINUX_REGSETS */
4156
4157 #define use_linux_regsets 0
4158 #define regsets_fetch_inferior_registers(regcache) 1
4159 #define regsets_store_inferior_registers(regcache) 1
4160
4161 #endif
4162
4163 /* Return 1 if register REGNO is supported by one of the regset ptrace
4164 calls or 0 if it has to be transferred individually. */
4165
4166 static int
4167 linux_register_in_regsets (int regno)
4168 {
4169 unsigned char mask = 1 << (regno % 8);
4170 size_t index = regno / 8;
4171
4172 return (use_linux_regsets
4173 && (the_low_target.regset_bitmap == NULL
4174 || (the_low_target.regset_bitmap[index] & mask) != 0));
4175 }
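/* For example, with REGNO == 10 the computation above yields
   INDEX == 1 and MASK == 1 << 2 == 0x04; i.e., register 10 is
   represented by bit 2 of the second byte of REGSET_BITMAP.  */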
4176
4177 #ifdef HAVE_LINUX_USRREGS
4178
4179 int
4180 register_addr (int regnum)
4181 {
4182 int addr;
4183
4184 if (regnum < 0 || regnum >= the_low_target.num_regs)
4185 error ("Invalid register number %d.", regnum);
4186
4187 addr = the_low_target.regmap[regnum];
4188
4189 return addr;
4190 }
4191
4192 /* Fetch one register. */
4193 static void
4194 fetch_register (struct regcache *regcache, int regno)
4195 {
4196 CORE_ADDR regaddr;
4197 int i, size;
4198 char *buf;
4199 int pid;
4200
4201 if (regno >= the_low_target.num_regs)
4202 return;
4203 if ((*the_low_target.cannot_fetch_register) (regno))
4204 return;
4205
4206 regaddr = register_addr (regno);
4207 if (regaddr == -1)
4208 return;
4209
4210 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4211 & -sizeof (PTRACE_XFER_TYPE));
4212 buf = alloca (size);
4213
4214 pid = lwpid_of (get_thread_lwp (current_inferior));
4215 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4216 {
4217 errno = 0;
4218 *(PTRACE_XFER_TYPE *) (buf + i) =
4219 ptrace (PTRACE_PEEKUSER, pid,
4220 /* Coerce to a uintptr_t first to avoid potential gcc warning
4221 about coercing an 8 byte integer to a 4 byte pointer. */
4222 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
4223 regaddr += sizeof (PTRACE_XFER_TYPE);
4224 if (errno != 0)
4225 error ("reading register %d: %s", regno, strerror (errno));
4226 }
4227
4228 if (the_low_target.supply_ptrace_register)
4229 the_low_target.supply_ptrace_register (regcache, regno, buf);
4230 else
4231 supply_register (regcache, regno, buf);
4232 }
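/* Illustrative sketch only, not part of gdbserver: the errno dance
   that fetch_register relies on, in isolation.  PTRACE_PEEKUSER
   returns the peeked data itself, so a result of -1 is ambiguous;
   errno must be cleared before the call and tested afterwards.
   EXAMPLE_OFFSET stands for a hypothetical USER-area offset such as
   register_addr would return.  */

static long
example_peek_user_word (pid_t pid, size_t example_offset)
{
  long value;

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, pid,
		  (PTRACE_ARG3_TYPE) (uintptr_t) example_offset, 0);
  if (errno != 0)
    perror ("PTRACE_PEEKUSER");
  return value;
}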
4233
4234 /* Store one register. */
4235 static void
4236 store_register (struct regcache *regcache, int regno)
4237 {
4238 CORE_ADDR regaddr;
4239 int i, size;
4240 char *buf;
4241 int pid;
4242
4243 if (regno >= the_low_target.num_regs)
4244 return;
4245 if ((*the_low_target.cannot_store_register) (regno))
4246 return;
4247
4248 regaddr = register_addr (regno);
4249 if (regaddr == -1)
4250 return;
4251
4252 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4253 & -sizeof (PTRACE_XFER_TYPE));
4254 buf = alloca (size);
4255 memset (buf, 0, size);
4256
4257 if (the_low_target.collect_ptrace_register)
4258 the_low_target.collect_ptrace_register (regcache, regno, buf);
4259 else
4260 collect_register (regcache, regno, buf);
4261
4262 pid = lwpid_of (get_thread_lwp (current_inferior));
4263 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4264 {
4265 errno = 0;
4266 ptrace (PTRACE_POKEUSER, pid,
4267 /* Coerce to a uintptr_t first to avoid potential gcc warning
4268 about coercing an 8 byte integer to a 4 byte pointer. */
4269 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4270 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4271 if (errno != 0)
4272 {
4273 /* At this point, ESRCH should mean the process is
4274 already gone, in which case we simply ignore attempts
4275 to change its registers. See also the related
4276 comment in linux_resume_one_lwp. */
4277 if (errno == ESRCH)
4278 return;
4279
4280 if ((*the_low_target.cannot_store_register) (regno) == 0)
4281 error ("writing register %d: %s", regno, strerror (errno));
4282 }
4283 regaddr += sizeof (PTRACE_XFER_TYPE);
4284 }
4285 }
4286
4287 /* Fetch all registers, or just one, from the child process.
4288 If REGNO is -1, do this for all registers, skipping any that are
4289 assumed to have been retrieved by regsets_fetch_inferior_registers,
4290 unless ALL is non-zero.
4291 Otherwise, REGNO specifies which register (so we can save time). */
4292 static void
4293 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4294 {
4295 if (regno == -1)
4296 {
4297 for (regno = 0; regno < the_low_target.num_regs; regno++)
4298 if (all || !linux_register_in_regsets (regno))
4299 fetch_register (regcache, regno);
4300 }
4301 else
4302 fetch_register (regcache, regno);
4303 }
4304
4305 /* Store our register values back into the inferior.
4306 If REGNO is -1, do this for all registers, skipping any that are
4307 assumed to have been saved by regsets_store_inferior_registers,
4308 unless ALL is non-zero.
4309 Otherwise, REGNO specifies which register (so we can save time). */
4310 static void
4311 usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4312 {
4313 if (regno == -1)
4314 {
4315 for (regno = 0; regno < the_low_target.num_regs; regno++)
4316 if (all || !linux_register_in_regsets (regno))
4317 store_register (regcache, regno);
4318 }
4319 else
4320 store_register (regcache, regno);
4321 }
4322
4323 #else /* !HAVE_LINUX_USRREGS */
4324
4325 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4326 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4327
4328 #endif
4329
4330
4331 void
4332 linux_fetch_registers (struct regcache *regcache, int regno)
4333 {
4334 int use_regsets;
4335 int all = 0;
4336
4337 if (regno == -1)
4338 {
4339 if (the_low_target.fetch_register != NULL)
4340 for (regno = 0; regno < the_low_target.num_regs; regno++)
4341 (*the_low_target.fetch_register) (regcache, regno);
4342
4343 all = regsets_fetch_inferior_registers (regcache);
4344 usr_fetch_inferior_registers (regcache, -1, all);
4345 }
4346 else
4347 {
4348 if (the_low_target.fetch_register != NULL
4349 && (*the_low_target.fetch_register) (regcache, regno))
4350 return;
4351
4352 use_regsets = linux_register_in_regsets (regno);
4353 if (use_regsets)
4354 all = regsets_fetch_inferior_registers (regcache);
4355 if (!use_regsets || all)
4356 usr_fetch_inferior_registers (regcache, regno, 1);
4357 }
4358 }
4359
4360 void
4361 linux_store_registers (struct regcache *regcache, int regno)
4362 {
4363 int use_regsets;
4364 int all = 0;
4365
4366 if (regno == -1)
4367 {
4368 all = regsets_store_inferior_registers (regcache);
4369 usr_store_inferior_registers (regcache, regno, all);
4370 }
4371 else
4372 {
4373 use_regsets = linux_register_in_regsets (regno);
4374 if (use_regsets)
4375 all = regsets_store_inferior_registers (regcache);
4376 if (!use_regsets || all)
4377 usr_store_inferior_registers (regcache, regno, 1);
4378 }
4379 }
4380
4381
4382 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4383 to debugger memory starting at MYADDR. */
4384
4385 static int
4386 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4387 {
4388 int pid = lwpid_of (get_thread_lwp (current_inferior));
4389 register PTRACE_XFER_TYPE *buffer;
4390 register CORE_ADDR addr;
4391 register int count;
4392 char filename[64];
4393 register int i;
4394 int ret;
4395 int fd;
4396
4397 /* Try using /proc. Don't bother for one word. */
4398 if (len >= 3 * sizeof (long))
4399 {
4400 int bytes;
4401
4402 /* We could keep this file open and cache it - possibly one per
4403 thread. That requires some juggling, but is even faster. */
4404 sprintf (filename, "/proc/%d/mem", pid);
4405 fd = open (filename, O_RDONLY | O_LARGEFILE);
4406 if (fd == -1)
4407 goto no_proc;
4408
4409 /* If pread64 is available, use it. It's faster if the kernel
4410 supports it (only one syscall), and it's 64-bit safe even on
4411 32-bit platforms (for instance, SPARC debugging a SPARC64
4412 application). */
4413 #ifdef HAVE_PREAD64
4414 bytes = pread64 (fd, myaddr, len, memaddr);
4415 #else
4416 bytes = -1;
4417 if (lseek (fd, memaddr, SEEK_SET) != -1)
4418 bytes = read (fd, myaddr, len);
4419 #endif
4420
4421 close (fd);
4422 if (bytes == len)
4423 return 0;
4424
4425 /* Some data was read, we'll try to get the rest with ptrace. */
4426 if (bytes > 0)
4427 {
4428 memaddr += bytes;
4429 myaddr += bytes;
4430 len -= bytes;
4431 }
4432 }
4433
4434 no_proc:
4435 /* Round starting address down to longword boundary. */
4436 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4437 /* Round ending address up; get number of longwords that makes. */
4438 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4439 / sizeof (PTRACE_XFER_TYPE));
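/* For example, with 8-byte transfers, MEMADDR == 0x1003 and LEN == 6
   give ADDR == 0x1000 and COUNT == ((0x1009 - 0x1000) + 7) / 8 == 2:
   two aligned words cover the unaligned six-byte span.  */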
4440 /* Allocate buffer of that many longwords. */
4441 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4442
4443 /* Read all the longwords. */
4444 errno = 0;
4445 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4446 {
4447 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4448 about coercing an 8 byte integer to a 4 byte pointer. */
4449 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4450 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4451 if (errno)
4452 break;
4453 }
4454 ret = errno;
4455
4456 /* Copy appropriate bytes out of the buffer. */
4457 if (i > 0)
4458 {
4459 i *= sizeof (PTRACE_XFER_TYPE);
4460 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4461 memcpy (myaddr,
4462 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4463 i < len ? i : len);
4464 }
4465
4466 return ret;
4467 }
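/* Illustrative sketch only, not part of gdbserver: the /proc/PID/mem
   fast path above, reduced to its essentials.  Linux only permits
   reads through this file while the target LWP is ptrace-stopped.
   Plain pread is used here for brevity, where the code above prefers
   pread64 when HAVE_PREAD64 is defined.  */

static ssize_t
example_proc_mem_read (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  sprintf (filename, "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;

  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}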
4468
4469 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4470 memory at MEMADDR. On failure (cannot write to the inferior)
4471 returns the value of errno. */
4472
4473 static int
4474 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4475 {
4476 register int i;
4477 /* Round starting address down to longword boundary. */
4478 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4479 /* Round ending address up; get number of longwords that makes. */
4480 register int count
4481 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4482 / sizeof (PTRACE_XFER_TYPE);
4483
4484 /* Allocate buffer of that many longwords. */
4485 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4486 alloca (count * sizeof (PTRACE_XFER_TYPE));
4487
4488 int pid = lwpid_of (get_thread_lwp (current_inferior));
4489
4490 if (debug_threads)
4491 {
4492 /* Dump up to four bytes. */
4493 unsigned int val = * (unsigned int *) myaddr;
4494 if (len == 1)
4495 val = val & 0xff;
4496 else if (len == 2)
4497 val = val & 0xffff;
4498 else if (len == 3)
4499 val = val & 0xffffff;
4500 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4501 val, (long)memaddr);
4502 }
4503
4504 /* Fill start and end extra bytes of buffer with existing memory data. */
4505
4506 errno = 0;
4507 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4508 about coercing an 8 byte integer to a 4 byte pointer. */
4509 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4510 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4511 if (errno)
4512 return errno;
4513
4514 if (count > 1)
4515 {
4516 errno = 0;
4517 buffer[count - 1]
4518 = ptrace (PTRACE_PEEKTEXT, pid,
4519 /* Coerce to a uintptr_t first to avoid potential gcc warning
4520 about coercing an 8 byte integer to a 4 byte pointer. */
4521 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4522 * sizeof (PTRACE_XFER_TYPE)),
4523 0);
4524 if (errno)
4525 return errno;
4526 }
4527
4528 /* Copy data to be written over corresponding part of buffer. */
4529
4530 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4531 myaddr, len);
4532
4533 /* Write the entire buffer. */
4534
4535 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4536 {
4537 errno = 0;
4538 ptrace (PTRACE_POKETEXT, pid,
4539 /* Coerce to a uintptr_t first to avoid potential gcc warning
4540 about coercing an 8 byte integer to a 4 byte pointer. */
4541 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4542 (PTRACE_ARG4_TYPE) buffer[i]);
4543 if (errno)
4544 return errno;
4545 }
4546
4547 return 0;
4548 }
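/* Illustrative sketch only, not part of gdbserver: the
   read-modify-write pattern above, specialized to patching a single
   byte (the common case when planting a breakpoint on x86).  Because
   the tracee and the server share endianness, byte I of the fetched
   word corresponds to memory address WORD_ADDR + I.  Returns 0 on
   success, or an errno value.  */

static int
example_poke_byte (pid_t pid, unsigned long addr, unsigned char byte)
{
  unsigned long word_addr = addr & ~(sizeof (long) - 1);
  long word;

  errno = 0;
  word = ptrace (PTRACE_PEEKTEXT, pid,
		 (PTRACE_ARG3_TYPE) (uintptr_t) word_addr, 0);
  if (errno != 0)
    return errno;

  ((unsigned char *) &word)[addr - word_addr] = byte;

  errno = 0;
  ptrace (PTRACE_POKETEXT, pid,
	  (PTRACE_ARG3_TYPE) (uintptr_t) word_addr,
	  (PTRACE_ARG4_TYPE) word);
  return errno;
}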
4549
4550 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4551 static int linux_supports_tracefork_flag;
4552
4553 static void
4554 linux_enable_event_reporting (int pid)
4555 {
4556 if (!linux_supports_tracefork_flag)
4557 return;
4558
4559 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4560 }
4561
4562 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4563
4564 static int
4565 linux_tracefork_grandchild (void *arg)
4566 {
4567 _exit (0);
4568 }
4569
4570 #define STACK_SIZE 4096
4571
4572 static int
4573 linux_tracefork_child (void *arg)
4574 {
4575 ptrace (PTRACE_TRACEME, 0, 0, 0);
4576 kill (getpid (), SIGSTOP);
4577
4578 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4579
4580 if (fork () == 0)
4581 linux_tracefork_grandchild (NULL);
4582
4583 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4584
4585 #ifdef __ia64__
4586 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4587 CLONE_VM | SIGCHLD, NULL);
4588 #else
4589 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4590 CLONE_VM | SIGCHLD, NULL);
4591 #endif
4592
4593 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4594
4595 _exit (0);
4596 }
4597
4598 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4599 sure that we can enable the option, and that it has the desired
4600 effect. */
4601
4602 static void
4603 linux_test_for_tracefork (void)
4604 {
4605 int child_pid, ret, status;
4606 long second_pid;
4607 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4608 char *stack = xmalloc (STACK_SIZE * 4);
4609 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4610
4611 linux_supports_tracefork_flag = 0;
4612
4613 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4614
4615 child_pid = fork ();
4616 if (child_pid == 0)
4617 linux_tracefork_child (NULL);
4618
4619 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4620
4621 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4622 #ifdef __ia64__
4623 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4624 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4625 #else /* !__ia64__ */
4626 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4627 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4628 #endif /* !__ia64__ */
4629
4630 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4631
4632 if (child_pid == -1)
4633 perror_with_name ("clone");
4634
4635 ret = my_waitpid (child_pid, &status, 0);
4636 if (ret == -1)
4637 perror_with_name ("waitpid");
4638 else if (ret != child_pid)
4639 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4640 if (! WIFSTOPPED (status))
4641 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4642
4643 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4644 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4645 if (ret != 0)
4646 {
4647 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4648 if (ret != 0)
4649 {
4650 warning ("linux_test_for_tracefork: failed to kill child");
4651 return;
4652 }
4653
4654 ret = my_waitpid (child_pid, &status, 0);
4655 if (ret != child_pid)
4656 warning ("linux_test_for_tracefork: failed to wait for killed child");
4657 else if (!WIFSIGNALED (status))
4658 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4659 "killed child", status);
4660
4661 return;
4662 }
4663
4664 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4665 if (ret != 0)
4666 warning ("linux_test_for_tracefork: failed to resume child");
4667
4668 ret = my_waitpid (child_pid, &status, 0);
4669
4670 if (ret == child_pid && WIFSTOPPED (status)
4671 && status >> 16 == PTRACE_EVENT_FORK)
4672 {
4673 second_pid = 0;
4674 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4675 if (ret == 0 && second_pid != 0)
4676 {
4677 int second_status;
4678
4679 linux_supports_tracefork_flag = 1;
4680 my_waitpid (second_pid, &second_status, 0);
4681 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4682 if (ret != 0)
4683 warning ("linux_test_for_tracefork: failed to kill second child");
4684 my_waitpid (second_pid, &status, 0);
4685 }
4686 }
4687 else
4688 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4689 "(%d, status 0x%x)", ret, status);
4690
4691 do
4692 {
4693 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4694 if (ret != 0)
4695 warning ("linux_test_for_tracefork: failed to kill child");
4696 my_waitpid (child_pid, &status, 0);
4697 }
4698 while (WIFSTOPPED (status));
4699
4700 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4701 free (stack);
4702 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4703 }
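/* Illustrative sketch only, not part of gdbserver: once the probe
   above has succeeded, extended events show up as SIGTRAP stops whose
   high status bits carry the event code, and PTRACE_GETEVENTMSG
   retrieves the associated value - for PTRACE_EVENT_CLONE, the new
   LWP's id.  Returns the new lwpid, or -1 if STATUS is not a clone
   event.  */

static long
example_decode_clone_event (pid_t pid, int status)
{
  unsigned long new_lwp = 0;

  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && status >> 16 == PTRACE_EVENT_CLONE
      && ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_lwp) == 0)
    return (long) new_lwp;

  return -1;
}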
4704
4705
4706 static void
4707 linux_look_up_symbols (void)
4708 {
4709 #ifdef USE_THREAD_DB
4710 struct process_info *proc = current_process ();
4711
4712 if (proc->private->thread_db != NULL)
4713 return;
4714
4715 /* If the kernel supports tracing forks then it also supports tracing
4716 clones, and then we don't need to use the magic thread event breakpoint
4717 to learn about threads. */
4718 thread_db_init (!linux_supports_tracefork_flag);
4719 #endif
4720 }
4721
4722 static void
4723 linux_request_interrupt (void)
4724 {
4725 extern unsigned long signal_pid;
4726
4727 if (!ptid_equal (cont_thread, null_ptid)
4728 && !ptid_equal (cont_thread, minus_one_ptid))
4729 {
4730 struct lwp_info *lwp;
4731 int lwpid;
4732
4733 lwp = get_thread_lwp (current_inferior);
4734 lwpid = lwpid_of (lwp);
4735 kill_lwp (lwpid, SIGINT);
4736 }
4737 else
4738 kill_lwp (signal_pid, SIGINT);
4739 }
4740
4741 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4742 to debugger memory starting at MYADDR. */
4743
4744 static int
4745 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4746 {
4747 char filename[PATH_MAX];
4748 int fd, n;
4749 int pid = lwpid_of (get_thread_lwp (current_inferior));
4750
4751 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4752
4753 fd = open (filename, O_RDONLY);
4754 if (fd < 0)
4755 return -1;
4756
4757 if (offset != (CORE_ADDR) 0
4758 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4759 n = -1;
4760 else
4761 n = read (fd, myaddr, len);
4762
4763 close (fd);
4764
4765 return n;
4766 }
4767
4768 /* These breakpoint and watchpoint related wrapper functions simply
4769 pass on the function call if the target has registered a
4770 corresponding function. */
4771
4772 static int
4773 linux_insert_point (char type, CORE_ADDR addr, int len)
4774 {
4775 if (the_low_target.insert_point != NULL)
4776 return the_low_target.insert_point (type, addr, len);
4777 else
4778 /* Unsupported (see target.h). */
4779 return 1;
4780 }
4781
4782 static int
4783 linux_remove_point (char type, CORE_ADDR addr, int len)
4784 {
4785 if (the_low_target.remove_point != NULL)
4786 return the_low_target.remove_point (type, addr, len);
4787 else
4788 /* Unsupported (see target.h). */
4789 return 1;
4790 }
4791
4792 static int
4793 linux_stopped_by_watchpoint (void)
4794 {
4795 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4796
4797 return lwp->stopped_by_watchpoint;
4798 }
4799
4800 static CORE_ADDR
4801 linux_stopped_data_address (void)
4802 {
4803 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4804
4805 return lwp->stopped_data_address;
4806 }
4807
4808 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4809 #if ! (defined(PT_TEXT_ADDR) \
4810 || defined(PT_DATA_ADDR) \
4811 || defined(PT_TEXT_END_ADDR))
4812 #if defined(__mcoldfire__)
4813 /* These should really be defined in the kernel's ptrace.h header. */
4814 #define PT_TEXT_ADDR 49*4
4815 #define PT_DATA_ADDR 50*4
4816 #define PT_TEXT_END_ADDR 51*4
4817 #elif defined(BFIN)
4818 #define PT_TEXT_ADDR 220
4819 #define PT_TEXT_END_ADDR 224
4820 #define PT_DATA_ADDR 228
4821 #elif defined(__TMS320C6X__)
4822 #define PT_TEXT_ADDR (0x10000*4)
4823 #define PT_DATA_ADDR (0x10004*4)
4824 #define PT_TEXT_END_ADDR (0x10008*4)
4825 #endif
4826 #endif
4827
4828 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4829 to tell gdb about. */
4830
4831 static int
4832 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4833 {
4834 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4835 unsigned long text, text_end, data;
4836 int pid = lwpid_of (get_thread_lwp (current_inferior));
4837
4838 errno = 0;
4839
4840 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4841 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4842 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4843
4844 if (errno == 0)
4845 {
4846 /* Both text and data offsets produced at compile-time (and so
4847 used by gdb) are relative to the beginning of the program,
4848 with the data segment immediately following the text segment.
4849 However, the actual runtime layout in memory may put the data
4850 somewhere else, so when we send gdb a data base-address, we
4851 use the real data base address and subtract the compile-time
4852 data base-address from it (which is just the length of the
4853 text segment). BSS immediately follows data in both
4854 cases. */
4855 *text_p = text;
4856 *data_p = data - (text_end - text);
4857
4858 return 1;
4859 }
4860 #endif
4861 return 0;
4862 }
4863 #endif
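/* For example (made-up values): with TEXT == 0x100000,
   TEXT_END == 0x140000 and DATA == 0x800000, the text segment is
   0x40000 bytes long, so linux_read_offsets reports *TEXT_P ==
   0x100000 and *DATA_P == 0x800000 - 0x40000 == 0x7c0000, the base
   gdb must add to compile-time data addresses.  */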
4864
4865 static int
4866 linux_qxfer_osdata (const char *annex,
4867 unsigned char *readbuf, unsigned const char *writebuf,
4868 CORE_ADDR offset, int len)
4869 {
4870 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4871 }
4872
4873 /* Convert a native/host siginfo object into/from the siginfo in the
4874 layout of the inferior's architecture. */
4875
4876 static void
4877 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4878 {
4879 int done = 0;
4880
4881 if (the_low_target.siginfo_fixup != NULL)
4882 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4883
4884 /* If there was no callback, or the callback didn't do anything,
4885 then just do a straight memcpy. */
4886 if (!done)
4887 {
4888 if (direction == 1)
4889 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4890 else
4891 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4892 }
4893 }
4894
4895 static int
4896 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4897 unsigned const char *writebuf, CORE_ADDR offset, int len)
4898 {
4899 int pid;
4900 siginfo_t siginfo;
4901 char inf_siginfo[sizeof (siginfo_t)];
4902
4903 if (current_inferior == NULL)
4904 return -1;
4905
4906 pid = lwpid_of (get_thread_lwp (current_inferior));
4907
4908 if (debug_threads)
4909 fprintf (stderr, "%s siginfo for lwp %d.\n",
4910 readbuf != NULL ? "Reading" : "Writing",
4911 pid);
4912
4913 if (offset >= sizeof (siginfo))
4914 return -1;
4915
4916 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4917 return -1;
4918
4919 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4920 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4921 inferior with a 64-bit GDBSERVER should look the same as debugging it
4922 with a 32-bit GDBSERVER, we need to convert it. */
4923 siginfo_fixup (&siginfo, inf_siginfo, 0);
4924
4925 if (offset + len > sizeof (siginfo))
4926 len = sizeof (siginfo) - offset;
4927
4928 if (readbuf != NULL)
4929 memcpy (readbuf, inf_siginfo + offset, len);
4930 else
4931 {
4932 memcpy (inf_siginfo + offset, writebuf, len);
4933
4934 /* Convert back to ptrace layout before flushing it out. */
4935 siginfo_fixup (&siginfo, inf_siginfo, 1);
4936
4937 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4938 return -1;
4939 }
4940
4941 return len;
4942 }
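/* Illustrative sketch only, not part of gdbserver: the underlying
   PTRACE_GETSIGINFO request by itself, without the 32/64-bit layout
   fixup that linux_xfer_siginfo performs above.  */

static int
example_print_siginfo (pid_t pid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &si) != 0)
    return -1;

  fprintf (stderr, "lwp %d: signal %d, si_code %d\n",
	   (int) pid, si.si_signo, si.si_code);
  return 0;
}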
4943
4944 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4945 it lets us notice when children change state; and it acts as the
4946 handler for the sigsuspend in my_waitpid. */
4947
4948 static void
4949 sigchld_handler (int signo)
4950 {
4951 int old_errno = errno;
4952
4953 if (debug_threads)
4954 {
4955 do
4956 {
4957 /* fprintf is not async-signal-safe, so call write
4958 directly. */
4959 if (write (2, "sigchld_handler\n",
4960 sizeof ("sigchld_handler\n") - 1) < 0)
4961 break; /* just ignore */
4962 } while (0);
4963 }
4964
4965 if (target_is_async_p ())
4966 async_file_mark (); /* trigger a linux_wait */
4967
4968 errno = old_errno;
4969 }
4970
4971 static int
4972 linux_supports_non_stop (void)
4973 {
4974 return 1;
4975 }
4976
4977 static int
4978 linux_async (int enable)
4979 {
4980 int previous = (linux_event_pipe[0] != -1);
4981
4982 if (debug_threads)
4983 fprintf (stderr, "linux_async (%d), previous=%d\n",
4984 enable, previous);
4985
4986 if (previous != enable)
4987 {
4988 sigset_t mask;
4989 sigemptyset (&mask);
4990 sigaddset (&mask, SIGCHLD);
4991
4992 sigprocmask (SIG_BLOCK, &mask, NULL);
4993
4994 if (enable)
4995 {
4996 if (pipe (linux_event_pipe) == -1)
4997 fatal ("creating event pipe failed.");
4998
4999 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5000 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5001
5002 /* Register the event loop handler. */
5003 add_file_handler (linux_event_pipe[0],
5004 handle_target_event, NULL);
5005
5006 /* Always trigger a linux_wait. */
5007 async_file_mark ();
5008 }
5009 else
5010 {
5011 delete_file_handler (linux_event_pipe[0]);
5012
5013 close (linux_event_pipe[0]);
5014 close (linux_event_pipe[1]);
5015 linux_event_pipe[0] = -1;
5016 linux_event_pipe[1] = -1;
5017 }
5018
5019 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5020 }
5021
5022 return previous;
5023 }
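/* Illustrative sketch only, not part of gdbserver: the classic
   "self-pipe trick" that the event pipe above is an instance of.  A
   signal handler may safely call very little of libc, but write is
   async-signal-safe, so poking one byte into a non-blocking pipe is a
   race-free way for sigchld_handler to wake a select/poll-based event
   loop (presumably what async_file_mark does with linux_event_pipe).  */

static int example_self_pipe[2];

static void
example_wake_event_loop (void)
{
  if (write (example_self_pipe[1], "+", 1) < 0)
    {
      /* A full pipe already guarantees a pending wakeup; ignore.  */
    }
}

static void
example_drain_wakeups (void)
{
  char buf[64];

  /* With O_NONBLOCK set on the read end, this never blocks.  */
  while (read (example_self_pipe[0], buf, sizeof buf) > 0)
    ;
}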
5024
5025 static int
5026 linux_start_non_stop (int nonstop)
5027 {
5028 /* Register or unregister from event-loop accordingly. */
5029 linux_async (nonstop);
5030 return 0;
5031 }
5032
5033 static int
5034 linux_supports_multi_process (void)
5035 {
5036 return 1;
5037 }
5038
5039 static int
5040 linux_supports_disable_randomization (void)
5041 {
5042 #ifdef HAVE_PERSONALITY
5043 return 1;
5044 #else
5045 return 0;
5046 #endif
5047 }
5048
5049 static int
5050 linux_supports_agent (void)
5051 {
5052 return 1;
5053 }
5054
5055 /* Enumerate spufs IDs for process PID. */
5056 static int
5057 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5058 {
5059 int pos = 0;
5060 int written = 0;
5061 char path[128];
5062 DIR *dir;
5063 struct dirent *entry;
5064
5065 sprintf (path, "/proc/%ld/fd", pid);
5066 dir = opendir (path);
5067 if (!dir)
5068 return -1;
5069
5070 rewinddir (dir);
5071 while ((entry = readdir (dir)) != NULL)
5072 {
5073 struct stat st;
5074 struct statfs stfs;
5075 int fd;
5076
5077 fd = atoi (entry->d_name);
5078 if (!fd)
5079 continue;
5080
5081 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5082 if (stat (path, &st) != 0)
5083 continue;
5084 if (!S_ISDIR (st.st_mode))
5085 continue;
5086
5087 if (statfs (path, &stfs) != 0)
5088 continue;
5089 if (stfs.f_type != SPUFS_MAGIC)
5090 continue;
5091
5092 if (pos >= offset && pos + 4 <= offset + len)
5093 {
5094 *(unsigned int *)(buf + pos - offset) = fd;
5095 written += 4;
5096 }
5097 pos += 4;
5098 }
5099
5100 closedir (dir);
5101 return written;
5102 }
5103
5104 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5105 object type, using the /proc file system. */
5106 static int
5107 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5108 unsigned const char *writebuf,
5109 CORE_ADDR offset, int len)
5110 {
5111 long pid = lwpid_of (get_thread_lwp (current_inferior));
5112 char buf[128];
5113 int fd = 0;
5114 int ret = 0;
5115
5116 if (!writebuf && !readbuf)
5117 return -1;
5118
5119 if (!*annex)
5120 {
5121 if (!readbuf)
5122 return -1;
5123 else
5124 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5125 }
5126
5127 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5128 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5129 if (fd <= 0)
5130 return -1;
5131
5132 if (offset != 0
5133 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5134 {
5135 close (fd);
5136 return 0;
5137 }
5138
5139 if (writebuf)
5140 ret = write (fd, writebuf, (size_t) len);
5141 else
5142 ret = read (fd, readbuf, (size_t) len);
5143
5144 close (fd);
5145 return ret;
5146 }
5147
5148 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5149 struct target_loadseg
5150 {
5151 /* Core address to which the segment is mapped. */
5152 Elf32_Addr addr;
5153 /* VMA recorded in the program header. */
5154 Elf32_Addr p_vaddr;
5155 /* Size of this segment in memory. */
5156 Elf32_Word p_memsz;
5157 };
5158
5159 # if defined PT_GETDSBT
5160 struct target_loadmap
5161 {
5162 /* Protocol version number, must be zero. */
5163 Elf32_Word version;
5164 /* Pointer to the DSBT table, its size, and the DSBT index. */
5165 unsigned *dsbt_table;
5166 unsigned dsbt_size, dsbt_index;
5167 /* Number of segments in this map. */
5168 Elf32_Word nsegs;
5169 /* The actual memory map. */
5170 struct target_loadseg segs[/*nsegs*/];
5171 };
5172 # define LINUX_LOADMAP PT_GETDSBT
5173 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5174 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5175 # else
5176 struct target_loadmap
5177 {
5178 /* Protocol version number, must be zero. */
5179 Elf32_Half version;
5180 /* Number of segments in this map. */
5181 Elf32_Half nsegs;
5182 /* The actual memory map. */
5183 struct target_loadseg segs[/*nsegs*/];
5184 };
5185 # define LINUX_LOADMAP PTRACE_GETFDPIC
5186 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5187 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5188 # endif
5189
5190 static int
5191 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5192 unsigned char *myaddr, unsigned int len)
5193 {
5194 int pid = lwpid_of (get_thread_lwp (current_inferior));
5195 int addr = -1;
5196 struct target_loadmap *data = NULL;
5197 unsigned int actual_length, copy_length;
5198
5199 if (strcmp (annex, "exec") == 0)
5200 addr = (int) LINUX_LOADMAP_EXEC;
5201 else if (strcmp (annex, "interp") == 0)
5202 addr = (int) LINUX_LOADMAP_INTERP;
5203 else
5204 return -1;
5205
5206 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5207 return -1;
5208
5209 if (data == NULL)
5210 return -1;
5211
5212 actual_length = sizeof (struct target_loadmap)
5213 + sizeof (struct target_loadseg) * data->nsegs;
5214
5215 if (offset < 0 || offset > actual_length)
5216 return -1;
5217
5218 copy_length = actual_length - offset < len ? actual_length - offset : len;
5219 memcpy (myaddr, (char *) data + offset, copy_length);
5220 return copy_length;
5221 }
5222 #else
5223 # define linux_read_loadmap NULL
5224 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5225
5226 static void
5227 linux_process_qsupported (const char *query)
5228 {
5229 if (the_low_target.process_qsupported != NULL)
5230 the_low_target.process_qsupported (query);
5231 }
5232
5233 static int
5234 linux_supports_tracepoints (void)
5235 {
5236 if (the_low_target.supports_tracepoints == NULL)
5237 return 0;
5238
5239 return (*the_low_target.supports_tracepoints) ();
5240 }
5241
5242 static CORE_ADDR
5243 linux_read_pc (struct regcache *regcache)
5244 {
5245 if (the_low_target.get_pc == NULL)
5246 return 0;
5247
5248 return (*the_low_target.get_pc) (regcache);
5249 }
5250
5251 static void
5252 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5253 {
5254 gdb_assert (the_low_target.set_pc != NULL);
5255
5256 (*the_low_target.set_pc) (regcache, pc);
5257 }
5258
5259 static int
5260 linux_thread_stopped (struct thread_info *thread)
5261 {
5262 return get_thread_lwp (thread)->stopped;
5263 }
5264
5265 /* This exposes stop-all-threads functionality to other modules. */
5266
5267 static void
5268 linux_pause_all (int freeze)
5269 {
5270 stop_all_lwps (freeze, NULL);
5271 }
5272
5273 /* This exposes unstop-all-threads functionality to other gdbserver
5274 modules. */
5275
5276 static void
5277 linux_unpause_all (int unfreeze)
5278 {
5279 unstop_all_lwps (unfreeze, NULL);
5280 }
5281
5282 static int
5283 linux_prepare_to_access_memory (void)
5284 {
5285 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5286 running LWP. */
5287 if (non_stop)
5288 linux_pause_all (1);
5289 return 0;
5290 }
5291
5292 static void
5293 linux_done_accessing_memory (void)
5294 {
5295 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5296 running LWP. */
5297 if (non_stop)
5298 linux_unpause_all (1);
5299 }
5300
5301 static int
5302 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5303 CORE_ADDR collector,
5304 CORE_ADDR lockaddr,
5305 ULONGEST orig_size,
5306 CORE_ADDR *jump_entry,
5307 CORE_ADDR *trampoline,
5308 ULONGEST *trampoline_size,
5309 unsigned char *jjump_pad_insn,
5310 ULONGEST *jjump_pad_insn_size,
5311 CORE_ADDR *adjusted_insn_addr,
5312 CORE_ADDR *adjusted_insn_addr_end,
5313 char *err)
5314 {
5315 return (*the_low_target.install_fast_tracepoint_jump_pad)
5316 (tpoint, tpaddr, collector, lockaddr, orig_size,
5317 jump_entry, trampoline, trampoline_size,
5318 jjump_pad_insn, jjump_pad_insn_size,
5319 adjusted_insn_addr, adjusted_insn_addr_end,
5320 err);
5321 }
5322
5323 static struct emit_ops *
5324 linux_emit_ops (void)
5325 {
5326 if (the_low_target.emit_ops != NULL)
5327 return (*the_low_target.emit_ops) ();
5328 else
5329 return NULL;
5330 }
5331
5332 static int
5333 linux_get_min_fast_tracepoint_insn_len (void)
5334 {
5335 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5336 }
5337
5338 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5339
5340 static int
5341 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5342 CORE_ADDR *phdr_memaddr, int *num_phdr)
5343 {
5344 char filename[PATH_MAX];
5345 int fd;
5346 const int auxv_size = is_elf64
5347 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5348 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5349
5350 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5351
5352 fd = open (filename, O_RDONLY);
5353 if (fd < 0)
5354 return 1;
5355
5356 *phdr_memaddr = 0;
5357 *num_phdr = 0;
5358 while (read (fd, buf, auxv_size) == auxv_size
5359 && (*phdr_memaddr == 0 || *num_phdr == 0))
5360 {
5361 if (is_elf64)
5362 {
5363 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5364
5365 switch (aux->a_type)
5366 {
5367 case AT_PHDR:
5368 *phdr_memaddr = aux->a_un.a_val;
5369 break;
5370 case AT_PHNUM:
5371 *num_phdr = aux->a_un.a_val;
5372 break;
5373 }
5374 }
5375 else
5376 {
5377 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5378
5379 switch (aux->a_type)
5380 {
5381 case AT_PHDR:
5382 *phdr_memaddr = aux->a_un.a_val;
5383 break;
5384 case AT_PHNUM:
5385 *num_phdr = aux->a_un.a_val;
5386 break;
5387 }
5388 }
5389 }
5390
5391 close (fd);
5392
5393 if (*phdr_memaddr == 0 || *num_phdr == 0)
5394 {
5395 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5396 "phdr_memaddr = %ld, phdr_num = %d",
5397 (long) *phdr_memaddr, *num_phdr);
5398 return 2;
5399 }
5400
5401 return 0;
5402 }
5403
5404 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5405
5406 static CORE_ADDR
5407 get_dynamic (const int pid, const int is_elf64)
5408 {
5409 CORE_ADDR phdr_memaddr, relocation;
5410 int num_phdr, i;
5411 unsigned char *phdr_buf;
5412 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5413
5414 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5415 return 0;
5416
5417 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5418 phdr_buf = alloca (num_phdr * phdr_size);
5419
5420 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5421 return 0;
5422
5423 /* Compute relocation: it is expected to be 0 for "regular" executables,
5424 non-zero for PIE ones. */
5425 relocation = -1;
5426 for (i = 0; relocation == -1 && i < num_phdr; i++)
5427 if (is_elf64)
5428 {
5429 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5430
5431 if (p->p_type == PT_PHDR)
5432 relocation = phdr_memaddr - p->p_vaddr;
5433 }
5434 else
5435 {
5436 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5437
5438 if (p->p_type == PT_PHDR)
5439 relocation = phdr_memaddr - p->p_vaddr;
5440 }
5441
5442 if (relocation == -1)
5443 {
5444 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5445 real-world executables, including PIE executables, always have
5446 PT_PHDR present. PT_PHDR is absent from some shared libraries and
5447 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
5448 provides DT_DEBUG anyway (fpc binaries are statically linked).
5449
5450 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
5451
5452 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
5453
5454 return 0;
5455 }
5456
5457 for (i = 0; i < num_phdr; i++)
5458 {
5459 if (is_elf64)
5460 {
5461 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5462
5463 if (p->p_type == PT_DYNAMIC)
5464 return p->p_vaddr + relocation;
5465 }
5466 else
5467 {
5468 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5469
5470 if (p->p_type == PT_DYNAMIC)
5471 return p->p_vaddr + relocation;
5472 }
5473 }
5474
5475 return 0;
5476 }
5477
5478 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5479 can be 0 if the inferior does not yet have the library list initialized.
5480 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5481 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5482
5483 static CORE_ADDR
5484 get_r_debug (const int pid, const int is_elf64)
5485 {
5486 CORE_ADDR dynamic_memaddr;
5487 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5488 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5489 CORE_ADDR map = -1;
5490
5491 dynamic_memaddr = get_dynamic (pid, is_elf64);
5492 if (dynamic_memaddr == 0)
5493 return map;
5494
5495 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5496 {
5497 if (is_elf64)
5498 {
5499 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5500 #ifdef DT_MIPS_RLD_MAP
5501 union
5502 {
5503 Elf64_Xword map;
5504 unsigned char buf[sizeof (Elf64_Xword)];
5505 }
5506 rld_map;
5507
5508 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5509 {
5510 if (linux_read_memory (dyn->d_un.d_val,
5511 rld_map.buf, sizeof (rld_map.buf)) == 0)
5512 return rld_map.map;
5513 else
5514 break;
5515 }
5516 #endif /* DT_MIPS_RLD_MAP */
5517
5518 if (dyn->d_tag == DT_DEBUG && map == -1)
5519 map = dyn->d_un.d_val;
5520
5521 if (dyn->d_tag == DT_NULL)
5522 break;
5523 }
5524 else
5525 {
5526 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5527 #ifdef DT_MIPS_RLD_MAP
5528 union
5529 {
5530 Elf32_Word map;
5531 unsigned char buf[sizeof (Elf32_Word)];
5532 }
5533 rld_map;
5534
5535 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5536 {
5537 if (linux_read_memory (dyn->d_un.d_val,
5538 rld_map.buf, sizeof (rld_map.buf)) == 0)
5539 return rld_map.map;
5540 else
5541 break;
5542 }
5543 #endif /* DT_MIPS_RLD_MAP */
5544
5545 if (dyn->d_tag == DT_DEBUG && map == -1)
5546 map = dyn->d_un.d_val;
5547
5548 if (dyn->d_tag == DT_NULL)
5549 break;
5550 }
5551
5552 dynamic_memaddr += dyn_size;
5553 }
5554
5555 return map;
5556 }
5557
5558 /* Read one pointer from MEMADDR in the inferior. */
5559
5560 static int
5561 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5562 {
5563 int ret;
5564
5565 /* Go through a union so this works on either big or little endian
5566 hosts, when the inferior's pointer size is smaller than the size
5567 of CORE_ADDR. It is assumed the inferior's endianness is the
5568 same as the superior's. */
5569 union
5570 {
5571 CORE_ADDR core_addr;
5572 unsigned int ui;
5573 unsigned char uc;
5574 } addr;
5575
5576 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5577 if (ret == 0)
5578 {
5579 if (ptr_size == sizeof (CORE_ADDR))
5580 *ptr = addr.core_addr;
5581 else if (ptr_size == sizeof (unsigned int))
5582 *ptr = addr.ui;
5583 else
5584 gdb_assert_not_reached ("unhandled pointer size");
5585 }
5586 return ret;
5587 }
5588
5589 struct link_map_offsets
5590 {
5591 /* Offset and size of r_debug.r_version. */
5592 int r_version_offset;
5593
5594 /* Offset and size of r_debug.r_map. */
5595 int r_map_offset;
5596
5597 /* Offset to l_addr field in struct link_map. */
5598 int l_addr_offset;
5599
5600 /* Offset to l_name field in struct link_map. */
5601 int l_name_offset;
5602
5603 /* Offset to l_ld field in struct link_map. */
5604 int l_ld_offset;
5605
5606 /* Offset to l_next field in struct link_map. */
5607 int l_next_offset;
5608
5609 /* Offset to l_prev field in struct link_map. */
5610 int l_prev_offset;
5611 };
5612
5613 /* Construct qXfer:libraries-svr4:read reply. */
5614
5615 static int
5616 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5617 unsigned const char *writebuf,
5618 CORE_ADDR offset, int len)
5619 {
5620 char *document;
5621 unsigned document_len;
5622 struct process_info_private *const priv = current_process ()->private;
5623 char filename[PATH_MAX];
5624 int pid, is_elf64;
5625
5626 static const struct link_map_offsets lmo_32bit_offsets =
5627 {
5628 0, /* r_version offset. */
5629 4, /* r_debug.r_map offset. */
5630 0, /* l_addr offset in link_map. */
5631 4, /* l_name offset in link_map. */
5632 8, /* l_ld offset in link_map. */
5633 12, /* l_next offset in link_map. */
5634 16 /* l_prev offset in link_map. */
5635 };
5636
5637 static const struct link_map_offsets lmo_64bit_offsets =
5638 {
5639 0, /* r_version offset. */
5640 8, /* r_debug.r_map offset. */
5641 0, /* l_addr offset in link_map. */
5642 8, /* l_name offset in link_map. */
5643 16, /* l_ld offset in link_map. */
5644 24, /* l_next offset in link_map. */
5645 32 /* l_prev offset in link_map. */
5646 };
5647 const struct link_map_offsets *lmo;
5648 unsigned int machine;
5649
5650 if (writebuf != NULL)
5651 return -2;
5652 if (readbuf == NULL)
5653 return -1;
5654
5655 pid = lwpid_of (get_thread_lwp (current_inferior));
5656 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5657 is_elf64 = elf_64_file_p (filename, &machine);
5658 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5659
5660 if (priv->r_debug == 0)
5661 priv->r_debug = get_r_debug (pid, is_elf64);
5662
5663 /* We failed to find DT_DEBUG. This situation will not change for this
5664 inferior - do not retry it. Report it to GDB as E01; see GDB's
5665 solib-svr4.c for the reasons. */
5666 if (priv->r_debug == (CORE_ADDR) -1)
5667 return -1;
5668
5669 if (priv->r_debug == 0)
5670 {
5671 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5672 }
5673 else
5674 {
5675 int allocated = 1024;
5676 char *p;
5677 const int ptr_size = is_elf64 ? 8 : 4;
5678 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5679 int r_version, header_done = 0;
5680
5681 document = xmalloc (allocated);
5682 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5683 p = document + strlen (document);
5684
5685 r_version = 0;
5686 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5687 (unsigned char *) &r_version,
5688 sizeof (r_version)) != 0
5689 || r_version != 1)
5690 {
5691 warning ("unexpected r_debug version %d", r_version);
5692 goto done;
5693 }
5694
5695 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5696 &lm_addr, ptr_size) != 0)
5697 {
5698 warning ("unable to read r_map from 0x%lx",
5699 (long) priv->r_debug + lmo->r_map_offset);
5700 goto done;
5701 }
5702
5703 lm_prev = 0;
5704 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5705 &l_name, ptr_size) == 0
5706 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5707 &l_addr, ptr_size) == 0
5708 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5709 &l_ld, ptr_size) == 0
5710 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5711 &l_prev, ptr_size) == 0
5712 && read_one_ptr (lm_addr + lmo->l_next_offset,
5713 &l_next, ptr_size) == 0)
5714 {
5715 unsigned char libname[PATH_MAX];
5716
5717 if (lm_prev != l_prev)
5718 {
5719 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5720 (long) lm_prev, (long) l_prev);
5721 break;
5722 }
5723
5724 /* Not checking for error because reading may stop before
5725 we've got PATH_MAX worth of characters. */
5726 libname[0] = '\0';
5727 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5728 libname[sizeof (libname) - 1] = '\0';
5729 if (libname[0] != '\0')
5730 {
5731 /* 6x the size for xml_escape_text below. */
5732 size_t len = 6 * strlen ((char *) libname);
5733 char *name;
5734
5735 if (!header_done)
5736 {
5737 /* Terminate `<library-list-svr4'. */
5738 *p++ = '>';
5739 header_done = 1;
5740 }
5741
5742 while (allocated < p - document + len + 200)
5743 {
5744 /* Expand to guarantee sufficient storage. */
5745 uintptr_t document_len = p - document;
5746
5747 document = xrealloc (document, 2 * allocated);
5748 allocated *= 2;
5749 p = document + document_len;
5750 }
5751
5752 name = xml_escape_text ((char *) libname);
5753 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5754 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5755 name, (unsigned long) lm_addr,
5756 (unsigned long) l_addr, (unsigned long) l_ld);
5757 free (name);
5758 }
5759 else if (lm_prev == 0)
5760 {
5761 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5762 p = p + strlen (p);
5763 }
5764
5765 if (l_next == 0)
5766 break;
5767
5768 lm_prev = lm_addr;
5769 lm_addr = l_next;
5770 }
5771 done:
5772 if (!header_done)
5773 {
5774 /* Empty list; terminate `<library-list-svr4'. */
5775 strcpy (p, "/>");
5776 }
5777 else
5778 strcpy (p, "</library-list-svr4>");
5779 }
5780
5781 document_len = strlen (document);
5782 if (offset < document_len)
5783 document_len -= offset;
5784 else
5785 document_len = 0;
5786 if (len > document_len)
5787 len = document_len;
5788
5789 memcpy (readbuf, document + offset, len);
5790 xfree (document);
5791
5792 return len;
5793 }
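/* For reference, a reply built by the code above might look like the
   following (all addresses are made up):

   <library-list-svr4 version="1.0" main-lm="0x7f01">
     <library name="/lib/libc.so.6" lm="0x7f99" l_addr="0x7f000000"
	      l_ld="0x7f1b0000"/>
   </library-list-svr4>

   GDB's solib-svr4.c consumes this document to populate its shared
   library list.  */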
5794
5795 static struct target_ops linux_target_ops = {
5796 linux_create_inferior,
5797 linux_attach,
5798 linux_kill,
5799 linux_detach,
5800 linux_mourn,
5801 linux_join,
5802 linux_thread_alive,
5803 linux_resume,
5804 linux_wait,
5805 linux_fetch_registers,
5806 linux_store_registers,
5807 linux_prepare_to_access_memory,
5808 linux_done_accessing_memory,
5809 linux_read_memory,
5810 linux_write_memory,
5811 linux_look_up_symbols,
5812 linux_request_interrupt,
5813 linux_read_auxv,
5814 linux_insert_point,
5815 linux_remove_point,
5816 linux_stopped_by_watchpoint,
5817 linux_stopped_data_address,
5818 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5819 linux_read_offsets,
5820 #else
5821 NULL,
5822 #endif
5823 #ifdef USE_THREAD_DB
5824 thread_db_get_tls_address,
5825 #else
5826 NULL,
5827 #endif
5828 linux_qxfer_spu,
5829 hostio_last_error_from_errno,
5830 linux_qxfer_osdata,
5831 linux_xfer_siginfo,
5832 linux_supports_non_stop,
5833 linux_async,
5834 linux_start_non_stop,
5835 linux_supports_multi_process,
5836 #ifdef USE_THREAD_DB
5837 thread_db_handle_monitor_command,
5838 #else
5839 NULL,
5840 #endif
5841 linux_common_core_of_thread,
5842 linux_read_loadmap,
5843 linux_process_qsupported,
5844 linux_supports_tracepoints,
5845 linux_read_pc,
5846 linux_write_pc,
5847 linux_thread_stopped,
5848 NULL,
5849 linux_pause_all,
5850 linux_unpause_all,
5851 linux_cancel_breakpoints,
5852 linux_stabilize_threads,
5853 linux_install_fast_tracepoint_jump_pad,
5854 linux_emit_ops,
5855 linux_supports_disable_randomization,
5856 linux_get_min_fast_tracepoint_insn_len,
5857 linux_qxfer_libraries_svr4,
5858 linux_supports_agent,
5859 };
5860
5861 static void
5862 linux_init_signals (void)
5863 {
5864 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5865 to find what the cancel signal actually is. */
5866 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5867 signal (__SIGRTMIN+1, SIG_IGN);
5868 #endif
5869 }
5870
5871 void
5872 initialize_low (void)
5873 {
5874 struct sigaction sigchld_action;
5875 memset (&sigchld_action, 0, sizeof (sigchld_action));
5876 set_target_ops (&linux_target_ops);
5877 set_breakpoint_data (the_low_target.breakpoint,
5878 the_low_target.breakpoint_len);
5879 linux_init_signals ();
5880 linux_test_for_tracefork ();
5881 linux_ptrace_init_warnings ();
5882 #ifdef HAVE_LINUX_REGSETS
5883 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5884 ;
5885 disabled_regsets = xmalloc (num_regsets);
5886 #endif
5887
5888 sigchld_action.sa_handler = sigchld_handler;
5889 sigemptyset (&sigchld_action.sa_mask);
5890 sigchld_action.sa_flags = SA_RESTART;
5891 sigaction (SIGCHLD, &sigchld_action, NULL);
5892 }