1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2013 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "linux-osdata.h"
22 #include "agent.h"
23
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #include <stdio.h>
28 #include <sys/ptrace.h>
29 #include "linux-ptrace.h"
30 #include "linux-procfs.h"
31 #include <signal.h>
32 #include <sys/ioctl.h>
33 #include <fcntl.h>
34 #include <string.h>
35 #include <stdlib.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/syscall.h>
39 #include <sched.h>
40 #include <ctype.h>
41 #include <pwd.h>
42 #include <sys/types.h>
43 #include <dirent.h>
44 #include "gdb_stat.h"
45 #include <sys/vfs.h>
46 #include <sys/uio.h>
47 #ifndef ELFMAG0
48 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
49 then ELFMAG0 will have been defined. If it didn't get included by
50 gdb_proc_service.h then including it will likely introduce a duplicate
51 definition of elf_fpregset_t. */
52 #include <elf.h>
53 #endif
54
55 #ifndef SPUFS_MAGIC
56 #define SPUFS_MAGIC 0x23c9b64e
57 #endif
58
59 #ifdef HAVE_PERSONALITY
60 # include <sys/personality.h>
61 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
62 # define ADDR_NO_RANDOMIZE 0x0040000
63 # endif
64 #endif
65
66 #ifndef O_LARGEFILE
67 #define O_LARGEFILE 0
68 #endif
69
70 #ifndef W_STOPCODE
71 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
72 #endif
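
/* A minimal standalone sketch (illustrative, not gdbserver code) of
   what the W_STOPCODE fallback above encodes: the traditional wait
   status for a signal stop has 0x7f in the low byte and the signal
   number in the next byte, so WIFSTOPPED/WSTOPSIG invert it.  It
   assumes the fallback definition above, since glibc does not always
   expose W_STOPCODE publicly.  */
#if 0
#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

int
main (void)
{
  int status = W_STOPCODE (SIGTRAP);	/* (5 << 8) | 0x7f == 0x057f.  */

  assert (WIFSTOPPED (status));
  assert (WSTOPSIG (status) == SIGTRAP);
  return 0;
}
#endif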
73
74 /* This is the kernel's hard limit. Not to be confused with
75 SIGRTMIN. */
76 #ifndef __SIGRTMIN
77 #define __SIGRTMIN 32
78 #endif
79
80 /* Some targets did not define these ptrace constants from the start,
81 so gdbserver defines them locally here. In the future, these may
82 be removed after they are added to asm/ptrace.h. */
83 #if !(defined(PT_TEXT_ADDR) \
84 || defined(PT_DATA_ADDR) \
85 || defined(PT_TEXT_END_ADDR))
86 #if defined(__mcoldfire__)
87 /* These are still undefined in 3.10 kernels. */
88 #define PT_TEXT_ADDR 49*4
89 #define PT_DATA_ADDR 50*4
90 #define PT_TEXT_END_ADDR 51*4
91 /* BFIN already defines these since at least 2.6.32 kernels. */
92 #elif defined(BFIN)
93 #define PT_TEXT_ADDR 220
94 #define PT_TEXT_END_ADDR 224
95 #define PT_DATA_ADDR 228
96 /* These are still undefined in 3.10 kernels. */
97 #elif defined(__TMS320C6X__)
98 #define PT_TEXT_ADDR (0x10000*4)
99 #define PT_DATA_ADDR (0x10004*4)
100 #define PT_TEXT_END_ADDR (0x10008*4)
101 #endif
102 #endif
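
/* A hedged sketch (not gdbserver code) of how such PT_* constants are
   typically used: they are byte offsets into the ptrace USER area,
   read with PTRACE_PEEKUSER.  gdbserver uses them further down in
   this file to report the inferior's text/data segment addresses on
   the targets that define them.  The helper name is illustrative.  */
#if 0
#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static long
peek_user_word (pid_t pid, long offset)
{
  long value;

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, pid, (void *) offset, (void *) 0);
  if (value == -1 && errno != 0)
    return -1;			/* ptrace failed; errno has the reason.  */
  return value;
}

/* E.g. peek_user_word (pid, PT_TEXT_ADDR) on a target defining it.  */
#endif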
103
104 #ifdef HAVE_LINUX_BTRACE
105 # include "linux-btrace.h"
106 #endif
107
108 #ifndef HAVE_ELF32_AUXV_T
109 /* Copied from glibc's elf.h. */
110 typedef struct
111 {
112 uint32_t a_type; /* Entry type */
113 union
114 {
115 uint32_t a_val; /* Integer value */
116 /* We used to have pointer elements added here. We cannot do that,
117 though, since it does not work when using 32-bit definitions
118 on 64-bit platforms and vice versa. */
119 } a_un;
120 } Elf32_auxv_t;
121 #endif
122
123 #ifndef HAVE_ELF64_AUXV_T
124 /* Copied from glibc's elf.h. */
125 typedef struct
126 {
127 uint64_t a_type; /* Entry type */
128 union
129 {
130 uint64_t a_val; /* Integer value */
131 /* We used to have pointer elements added here. We cannot do that,
132 though, since it does not work when using 32-bit definitions
133 on 64-bit platforms and vice versa. */
134 } a_un;
135 } Elf64_auxv_t;
136 #endif
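
/* A standalone sketch (not gdbserver code) of what these auxv structs
   describe: /proc/<pid>/auxv is an array of such entries, terminated
   by AT_NULL.  The path and helper name below are illustrative.  */
#if 0
#include <elf.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
find_auxv_entry (const char *path, uint64_t type)
{
  Elf64_auxv_t aux;
  uint64_t value = 0;
  FILE *f = fopen (path, "r");

  if (f == NULL)
    return 0;
  while (fread (&aux, sizeof (aux), 1, f) == 1 && aux.a_type != AT_NULL)
    if (aux.a_type == type)
      {
	value = aux.a_un.a_val;
	break;
      }
  fclose (f);
  return value;
}

/* E.g. find_auxv_entry ("/proc/1234/auxv", AT_ENTRY) yields the
   inferior's entry point.  */
#endif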
137
138 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
139 representation of the thread ID.
140
141 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
142 the same as the LWP ID.
143
144 ``all_processes'' is keyed by the "overall process ID", which
145 GNU/Linux calls tgid, "thread group ID". */
146
147 struct inferior_list all_lwps;
148
149 /* A list of all unknown processes which receive stop signals. Some
150 other process will presumably claim each of these as forked
151 children momentarily. */
152
153 struct simple_pid_list
154 {
155 /* The process ID. */
156 int pid;
157
158 /* The status as reported by waitpid. */
159 int status;
160
161 /* Next in chain. */
162 struct simple_pid_list *next;
163 };
164 struct simple_pid_list *stopped_pids;
165
166 /* Trivial list manipulation functions to keep track of a list of new
167 stopped processes. */
168
169 static void
170 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
171 {
172 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
173
174 new_pid->pid = pid;
175 new_pid->status = status;
176 new_pid->next = *listp;
177 *listp = new_pid;
178 }
179
180 static int
181 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
182 {
183 struct simple_pid_list **p;
184
185 for (p = listp; *p != NULL; p = &(*p)->next)
186 if ((*p)->pid == pid)
187 {
188 struct simple_pid_list *next = (*p)->next;
189
190 *statusp = (*p)->status;
191 xfree (*p);
192 *p = next;
193 return 1;
194 }
195 return 0;
196 }
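
/* Usage sketch (hypothetical, in a dead block): a new clone's initial
   stop can be reported by waitpid before the PTRACE_EVENT_CLONE event
   that names it.  The unknown stop is stashed with add_to_pid_list and
   later claimed with pull_pid_from_list; see handle_extended_wait and
   linux_wait_for_lwp below.  */
#if 0
static void
simple_pid_list_example (void)
{
  int status;

  add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));
  if (pull_pid_from_list (&stopped_pids, 1234, &status))
    gdb_assert (WSTOPSIG (status) == SIGSTOP);
}
#endif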
197
198 enum stopping_threads_kind
199 {
200 /* Not stopping threads presently. */
201 NOT_STOPPING_THREADS,
202
203 /* Stopping threads. */
204 STOPPING_THREADS,
205
206 /* Stopping and suspending threads. */
207 STOPPING_AND_SUSPENDING_THREADS
208 };
209
210 /* This is set while stop_all_lwps is in effect. */
211 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
212
213 /* FIXME make into a target method? */
214 int using_threads = 1;
215
216 /* True if we're presently stabilizing threads (moving them out of
217 jump pads). */
218 static int stabilizing_threads;
219
220 static void linux_resume_one_lwp (struct lwp_info *lwp,
221 int step, int signal, siginfo_t *info);
222 static void linux_resume (struct thread_resume *resume_info, size_t n);
223 static void stop_all_lwps (int suspend, struct lwp_info *except);
224 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
225 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
226 static void *add_lwp (ptid_t ptid);
227 static int linux_stopped_by_watchpoint (void);
228 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
229 static void proceed_all_lwps (void);
230 static int finish_step_over (struct lwp_info *lwp);
231 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
232 static int kill_lwp (unsigned long lwpid, int signo);
233
234 /* True if the low target can hardware single-step. Such targets
235 don't need a BREAKPOINT_REINSERT_ADDR callback. */
236
237 static int
238 can_hardware_single_step (void)
239 {
240 return (the_low_target.breakpoint_reinsert_addr == NULL);
241 }
242
243 /* True if the low target supports memory breakpoints. If so, we'll
244 have a GET_PC implementation. */
245
246 static int
247 supports_breakpoints (void)
248 {
249 return (the_low_target.get_pc != NULL);
250 }
251
252 /* Returns true if this target can support fast tracepoints. This
253 does not mean that the in-process agent has been loaded in the
254 inferior. */
255
256 static int
257 supports_fast_tracepoints (void)
258 {
259 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
260 }
261
262 /* True if LWP is stopped in its stepping range. */
263
264 static int
265 lwp_in_step_range (struct lwp_info *lwp)
266 {
267 CORE_ADDR pc = lwp->stop_pc;
268
269 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
270 }
271
272 struct pending_signals
273 {
274 int signal;
275 siginfo_t info;
276 struct pending_signals *prev;
277 };
278
279 /* The read/write ends of the pipe registered as waitable file in the
280 event loop. */
281 static int linux_event_pipe[2] = { -1, -1 };
282
283 /* True if we're currently in async mode. */
284 #define target_is_async_p() (linux_event_pipe[0] != -1)
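
/* A hedged sketch of the self-pipe pattern behind linux_event_pipe:
   when a child-status event arrives, a byte is written to the pipe's
   write end, which wakes the event loop polling the read end.  The
   helper name is illustrative, not the one this file uses.  */
#if 0
#include <errno.h>
#include <unistd.h>

static void
mark_event_pipe (void)
{
  int ret;

  do
    ret = write (linux_event_pipe[1], "+", 1);
  while (ret == -1 && errno == EINTR);
}
#endif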
285
286 static void send_sigstop (struct lwp_info *lwp);
287 static void wait_for_sigstop (struct inferior_list_entry *entry);
288
289 /* Return 1 if HEADER is a 64-bit ELF header, 0 if it is 32-bit, and -1 (with *MACHINE set to EM_NONE) if it is not an ELF header at all. */
290
291 static int
292 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
293 {
294 if (header->e_ident[EI_MAG0] == ELFMAG0
295 && header->e_ident[EI_MAG1] == ELFMAG1
296 && header->e_ident[EI_MAG2] == ELFMAG2
297 && header->e_ident[EI_MAG3] == ELFMAG3)
298 {
299 *machine = header->e_machine;
300 return header->e_ident[EI_CLASS] == ELFCLASS64;
301
302 }
303 *machine = EM_NONE;
304 return -1;
305 }
306
307 /* Return 1 if FILE is a 64-bit ELF file, -1 if the file cannot
308 be opened or is not an ELF file at all, and 0 otherwise
309 (in particular, for 32-bit ELF files). */
310
311 static int
312 elf_64_file_p (const char *file, unsigned int *machine)
313 {
314 Elf64_Ehdr header;
315 int fd;
316
317 fd = open (file, O_RDONLY);
318 if (fd < 0)
319 return -1;
320
321 if (read (fd, &header, sizeof (header)) != sizeof (header))
322 {
323 close (fd);
324 return 0;
325 }
326 close (fd);
327
328 return elf_64_header_p (&header, machine);
329 }
330
331 /* Accept an integer PID; return true if the executable that PID
332 is running is a 64-bit ELF file. */
333
334 int
335 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
336 {
337 char file[PATH_MAX];
338
339 sprintf (file, "/proc/%d/exe", pid);
340 return elf_64_file_p (file, machine);
341 }
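
/* Usage sketch (hypothetical): callers can use the bitness and machine
   of the inferior's executable, e.g. to choose a register layout.  */
#if 0
static void
elf_bitness_example (int pid)
{
  unsigned int machine;

  if (linux_pid_exe_is_elf_64_file (pid, &machine) > 0
      && machine == EM_X86_64)
    {
      /* 64-bit x86-64 inferior; use the 64-bit register layout.  */
    }
}
#endif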
342
343 static void
344 delete_lwp (struct lwp_info *lwp)
345 {
346 remove_thread (get_lwp_thread (lwp));
347 remove_inferior (&all_lwps, &lwp->head);
348 free (lwp->arch_private);
349 free (lwp);
350 }
351
352 /* Add a process to the common process list, and set its private
353 data. */
354
355 static struct process_info *
356 linux_add_process (int pid, int attached)
357 {
358 struct process_info *proc;
359
360 proc = add_process (pid, attached);
361 proc->private = xcalloc (1, sizeof (*proc->private));
362
363 /* Set the arch when the first LWP stops. */
364 proc->private->new_inferior = 1;
365
366 if (the_low_target.new_process != NULL)
367 proc->private->arch_private = the_low_target.new_process ();
368
369 return proc;
370 }
371
372 /* Handle a GNU/Linux extended wait response. If we see a clone
373 event, we need to add the new LWP to our list (and not report the
374 trap to higher layers). */
375
376 static void
377 handle_extended_wait (struct lwp_info *event_child, int wstat)
378 {
379 int event = wstat >> 16;
380 struct lwp_info *new_lwp;
381
382 if (event == PTRACE_EVENT_CLONE)
383 {
384 ptid_t ptid;
385 unsigned long new_pid;
386 int ret, status;
387
388 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_TYPE_ARG3) 0,
389 &new_pid);
390
391 /* If we haven't already seen the new PID stop, wait for it now. */
392 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
393 {
394 /* The new child has a pending SIGSTOP. We can't affect it until it
395 hits the SIGSTOP, but we're already attached. */
396
397 ret = my_waitpid (new_pid, &status, __WALL);
398
399 if (ret == -1)
400 perror_with_name ("waiting for new child");
401 else if (ret != new_pid)
402 warning ("wait returned unexpected PID %d", ret);
403 else if (!WIFSTOPPED (status))
404 warning ("wait returned unexpected status 0x%x", status);
405 }
406
407 ptid = ptid_build (pid_of (event_child), new_pid, 0);
408 new_lwp = (struct lwp_info *) add_lwp (ptid);
409 add_thread (ptid, new_lwp);
410
411 /* Either we're going to immediately resume the new thread
412 or leave it stopped. linux_resume_one_lwp is a nop if it
413 thinks the thread is currently running, so set this first
414 before calling linux_resume_one_lwp. */
415 new_lwp->stopped = 1;
416
417 /* If we're suspending all threads, leave this one suspended
418 too. */
419 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
420 new_lwp->suspended = 1;
421
422 /* Normally we will get the pending SIGSTOP. But in some cases
423 we might get another signal delivered to the group first.
424 If we do get another signal, be sure not to lose it. */
425 if (WSTOPSIG (status) == SIGSTOP)
426 {
427 if (stopping_threads != NOT_STOPPING_THREADS)
428 new_lwp->stop_pc = get_stop_pc (new_lwp);
429 else
430 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
431 }
432 else
433 {
434 new_lwp->stop_expected = 1;
435
436 if (stopping_threads != NOT_STOPPING_THREADS)
437 {
438 new_lwp->stop_pc = get_stop_pc (new_lwp);
439 new_lwp->status_pending_p = 1;
440 new_lwp->status_pending = status;
441 }
442 else
443 /* Pass the signal on. This is what GDB does - except
444 shouldn't we really report it instead? */
445 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
446 }
447
448 /* Always resume the current thread. If we are stopping
449 threads, it will have a pending SIGSTOP; we may as well
450 collect it now. */
451 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
452 }
453 }
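
/* Sketch (illustrative): the PTRACE_EVENT_CLONE handling above only
   fires if clone tracing was enabled on the parent LWP, roughly as
   below.  gdbserver enables it via linux_enable_event_reporting,
   called from linux_wait_for_event when must_set_ptrace_flags is
   set.  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>

static void
enable_clone_events (pid_t pid)
{
  ptrace (PTRACE_SETOPTIONS, pid, (void *) 0,
	  (void *) PTRACE_O_TRACECLONE);
}
#endif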
454
455 /* Return the PC as read from the regcache of LWP, without any
456 adjustment. */
457
458 static CORE_ADDR
459 get_pc (struct lwp_info *lwp)
460 {
461 struct thread_info *saved_inferior;
462 struct regcache *regcache;
463 CORE_ADDR pc;
464
465 if (the_low_target.get_pc == NULL)
466 return 0;
467
468 saved_inferior = current_inferior;
469 current_inferior = get_lwp_thread (lwp);
470
471 regcache = get_thread_regcache (current_inferior, 1);
472 pc = (*the_low_target.get_pc) (regcache);
473
474 if (debug_threads)
475 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
476
477 current_inferior = saved_inferior;
478 return pc;
479 }
480
481 /* This function should only be called if LWP got a SIGTRAP.
482 The SIGTRAP could mean several things.
483
484 On i386, where decr_pc_after_break is non-zero:
485 If we were single-stepping this process using PTRACE_SINGLESTEP,
486 we will get only the one SIGTRAP (even if the instruction we
487 stepped over was a breakpoint). The value of $eip will be the
488 next instruction.
489 If we continue the process using PTRACE_CONT, we will get a
490 SIGTRAP when we hit a breakpoint. The value of $eip will be
491 the instruction after the breakpoint (i.e. needs to be
492 decremented). If we report the SIGTRAP to GDB, we must also
493 report the undecremented PC. If we cancel the SIGTRAP, we
494 must resume at the decremented PC.
495
496 (Presumably, not yet tested) On a non-decr_pc_after_break machine
497 with hardware or kernel single-step:
498 If we single-step over a breakpoint instruction, our PC will
499 point at the following instruction. If we continue and hit a
500 breakpoint instruction, our PC will point at the breakpoint
501 instruction. */
502
503 static CORE_ADDR
504 get_stop_pc (struct lwp_info *lwp)
505 {
506 CORE_ADDR stop_pc;
507
508 if (the_low_target.get_pc == NULL)
509 return 0;
510
511 stop_pc = get_pc (lwp);
512
513 if (WSTOPSIG (lwp->last_status) == SIGTRAP
514 && !lwp->stepping
515 && !lwp->stopped_by_watchpoint
516 && lwp->last_status >> 16 == 0)
517 stop_pc -= the_low_target.decr_pc_after_break;
518
519 if (debug_threads)
520 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
521
522 return stop_pc;
523 }
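
/* Worked example (illustrative): on i386, decr_pc_after_break is 1
   because the int3 breakpoint instruction is one byte and the kernel
   reports the PC after it.  A breakpoint planted at 0x08048000 traps
   with $eip == 0x08048001; the adjustment above reports 0x08048000,
   the breakpoint address itself.  */
#if 0
static CORE_ADDR
i386_adjust_trap_pc (CORE_ADDR pc_after_trap)
{
  return pc_after_trap - 1;	/* decr_pc_after_break on i386.  */
}
#endif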
524
525 static void *
526 add_lwp (ptid_t ptid)
527 {
528 struct lwp_info *lwp;
529
530 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
531 memset (lwp, 0, sizeof (*lwp));
532
533 lwp->head.id = ptid;
534
535 if (the_low_target.new_thread != NULL)
536 lwp->arch_private = the_low_target.new_thread ();
537
538 add_inferior_to_list (&all_lwps, &lwp->head);
539
540 return lwp;
541 }
542
543 /* Start an inferior process and return its pid.
544 ALLARGS is a vector of program-name and args. */
545
546 static int
547 linux_create_inferior (char *program, char **allargs)
548 {
549 #ifdef HAVE_PERSONALITY
550 int personality_orig = 0, personality_set = 0;
551 #endif
552 struct lwp_info *new_lwp;
553 int pid;
554 ptid_t ptid;
555
556 #ifdef HAVE_PERSONALITY
557 if (disable_randomization)
558 {
559 errno = 0;
560 personality_orig = personality (0xffffffff);
561 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
562 {
563 personality_set = 1;
564 personality (personality_orig | ADDR_NO_RANDOMIZE);
565 }
566 if (errno != 0 || (personality_set
567 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
568 warning ("Error disabling address space randomization: %s",
569 strerror (errno));
570 }
571 #endif
572
573 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
574 pid = vfork ();
575 #else
576 pid = fork ();
577 #endif
578 if (pid < 0)
579 perror_with_name ("fork");
580
581 if (pid == 0)
582 {
583 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
584
585 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
586 signal (__SIGRTMIN + 1, SIG_DFL);
587 #endif
588
589 setpgid (0, 0);
590
591 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
592 stdout to stderr so that inferior i/o doesn't corrupt the connection.
593 Also, redirect stdin to /dev/null. */
594 if (remote_connection_is_stdio ())
595 {
596 close (0);
597 open ("/dev/null", O_RDONLY);
598 dup2 (2, 1);
599 if (write (2, "stdin/stdout redirected\n",
600 sizeof ("stdin/stdout redirected\n") - 1) < 0)
601 {
602 /* Errors ignored. */;
603 }
604 }
605
606 execv (program, allargs);
607 if (errno == ENOENT)
608 execvp (program, allargs);
609
610 fprintf (stderr, "Cannot exec %s: %s.\n", program,
611 strerror (errno));
612 fflush (stderr);
613 _exit (0177);
614 }
615
616 #ifdef HAVE_PERSONALITY
617 if (personality_set)
618 {
619 errno = 0;
620 personality (personality_orig);
621 if (errno != 0)
622 warning ("Error restoring address space randomization: %s",
623 strerror (errno));
624 }
625 #endif
626
627 linux_add_process (pid, 0);
628
629 ptid = ptid_build (pid, pid, 0);
630 new_lwp = add_lwp (ptid);
631 add_thread (ptid, new_lwp);
632 new_lwp->must_set_ptrace_flags = 1;
633
634 return pid;
635 }
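
/* A standalone sketch (not gdbserver code) of the create-inferior
   pattern used above: fork, have the child request tracing, exec, and
   collect the exec-induced SIGTRAP stop in the parent.  */
#if 0
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (int argc, char **argv)
{
  pid_t pid;
  int status;

  if (argc < 2)
    return 1;

  pid = fork ();
  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, (void *) 0, (void *) 0);
      execvp (argv[1], &argv[1]);
      _exit (0177);		/* Only reached if exec failed.  */
    }

  waitpid (pid, &status, 0);	/* Child stops with SIGTRAP at exec.  */
  printf ("child %d stopped with signal %d\n", (int) pid,
	  WSTOPSIG (status));
  ptrace (PTRACE_CONT, pid, (void *) 0, (void *) 0);
  return 0;
}
#endif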
636
637 /* Attach to an inferior process. */
638
639 static void
640 linux_attach_lwp_1 (unsigned long lwpid, int initial)
641 {
642 ptid_t ptid;
643 struct lwp_info *new_lwp;
644
645 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
646 != 0)
647 {
648 struct buffer buffer;
649
650 if (!initial)
651 {
652 /* If we fail to attach to an LWP, just warn. */
653 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
654 strerror (errno), errno);
655 fflush (stderr);
656 return;
657 }
658
659 /* If we fail to attach to a process, report an error. */
660 buffer_init (&buffer);
661 linux_ptrace_attach_warnings (lwpid, &buffer);
662 buffer_grow_str0 (&buffer, "");
663 error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
664 lwpid, strerror (errno), errno);
665 }
666
667 if (initial)
668 /* If lwp is the tgid, we handle adding existing threads later.
669 Otherwise we just add lwp without bothering about any other
670 threads. */
671 ptid = ptid_build (lwpid, lwpid, 0);
672 else
673 {
674 /* Note that extracting the pid from the current inferior is
675 safe, since we're always called in the context of the same
676 process as this new thread. */
677 int pid = pid_of (get_thread_lwp (current_inferior));
678 ptid = ptid_build (pid, lwpid, 0);
679 }
680
681 new_lwp = (struct lwp_info *) add_lwp (ptid);
682 add_thread (ptid, new_lwp);
683
684 /* We need to wait for SIGSTOP before being able to make the next
685 ptrace call on this LWP. */
686 new_lwp->must_set_ptrace_flags = 1;
687
688 if (linux_proc_pid_is_stopped (lwpid))
689 {
690 if (debug_threads)
691 fprintf (stderr,
692 "Attached to a stopped process\n");
693
694 /* The process is definitely stopped. It is in a job control
695 stop, unless the kernel predates the TASK_STOPPED /
696 TASK_TRACED distinction, in which case it might be in a
697 ptrace stop. Make sure it is in a ptrace stop; from there we
698 can kill it, signal it, et cetera.
699
700 First make sure there is a pending SIGSTOP. Since we are
701 already attached, the process can not transition from stopped
702 to running without a PTRACE_CONT; so we know this signal will
703 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
704 probably already in the queue (unless this kernel is old
705 enough to use TASK_STOPPED for ptrace stops); but since
706 SIGSTOP is not an RT signal, it can only be queued once. */
707 kill_lwp (lwpid, SIGSTOP);
708
709 /* Finally, resume the stopped process. This will deliver the
710 SIGSTOP (or a higher priority signal, just like normal
711 PTRACE_ATTACH), which we'll catch later on. */
712 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
713 }
714
715 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
716 brings it to a halt.
717
718 There are several cases to consider here:
719
720 1) gdbserver has already attached to the process and is being notified
721 of a new thread that is being created.
722 In this case we should ignore that SIGSTOP and resume the
723 process. This is handled below by setting stop_expected = 1,
724 and the fact that add_thread sets last_resume_kind ==
725 resume_continue.
726
727 2) This is the first thread (the process thread), and we're attaching
728 to it via attach_inferior.
729 In this case we want the process thread to stop.
730 This is handled by having linux_attach set last_resume_kind ==
731 resume_stop after we return.
732
733 If the pid we are attaching to is also the tgid, we attach to and
734 stop all the existing threads. Otherwise, we attach to pid and
735 ignore any other threads in the same group as this pid.
736
737 3) GDB is connecting to gdbserver and is requesting an enumeration of all
738 existing threads.
739 In this case we want the thread to stop.
740 FIXME: This case is currently not properly handled.
741 We should wait for the SIGSTOP but don't. Things work apparently
742 because enough time passes between when we ptrace (ATTACH) and when
743 gdb makes the next ptrace call on the thread.
744
745 On the other hand, if we are currently trying to stop all threads, we
746 should treat the new thread as if we had sent it a SIGSTOP. This works
747 because we are guaranteed that the add_lwp call above added us to the
748 end of the list, and so the new thread has not yet reached
749 wait_for_sigstop (but will). */
750 new_lwp->stop_expected = 1;
751 }
752
753 void
754 linux_attach_lwp (unsigned long lwpid)
755 {
756 linux_attach_lwp_1 (lwpid, 0);
757 }
758
759 /* Attach to PID. If PID is the tgid, attach to it and all
760 of its threads. */
761
762 static int
763 linux_attach (unsigned long pid)
764 {
765 /* Attach to PID. We will check for other threads
766 soon. */
767 linux_attach_lwp_1 (pid, 1);
768 linux_add_process (pid, 1);
769
770 if (!non_stop)
771 {
772 struct thread_info *thread;
773
774 /* Don't ignore the initial SIGSTOP if we just attached to this
775 process. It will be collected by wait shortly. */
776 thread = find_thread_ptid (ptid_build (pid, pid, 0));
777 thread->last_resume_kind = resume_stop;
778 }
779
780 if (linux_proc_get_tgid (pid) == pid)
781 {
782 DIR *dir;
783 char pathname[128];
784
785 sprintf (pathname, "/proc/%ld/task", pid);
786
787 dir = opendir (pathname);
788
789 if (!dir)
790 {
791 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
792 fflush (stderr);
793 }
794 else
795 {
796 /* At this point we attached to the tgid. Scan the task for
797 existing threads. */
798 unsigned long lwp;
799 int new_threads_found;
800 int iterations = 0;
801 struct dirent *dp;
802
803 while (iterations < 2)
804 {
805 new_threads_found = 0;
806 /* Add all the other threads. While we go through the
807 threads, new threads may be spawned. Cycle through
808 the list of threads until we have done two iterations without
809 finding new threads. */
810 while ((dp = readdir (dir)) != NULL)
811 {
812 /* Fetch one lwp. */
813 lwp = strtoul (dp->d_name, NULL, 10);
814
815 /* Is this a new thread? */
816 if (lwp
817 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
818 {
819 linux_attach_lwp_1 (lwp, 0);
820 new_threads_found++;
821
822 if (debug_threads)
823 fprintf (stderr, "\
824 Found and attached to new lwp %ld\n", lwp);
825 }
826 }
827
828 if (!new_threads_found)
829 iterations++;
830 else
831 iterations = 0;
832
833 rewinddir (dir);
834 }
835 closedir (dir);
836 }
837 }
838
839 return 0;
840 }
841
842 struct counter
843 {
844 int pid;
845 int count;
846 };
847
848 static int
849 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
850 {
851 struct counter *counter = args;
852
853 if (ptid_get_pid (entry->id) == counter->pid)
854 {
855 if (++counter->count > 1)
856 return 1;
857 }
858
859 return 0;
860 }
861
862 static int
863 last_thread_of_process_p (struct thread_info *thread)
864 {
865 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
866 int pid = ptid_get_pid (ptid);
867 struct counter counter = { pid , 0 };
868
869 return (find_inferior (&all_threads,
870 second_thread_of_pid_p, &counter) == NULL);
871 }
872
873 /* Kill LWP. */
874
875 static void
876 linux_kill_one_lwp (struct lwp_info *lwp)
877 {
878 int pid = lwpid_of (lwp);
879
880 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
881 there is no signal context, and ptrace(PTRACE_KILL) (or
882 ptrace(PTRACE_CONT, SIGKILL), which is pretty much the same) acts
883 like ptrace(PTRACE_CONT, pid, 0, 0) and just resumes the tracee.
884 A better alternative is to kill with SIGKILL. We only need one
885 SIGKILL per process, not one for each thread. But since we still
886 support linuxthreads, and we also support debugging programs that
887 use raw clone without CLONE_THREAD, we send one for each thread.
888 For years we used PTRACE_KILL only, so we're being a bit paranoid
889 about some old kernels where PTRACE_KILL might work better
890 (dubious if there are any such, but that's why it's paranoia).
891 So we try SIGKILL first and PTRACE_KILL second, and that way
892 we're fine everywhere. */
893
894 errno = 0;
895 kill (pid, SIGKILL);
896 if (debug_threads)
897 fprintf (stderr,
898 "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
899 target_pid_to_str (ptid_of (lwp)),
900 errno ? strerror (errno) : "OK");
901
902 errno = 0;
903 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
904 if (debug_threads)
905 fprintf (stderr,
906 "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
907 target_pid_to_str (ptid_of (lwp)),
908 errno ? strerror (errno) : "OK");
909 }
910
911 /* Callback for `find_inferior'. Kills an lwp of a given process,
912 except the leader. */
913
914 static int
915 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
916 {
917 struct thread_info *thread = (struct thread_info *) entry;
918 struct lwp_info *lwp = get_thread_lwp (thread);
919 int wstat;
920 int pid = * (int *) args;
921
922 if (ptid_get_pid (entry->id) != pid)
923 return 0;
924
925 /* We avoid killing the first thread here, because of a Linux kernel (at
926 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
927 the children get a chance to be reaped, it will remain a zombie
928 forever. */
929
930 if (lwpid_of (lwp) == pid)
931 {
932 if (debug_threads)
933 fprintf (stderr, "lkop: is last of process %s\n",
934 target_pid_to_str (entry->id));
935 return 0;
936 }
937
938 do
939 {
940 linux_kill_one_lwp (lwp);
941
942 /* Make sure it died. The loop is most likely unnecessary. */
943 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
944 } while (pid > 0 && WIFSTOPPED (wstat));
945
946 return 0;
947 }
948
949 static int
950 linux_kill (int pid)
951 {
952 struct process_info *process;
953 struct lwp_info *lwp;
954 int wstat;
955 int lwpid;
956
957 process = find_process_pid (pid);
958 if (process == NULL)
959 return -1;
960
961 /* If we're killing a running inferior, make sure it is stopped
962 first, as PTRACE_KILL will not work otherwise. */
963 stop_all_lwps (0, NULL);
964
965 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
966
967 /* See the comment in linux_kill_one_lwp. We did not kill the first
968 thread in the list, so do so now. */
969 lwp = find_lwp_pid (pid_to_ptid (pid));
970
971 if (lwp == NULL)
972 {
973 if (debug_threads)
974 fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
975 pid);
976 }
977 else
978 {
979 if (debug_threads)
980 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
981 lwpid_of (lwp), pid);
982
983 do
984 {
985 linux_kill_one_lwp (lwp);
986
987 /* Make sure it died. The loop is most likely unnecessary. */
988 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
989 } while (lwpid > 0 && WIFSTOPPED (wstat));
990 }
991
992 the_target->mourn (process);
993
994 /* Since we presently can only stop all lwps of all processes, we
995 need to unstop lwps of other processes. */
996 unstop_all_lwps (0, NULL);
997 return 0;
998 }
999
1000 /* Get pending signal of THREAD, for detaching purposes. This is the
1001 signal the thread last stopped for, which we need to deliver to the
1002 thread when detaching; otherwise, it would be suppressed/lost. */
1003
1004 static int
1005 get_detach_signal (struct thread_info *thread)
1006 {
1007 enum gdb_signal signo = GDB_SIGNAL_0;
1008 int status;
1009 struct lwp_info *lp = get_thread_lwp (thread);
1010
1011 if (lp->status_pending_p)
1012 status = lp->status_pending;
1013 else
1014 {
1015 /* If the thread had been suspended by gdbserver, and it stopped
1016 cleanly, then it'll have stopped with SIGSTOP. But we don't
1017 want to deliver that SIGSTOP. */
1018 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1019 || thread->last_status.value.sig == GDB_SIGNAL_0)
1020 return 0;
1021
1022 /* Otherwise, we may need to deliver the signal we
1023 intercepted. */
1024 status = lp->last_status;
1025 }
1026
1027 if (!WIFSTOPPED (status))
1028 {
1029 if (debug_threads)
1030 fprintf (stderr,
1031 "GPS: lwp %s hasn't stopped: no pending signal\n",
1032 target_pid_to_str (ptid_of (lp)));
1033 return 0;
1034 }
1035
1036 /* Extended wait statuses aren't real SIGTRAPs. */
1037 if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1038 {
1039 if (debug_threads)
1040 fprintf (stderr,
1041 "GPS: lwp %s had stopped with extended "
1042 "status: no pending signal\n",
1043 target_pid_to_str (ptid_of (lp)));
1044 return 0;
1045 }
1046
1047 signo = gdb_signal_from_host (WSTOPSIG (status));
1048
1049 if (program_signals_p && !program_signals[signo])
1050 {
1051 if (debug_threads)
1052 fprintf (stderr,
1053 "GPS: lwp %s had signal %s, but it is in nopass state\n",
1054 target_pid_to_str (ptid_of (lp)),
1055 gdb_signal_to_string (signo));
1056 return 0;
1057 }
1058 else if (!program_signals_p
1059 /* If we have no way to know which signals GDB does not
1060 want to have passed to the program, assume
1061 SIGTRAP/SIGINT, which is GDB's default. */
1062 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1063 {
1064 if (debug_threads)
1065 fprintf (stderr,
1066 "GPS: lwp %s had signal %s, "
1067 "but we don't know if we should pass it. Default to not.\n",
1068 target_pid_to_str (ptid_of (lp)),
1069 gdb_signal_to_string (signo));
1070 return 0;
1071 }
1072 else
1073 {
1074 if (debug_threads)
1075 fprintf (stderr,
1076 "GPS: lwp %s has pending signal %s: delivering it.\n",
1077 target_pid_to_str (ptid_of (lp)),
1078 gdb_signal_to_string (signo));
1079
1080 return WSTOPSIG (status);
1081 }
1082 }
1083
1084 static int
1085 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1086 {
1087 struct thread_info *thread = (struct thread_info *) entry;
1088 struct lwp_info *lwp = get_thread_lwp (thread);
1089 int pid = * (int *) args;
1090 int sig;
1091
1092 if (ptid_get_pid (entry->id) != pid)
1093 return 0;
1094
1095 /* If there is a pending SIGSTOP, get rid of it. */
1096 if (lwp->stop_expected)
1097 {
1098 if (debug_threads)
1099 fprintf (stderr,
1100 "Sending SIGCONT to %s\n",
1101 target_pid_to_str (ptid_of (lwp)));
1102
1103 kill_lwp (lwpid_of (lwp), SIGCONT);
1104 lwp->stop_expected = 0;
1105 }
1106
1107 /* Flush any pending changes to the process's registers. */
1108 regcache_invalidate_thread (get_lwp_thread (lwp));
1109
1110 /* Pass on any pending signal for this thread. */
1111 sig = get_detach_signal (thread);
1112
1113 /* Finally, let it resume. */
1114 if (the_low_target.prepare_to_resume != NULL)
1115 the_low_target.prepare_to_resume (lwp);
1116 if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1117 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1118 error (_("Can't detach %s: %s"),
1119 target_pid_to_str (ptid_of (lwp)),
1120 strerror (errno));
1121
1122 delete_lwp (lwp);
1123 return 0;
1124 }
1125
1126 static int
1127 linux_detach (int pid)
1128 {
1129 struct process_info *process;
1130
1131 process = find_process_pid (pid);
1132 if (process == NULL)
1133 return -1;
1134
1135 /* Stop all threads before detaching. First, ptrace requires that
1136 the thread is stopped to successfully detach. Second, thread_db
1137 may need to uninstall thread event breakpoints from memory, which
1138 only works with a stopped process anyway. */
1139 stop_all_lwps (0, NULL);
1140
1141 #ifdef USE_THREAD_DB
1142 thread_db_detach (process);
1143 #endif
1144
1145 /* Stabilize threads (move out of jump pads). */
1146 stabilize_threads ();
1147
1148 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1149
1150 the_target->mourn (process);
1151
1152 /* Since we presently can only stop all lwps of all processes, we
1153 need to unstop lwps of other processes. */
1154 unstop_all_lwps (0, NULL);
1155 return 0;
1156 }
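
/* Sketch (illustrative): PTRACE_DETACH takes a signal number as its
   data argument, which the kernel delivers as the tracee resumes;
   0 delivers nothing.  linux_detach_one_lwp above relies on this to
   hand back the signal the thread last stopped for.  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>

static void
detach_with_signal (pid_t lwpid, int sig)
{
  ptrace (PTRACE_DETACH, lwpid, (void *) 0, (void *) (long) sig);
}
#endif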
1157
1158 /* Remove all LWPs that belong to process PROC from the lwp list. */
1159
1160 static int
1161 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1162 {
1163 struct lwp_info *lwp = (struct lwp_info *) entry;
1164 struct process_info *process = proc;
1165
1166 if (pid_of (lwp) == pid_of (process))
1167 delete_lwp (lwp);
1168
1169 return 0;
1170 }
1171
1172 static void
1173 linux_mourn (struct process_info *process)
1174 {
1175 struct process_info_private *priv;
1176
1177 #ifdef USE_THREAD_DB
1178 thread_db_mourn (process);
1179 #endif
1180
1181 find_inferior (&all_lwps, delete_lwp_callback, process);
1182
1183 /* Free all the private data. */
1184 priv = process->private;
1185 free (priv->arch_private);
1186 free (priv);
1187 process->private = NULL;
1188
1189 remove_process (process);
1190 }
1191
1192 static void
1193 linux_join (int pid)
1194 {
1195 int status, ret;
1196
1197 do {
1198 ret = my_waitpid (pid, &status, 0);
1199 if (WIFEXITED (status) || WIFSIGNALED (status))
1200 break;
1201 } while (ret != -1 || errno != ECHILD);
1202 }
1203
1204 /* Return nonzero if the given thread is still alive. */
1205 static int
1206 linux_thread_alive (ptid_t ptid)
1207 {
1208 struct lwp_info *lwp = find_lwp_pid (ptid);
1209
1210 /* We assume we always know if a thread exits. If a whole process
1211 exited but we still haven't been able to report it to GDB, we'll
1212 hold on to the last lwp of the dead process. */
1213 if (lwp != NULL)
1214 return !lwp->dead;
1215 else
1216 return 0;
1217 }
1218
1219 /* Return 1 if this lwp has an interesting status pending. */
1220 static int
1221 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1222 {
1223 struct lwp_info *lwp = (struct lwp_info *) entry;
1224 ptid_t ptid = * (ptid_t *) arg;
1225 struct thread_info *thread;
1226
1227 /* Check if we're only interested in events from a specific process
1228 or its lwps. */
1229 if (!ptid_equal (minus_one_ptid, ptid)
1230 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
1231 return 0;
1232
1233 thread = get_lwp_thread (lwp);
1234
1235 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1236 report any pending status the LWP may have. */
1237 if (thread->last_resume_kind == resume_stop
1238 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1239 return 0;
1240
1241 return lwp->status_pending_p;
1242 }
1243
1244 static int
1245 same_lwp (struct inferior_list_entry *entry, void *data)
1246 {
1247 ptid_t ptid = *(ptid_t *) data;
1248 int lwp;
1249
1250 if (ptid_get_lwp (ptid) != 0)
1251 lwp = ptid_get_lwp (ptid);
1252 else
1253 lwp = ptid_get_pid (ptid);
1254
1255 if (ptid_get_lwp (entry->id) == lwp)
1256 return 1;
1257
1258 return 0;
1259 }
1260
1261 struct lwp_info *
1262 find_lwp_pid (ptid_t ptid)
1263 {
1264 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1265 }
1266
1267 static struct lwp_info *
1268 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1269 {
1270 int ret;
1271 int to_wait_for = -1;
1272 struct lwp_info *child = NULL;
1273
1274 if (debug_threads)
1275 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1276
1277 if (ptid_equal (ptid, minus_one_ptid))
1278 to_wait_for = -1; /* any child */
1279 else
1280 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1281
1282 options |= __WALL;
1283
1284 retry:
1285
1286 ret = my_waitpid (to_wait_for, wstatp, options);
1287 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1288 return NULL;
1289 else if (ret == -1)
1290 perror_with_name ("waitpid");
1291
1292 if (debug_threads
1293 && (!WIFSTOPPED (*wstatp)
1294 || (WSTOPSIG (*wstatp) != 32
1295 && WSTOPSIG (*wstatp) != 33)))
1296 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1297
1298 child = find_lwp_pid (pid_to_ptid (ret));
1299
1300 /* If we didn't find a process, one of two things presumably happened:
1301 - A process we started and then detached from has exited. Ignore it.
1302 - A process we are controlling has forked and the new child's stop
1303 was reported to us by the kernel. Save its PID. */
1304 if (child == NULL && WIFSTOPPED (*wstatp))
1305 {
1306 add_to_pid_list (&stopped_pids, ret, *wstatp);
1307 goto retry;
1308 }
1309 else if (child == NULL)
1310 goto retry;
1311
1312 child->stopped = 1;
1313
1314 child->last_status = *wstatp;
1315
1316 if (WIFSTOPPED (*wstatp))
1317 {
1318 struct process_info *proc;
1319
1320 /* Architecture-specific setup after inferior is running. This
1321 needs to happen after we have attached to the inferior and it
1322 is stopped for the first time, but before we access any
1323 inferior registers. */
1324 proc = find_process_pid (pid_of (child));
1325 if (proc->private->new_inferior)
1326 {
1327 struct thread_info *saved_inferior;
1328
1329 saved_inferior = current_inferior;
1330 current_inferior = get_lwp_thread (child);
1331
1332 the_low_target.arch_setup ();
1333
1334 current_inferior = saved_inferior;
1335
1336 proc->private->new_inferior = 0;
1337 }
1338 }
1339
1340 /* Fetch the possibly triggered data watchpoint info and store it in
1341 CHILD.
1342
1343 On some archs, like x86, that use debug registers to set
1344 watchpoints, it's possible that the way to know which watched
1345 address trapped, is to check the register that is used to select
1346 which address to watch. Problem is, between setting the
1347 watchpoint and reading back which data address trapped, the user
1348 may change the set of watchpoints, and, as a consequence, GDB
1349 changes the debug registers in the inferior. To avoid reading
1350 back a stale stopped-data-address when that happens, we cache in
1351 LP the fact that a watchpoint trapped, and the corresponding data
1352 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1353 changes the debug registers meanwhile, we have the cached data we
1354 can rely on. */
1355
1356 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1357 {
1358 if (the_low_target.stopped_by_watchpoint == NULL)
1359 {
1360 child->stopped_by_watchpoint = 0;
1361 }
1362 else
1363 {
1364 struct thread_info *saved_inferior;
1365
1366 saved_inferior = current_inferior;
1367 current_inferior = get_lwp_thread (child);
1368
1369 child->stopped_by_watchpoint
1370 = the_low_target.stopped_by_watchpoint ();
1371
1372 if (child->stopped_by_watchpoint)
1373 {
1374 if (the_low_target.stopped_data_address != NULL)
1375 child->stopped_data_address
1376 = the_low_target.stopped_data_address ();
1377 else
1378 child->stopped_data_address = 0;
1379 }
1380
1381 current_inferior = saved_inferior;
1382 }
1383 }
1384
1385 /* Store the STOP_PC, with adjustment applied. This depends on the
1386 architecture being defined already (so that CHILD has a valid
1387 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1388 not). */
1389 if (WIFSTOPPED (*wstatp))
1390 child->stop_pc = get_stop_pc (child);
1391
1392 if (debug_threads
1393 && WIFSTOPPED (*wstatp)
1394 && the_low_target.get_pc != NULL)
1395 {
1396 struct thread_info *saved_inferior = current_inferior;
1397 struct regcache *regcache;
1398 CORE_ADDR pc;
1399
1400 current_inferior = get_lwp_thread (child);
1401 regcache = get_thread_regcache (current_inferior, 1);
1402 pc = (*the_low_target.get_pc) (regcache);
1403 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1404 current_inferior = saved_inferior;
1405 }
1406
1407 return child;
1408 }
1409
1410 /* This function should only be called if the LWP got a SIGTRAP.
1411
1412 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1413 event was handled, 0 otherwise. */
1414
1415 static int
1416 handle_tracepoints (struct lwp_info *lwp)
1417 {
1418 struct thread_info *tinfo = get_lwp_thread (lwp);
1419 int tpoint_related_event = 0;
1420
1421 /* If this tracepoint hit causes a tracing stop, we'll immediately
1422 uninsert tracepoints. To do this, we temporarily pause all
1423 threads, unpatch away, and then unpause threads. We need to make
1424 sure the unpausing doesn't resume LWP too. */
1425 lwp->suspended++;
1426
1427 /* And we need to be sure that any all-threads-stopping doesn't try
1428 to move threads out of the jump pads, as it could deadlock the
1429 inferior (LWP could be in the jump pad, maybe even holding the
1430 lock.) */
1431
1432 /* Do any necessary step collect actions. */
1433 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1434
1435 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1436
1437 /* See if we just hit a tracepoint and do its main collect
1438 actions. */
1439 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1440
1441 lwp->suspended--;
1442
1443 gdb_assert (lwp->suspended == 0);
1444 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1445
1446 if (tpoint_related_event)
1447 {
1448 if (debug_threads)
1449 fprintf (stderr, "got a tracepoint event\n");
1450 return 1;
1451 }
1452
1453 return 0;
1454 }
1455
1456 /* Convenience wrapper. Returns true if LWP is presently collecting a
1457 fast tracepoint. */
1458
1459 static int
1460 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1461 struct fast_tpoint_collect_status *status)
1462 {
1463 CORE_ADDR thread_area;
1464
1465 if (the_low_target.get_thread_area == NULL)
1466 return 0;
1467
1468 /* Get the thread area address. This is used to recognize which
1469 thread is which when tracing with the in-process agent library.
1470 We don't read anything from the address, and treat it as opaque;
1471 it's the address itself that we assume is unique per-thread. */
1472 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1473 return 0;
1474
1475 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1476 }
1477
1478 /* The reason we resume in the caller is that we want to be able
1479 to pass lwp->status_pending as WSTAT, and we need to clear
1480 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1481 refuses to resume. */
1482
1483 static int
1484 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1485 {
1486 struct thread_info *saved_inferior;
1487
1488 saved_inferior = current_inferior;
1489 current_inferior = get_lwp_thread (lwp);
1490
1491 if ((wstat == NULL
1492 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1493 && supports_fast_tracepoints ()
1494 && agent_loaded_p ())
1495 {
1496 struct fast_tpoint_collect_status status;
1497 int r;
1498
1499 if (debug_threads)
1500 fprintf (stderr, "\
1501 Checking whether LWP %ld needs to move out of the jump pad.\n",
1502 lwpid_of (lwp));
1503
1504 r = linux_fast_tracepoint_collecting (lwp, &status);
1505
1506 if (wstat == NULL
1507 || (WSTOPSIG (*wstat) != SIGILL
1508 && WSTOPSIG (*wstat) != SIGFPE
1509 && WSTOPSIG (*wstat) != SIGSEGV
1510 && WSTOPSIG (*wstat) != SIGBUS))
1511 {
1512 lwp->collecting_fast_tracepoint = r;
1513
1514 if (r != 0)
1515 {
1516 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1517 {
1518 /* Haven't executed the original instruction yet.
1519 Set breakpoint there, and wait till it's hit,
1520 then single-step until exiting the jump pad. */
1521 lwp->exit_jump_pad_bkpt
1522 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1523 }
1524
1525 if (debug_threads)
1526 fprintf (stderr, "\
1527 Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1528 lwpid_of (lwp));
1529 current_inferior = saved_inferior;
1530
1531 return 1;
1532 }
1533 }
1534 else
1535 {
1536 /* If we get a synchronous signal while collecting, *and*
1537 while executing the (relocated) original instruction,
1538 reset the PC to point at the tpoint address, before
1539 reporting to GDB. Otherwise, it's an IPA lib bug: just
1540 report the signal to GDB, and pray for the best. */
1541
1542 lwp->collecting_fast_tracepoint = 0;
1543
1544 if (r != 0
1545 && (status.adjusted_insn_addr <= lwp->stop_pc
1546 && lwp->stop_pc < status.adjusted_insn_addr_end))
1547 {
1548 siginfo_t info;
1549 struct regcache *regcache;
1550
1551 /* The si_addr on a few signals references the address
1552 of the faulting instruction. Adjust that as
1553 well. */
1554 if ((WSTOPSIG (*wstat) == SIGILL
1555 || WSTOPSIG (*wstat) == SIGFPE
1556 || WSTOPSIG (*wstat) == SIGBUS
1557 || WSTOPSIG (*wstat) == SIGSEGV)
1558 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
1559 (PTRACE_TYPE_ARG3) 0, &info) == 0
1560 /* Final check just to make sure we don't clobber
1561 the siginfo of non-kernel-sent signals. */
1562 && (uintptr_t) info.si_addr == lwp->stop_pc)
1563 {
1564 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1565 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
1566 (PTRACE_TYPE_ARG3) 0, &info);
1567 }
1568
1569 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1570 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1571 lwp->stop_pc = status.tpoint_addr;
1572
1573 /* Cancel any fast tracepoint lock this thread was
1574 holding. */
1575 force_unlock_trace_buffer ();
1576 }
1577
1578 if (lwp->exit_jump_pad_bkpt != NULL)
1579 {
1580 if (debug_threads)
1581 fprintf (stderr,
1582 "Cancelling fast exit-jump-pad: removing bkpt. "
1583 "stopping all threads momentarily.\n");
1584
1585 stop_all_lwps (1, lwp);
1586 cancel_breakpoints ();
1587
1588 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1589 lwp->exit_jump_pad_bkpt = NULL;
1590
1591 unstop_all_lwps (1, lwp);
1592
1593 gdb_assert (lwp->suspended >= 0);
1594 }
1595 }
1596 }
1597
1598 if (debug_threads)
1599 fprintf (stderr, "\
1600 Checking whether LWP %ld needs to move out of the jump pad...no\n",
1601 lwpid_of (lwp));
1602
1603 current_inferior = saved_inferior;
1604 return 0;
1605 }
1606
1607 /* Enqueue one signal in the "signals to report later when out of the
1608 jump pad" list. */
1609
1610 static void
1611 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1612 {
1613 struct pending_signals *p_sig;
1614
1615 if (debug_threads)
1616 fprintf (stderr, "\
1617 Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1618
1619 if (debug_threads)
1620 {
1621 struct pending_signals *sig;
1622
1623 for (sig = lwp->pending_signals_to_report;
1624 sig != NULL;
1625 sig = sig->prev)
1626 fprintf (stderr,
1627 " Already queued %d\n",
1628 sig->signal);
1629
1630 fprintf (stderr, " (no more currently queued signals)\n");
1631 }
1632
1633 /* Don't enqueue non-RT signals if they are already in the deferred
1634 queue. (SIGSTOP is the easiest signal to see ending up here
1635 twice.) */
1636 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1637 {
1638 struct pending_signals *sig;
1639
1640 for (sig = lwp->pending_signals_to_report;
1641 sig != NULL;
1642 sig = sig->prev)
1643 {
1644 if (sig->signal == WSTOPSIG (*wstat))
1645 {
1646 if (debug_threads)
1647 fprintf (stderr,
1648 "Not requeuing already queued non-RT signal %d"
1649 " for LWP %ld\n",
1650 sig->signal,
1651 lwpid_of (lwp));
1652 return;
1653 }
1654 }
1655 }
1656
1657 p_sig = xmalloc (sizeof (*p_sig));
1658 p_sig->prev = lwp->pending_signals_to_report;
1659 p_sig->signal = WSTOPSIG (*wstat);
1660 memset (&p_sig->info, 0, sizeof (siginfo_t));
1661 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1662 &p_sig->info);
1663
1664 lwp->pending_signals_to_report = p_sig;
1665 }
1666
1667 /* Dequeue one signal from the "signals to report later when out of
1668 the jump pad" list. */
1669
1670 static int
1671 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1672 {
1673 if (lwp->pending_signals_to_report != NULL)
1674 {
1675 struct pending_signals **p_sig;
1676
1677 p_sig = &lwp->pending_signals_to_report;
1678 while ((*p_sig)->prev != NULL)
1679 p_sig = &(*p_sig)->prev;
1680
1681 *wstat = W_STOPCODE ((*p_sig)->signal);
1682 if ((*p_sig)->info.si_signo != 0)
1683 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1684 &(*p_sig)->info);
1685 free (*p_sig);
1686 *p_sig = NULL;
1687
1688 if (debug_threads)
1689 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1690 WSTOPSIG (*wstat), lwpid_of (lwp));
1691
1692 if (debug_threads)
1693 {
1694 struct pending_signals *sig;
1695
1696 for (sig = lwp->pending_signals_to_report;
1697 sig != NULL;
1698 sig = sig->prev)
1699 fprintf (stderr,
1700 " Still queued %d\n",
1701 sig->signal);
1702
1703 fprintf (stderr, " (no more queued signals)\n");
1704 }
1705
1706 return 1;
1707 }
1708
1709 return 0;
1710 }
1711
1712 /* Arrange for a breakpoint to be hit again later. We don't keep the
1713 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1714 will handle the current event; eventually we will resume this LWP,
1715 and this breakpoint will trap again. */
1716
1717 static int
1718 cancel_breakpoint (struct lwp_info *lwp)
1719 {
1720 struct thread_info *saved_inferior;
1721
1722 /* There's nothing to do if we don't support breakpoints. */
1723 if (!supports_breakpoints ())
1724 return 0;
1725
1726 /* breakpoint_at reads from current inferior. */
1727 saved_inferior = current_inferior;
1728 current_inferior = get_lwp_thread (lwp);
1729
1730 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1731 {
1732 if (debug_threads)
1733 fprintf (stderr,
1734 "CB: Push back breakpoint for %s\n",
1735 target_pid_to_str (ptid_of (lwp)));
1736
1737 /* Back up the PC if necessary. */
1738 if (the_low_target.decr_pc_after_break)
1739 {
1740 struct regcache *regcache
1741 = get_thread_regcache (current_inferior, 1);
1742 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1743 }
1744
1745 current_inferior = saved_inferior;
1746 return 1;
1747 }
1748 else
1749 {
1750 if (debug_threads)
1751 fprintf (stderr,
1752 "CB: No breakpoint found at %s for [%s]\n",
1753 paddress (lwp->stop_pc),
1754 target_pid_to_str (ptid_of (lwp)));
1755 }
1756
1757 current_inferior = saved_inferior;
1758 return 0;
1759 }
1760
1761 /* When the event-loop is doing a step-over, this points at the thread
1762 being stepped. */
1763 ptid_t step_over_bkpt;
1764
1765 /* Wait for an event from child PID. If PID is -1, wait for any
1766 child. Store the stop status through the status pointer WSTAT.
1767 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1768 event was found and OPTIONS contains WNOHANG. Return the PID of
1769 the stopped child otherwise. */
1770
1771 static int
1772 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1773 {
1774 struct lwp_info *event_child, *requested_child;
1775 ptid_t wait_ptid;
1776
1777 event_child = NULL;
1778 requested_child = NULL;
1779
1780 /* Check for a lwp with a pending status. */
1781
1782 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1783 {
1784 event_child = (struct lwp_info *)
1785 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1786 if (debug_threads && event_child)
1787 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1788 }
1789 else
1790 {
1791 requested_child = find_lwp_pid (ptid);
1792
1793 if (stopping_threads == NOT_STOPPING_THREADS
1794 && requested_child->status_pending_p
1795 && requested_child->collecting_fast_tracepoint)
1796 {
1797 enqueue_one_deferred_signal (requested_child,
1798 &requested_child->status_pending);
1799 requested_child->status_pending_p = 0;
1800 requested_child->status_pending = 0;
1801 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1802 }
1803
1804 if (requested_child->suspended
1805 && requested_child->status_pending_p)
1806 fatal ("requesting an event out of a suspended child?");
1807
1808 if (requested_child->status_pending_p)
1809 event_child = requested_child;
1810 }
1811
1812 if (event_child != NULL)
1813 {
1814 if (debug_threads)
1815 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1816 lwpid_of (event_child), event_child->status_pending);
1817 *wstat = event_child->status_pending;
1818 event_child->status_pending_p = 0;
1819 event_child->status_pending = 0;
1820 current_inferior = get_lwp_thread (event_child);
1821 return lwpid_of (event_child);
1822 }
1823
1824 if (ptid_is_pid (ptid))
1825 {
1826 /* A request to wait for a specific tgid. This is not possible
1827 with waitpid, so instead, we wait for any child, and leave
1828 children we're not interested in right now with a pending
1829 status to report later. */
1830 wait_ptid = minus_one_ptid;
1831 }
1832 else
1833 wait_ptid = ptid;
1834
1835 /* We only enter this loop if no process has a pending wait status. Thus
1836 any action taken in response to a wait status inside this loop is
1837 responding as soon as we detect the status, not after any pending
1838 events. */
1839 while (1)
1840 {
1841 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1842
1843 if ((options & WNOHANG) && event_child == NULL)
1844 {
1845 if (debug_threads)
1846 fprintf (stderr, "WNOHANG set, no event found\n");
1847 return 0;
1848 }
1849
1850 if (event_child == NULL)
1851 error ("event from unknown child");
1852
1853 if (ptid_is_pid (ptid)
1854 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1855 {
1856 if (! WIFSTOPPED (*wstat))
1857 mark_lwp_dead (event_child, *wstat);
1858 else
1859 {
1860 event_child->status_pending_p = 1;
1861 event_child->status_pending = *wstat;
1862 }
1863 continue;
1864 }
1865
1866 current_inferior = get_lwp_thread (event_child);
1867
1868 /* Check for thread exit. */
1869 if (! WIFSTOPPED (*wstat))
1870 {
1871 if (debug_threads)
1872 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1873
1874 /* If the last thread is exiting, just return. */
1875 if (last_thread_of_process_p (current_inferior))
1876 {
1877 if (debug_threads)
1878 fprintf (stderr, "LWP %ld is last lwp of process\n",
1879 lwpid_of (event_child));
1880 return lwpid_of (event_child);
1881 }
1882
1883 if (!non_stop)
1884 {
1885 current_inferior = (struct thread_info *) all_threads.head;
1886 if (debug_threads)
1887 fprintf (stderr, "Current inferior is now %ld\n",
1888 lwpid_of (get_thread_lwp (current_inferior)));
1889 }
1890 else
1891 {
1892 current_inferior = NULL;
1893 if (debug_threads)
1894 fprintf (stderr, "Current inferior is now <NULL>\n");
1895 }
1896
1897 /* If we were waiting for this particular child to do something...
1898 well, it did something. */
1899 if (requested_child != NULL)
1900 {
1901 int lwpid = lwpid_of (event_child);
1902
1903 /* Cancel the step-over operation --- the thread that
1904 started it is gone. */
1905 if (finish_step_over (event_child))
1906 unstop_all_lwps (1, event_child);
1907 delete_lwp (event_child);
1908 return lwpid;
1909 }
1910
1911 delete_lwp (event_child);
1912
1913 /* Wait for a more interesting event. */
1914 continue;
1915 }
1916
1917 if (event_child->must_set_ptrace_flags)
1918 {
1919 linux_enable_event_reporting (lwpid_of (event_child));
1920 event_child->must_set_ptrace_flags = 0;
1921 }
1922
1923 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1924 && *wstat >> 16 != 0)
1925 {
1926 handle_extended_wait (event_child, *wstat);
1927 continue;
1928 }
1929
1930 if (WIFSTOPPED (*wstat)
1931 && WSTOPSIG (*wstat) == SIGSTOP
1932 && event_child->stop_expected)
1933 {
1934 int should_stop;
1935
1936 if (debug_threads)
1937 fprintf (stderr, "Expected stop.\n");
1938 event_child->stop_expected = 0;
1939
1940 should_stop = (current_inferior->last_resume_kind == resume_stop
1941 || stopping_threads != NOT_STOPPING_THREADS);
1942
1943 if (!should_stop)
1944 {
1945 linux_resume_one_lwp (event_child,
1946 event_child->stepping, 0, NULL);
1947 continue;
1948 }
1949 }
1950
1951 return lwpid_of (event_child);
1952 }
1953
1954 /* NOTREACHED */
1955 return 0;
1956 }
1957
1958 /* Count the LWP's that have had events. */
1959
1960 static int
1961 count_events_callback (struct inferior_list_entry *entry, void *data)
1962 {
1963 struct lwp_info *lp = (struct lwp_info *) entry;
1964 struct thread_info *thread = get_lwp_thread (lp);
1965 int *count = data;
1966
1967 gdb_assert (count != NULL);
1968
1969 /* Count only resumed LWPs that have a SIGTRAP event pending that
1970 should be reported to GDB. */
1971 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1972 && thread->last_resume_kind != resume_stop
1973 && lp->status_pending_p
1974 && WIFSTOPPED (lp->status_pending)
1975 && WSTOPSIG (lp->status_pending) == SIGTRAP
1976 && !breakpoint_inserted_here (lp->stop_pc))
1977 (*count)++;
1978
1979 return 0;
1980 }
1981
1982 /* Select the LWP (if any) that is currently being single-stepped. */
1983
1984 static int
1985 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1986 {
1987 struct lwp_info *lp = (struct lwp_info *) entry;
1988 struct thread_info *thread = get_lwp_thread (lp);
1989
1990 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1991 && thread->last_resume_kind == resume_step
1992 && lp->status_pending_p)
1993 return 1;
1994 else
1995 return 0;
1996 }
1997
1998 /* Select the Nth LWP that has had a SIGTRAP event that should be
1999 reported to GDB. */
2000
2001 static int
2002 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2003 {
2004 struct lwp_info *lp = (struct lwp_info *) entry;
2005 struct thread_info *thread = get_lwp_thread (lp);
2006 int *selector = data;
2007
2008 gdb_assert (selector != NULL);
2009
2010 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2011 if (thread->last_resume_kind != resume_stop
2012 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2013 && lp->status_pending_p
2014 && WIFSTOPPED (lp->status_pending)
2015 && WSTOPSIG (lp->status_pending) == SIGTRAP
2016 && !breakpoint_inserted_here (lp->stop_pc))
2017 if ((*selector)-- == 0)
2018 return 1;
2019
2020 return 0;
2021 }
2022
2023 static int
2024 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2025 {
2026 struct lwp_info *lp = (struct lwp_info *) entry;
2027 struct thread_info *thread = get_lwp_thread (lp);
2028 struct lwp_info *event_lp = data;
2029
2030 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2031 if (lp == event_lp)
2032 return 0;
2033
2034 /* If a LWP other than the LWP that we're reporting an event for has
2035 hit a GDB breakpoint (as opposed to some random trap signal),
2036 then just arrange for it to hit it again later. We don't keep
2037 the SIGTRAP status and don't forward the SIGTRAP signal to the
2038 LWP.  We will handle the current event; eventually we will resume
2039 all LWPs, and this one will get its breakpoint trap again.
2040
2041 If we do not do this, then we run the risk that the user will
2042 delete or disable the breakpoint, but the LWP will have already
2043 tripped on it. */
2044
2045 if (thread->last_resume_kind != resume_stop
2046 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2047 && lp->status_pending_p
2048 && WIFSTOPPED (lp->status_pending)
2049 && WSTOPSIG (lp->status_pending) == SIGTRAP
2050 && !lp->stepping
2051 && !lp->stopped_by_watchpoint
2052 && cancel_breakpoint (lp))
2053 /* Throw away the SIGTRAP. */
2054 lp->status_pending_p = 0;
2055
2056 return 0;
2057 }
2058
2059 static void
2060 linux_cancel_breakpoints (void)
2061 {
2062 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2063 }
2064
2065 /* Select one LWP out of those that have events pending. */
2066
2067 static void
2068 select_event_lwp (struct lwp_info **orig_lp)
2069 {
2070 int num_events = 0;
2071 int random_selector;
2072 struct lwp_info *event_lp;
2073
2074 /* Give preference to any LWP that is being single-stepped. */
2075 event_lp
2076 = (struct lwp_info *) find_inferior (&all_lwps,
2077 select_singlestep_lwp_callback, NULL);
2078 if (event_lp != NULL)
2079 {
2080 if (debug_threads)
2081 fprintf (stderr,
2082 "SEL: Select single-step %s\n",
2083 target_pid_to_str (ptid_of (event_lp)));
2084 }
2085 else
2086 {
2087 /* No single-stepping LWP. Select one at random, out of those
2088 which have had SIGTRAP events. */
2089
2090 /* First see how many SIGTRAP events we have. */
2091 find_inferior (&all_lwps, count_events_callback, &num_events);
2092
2093 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2094 random_selector = (int)
2095 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
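      /* The scaling above maps rand ()'s range [0, RAND_MAX]
	 (approximately) uniformly onto [0, num_events - 1]: dividing
	 by RAND_MAX + 1.0 keeps the result strictly below num_events,
	 and avoids the modulo bias that `rand () % num_events' would
	 introduce.  E.g., with num_events == 3 and rand () ==
	 RAND_MAX, this yields (int) (3 * 0.99...) == 2.  */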
2096
2097 if (debug_threads && num_events > 1)
2098 fprintf (stderr,
2099 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2100 num_events, random_selector);
2101
2102 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2103 select_event_lwp_callback,
2104 &random_selector);
2105 }
2106
2107 if (event_lp != NULL)
2108 {
2109 /* Switch the event LWP. */
2110 *orig_lp = event_lp;
2111 }
2112 }
2113
2114 /* Decrement the suspend count of an LWP. */
2115
2116 static int
2117 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2118 {
2119 struct lwp_info *lwp = (struct lwp_info *) entry;
2120
2121 /* Ignore EXCEPT. */
2122 if (lwp == except)
2123 return 0;
2124
2125 lwp->suspended--;
2126
2127 gdb_assert (lwp->suspended >= 0);
2128 return 0;
2129 }
2130
2131 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2132 non-NULL. */
2133
2134 static void
2135 unsuspend_all_lwps (struct lwp_info *except)
2136 {
2137 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2138 }
2139
2140 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2141 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2142 void *data);
2143 static int lwp_running (struct inferior_list_entry *entry, void *data);
2144 static ptid_t linux_wait_1 (ptid_t ptid,
2145 struct target_waitstatus *ourstatus,
2146 int target_options);
2147
2148 /* Stabilize threads (move out of jump pads).
2149
2150 If a thread is midway collecting a fast tracepoint, we need to
2151 finish the collection and move it out of the jump pad before
2152 reporting the signal.
2153
2154 This avoids recursion while collecting (when a signal arrives
2155 midway, and the signal handler itself collects), which would trash
2156 the trace buffer. In case the user set a breakpoint in a signal
2157 handler, this avoids the backtrace showing the jump pad, etc.
2158 Most importantly, there are certain things we can't do safely if
2159 threads are stopped in a jump pad (or in their callees). For
2160 example:
2161
2162 - starting a new trace run. A thread still collecting from the
2163 previous run could trash the trace buffer when resumed. The trace
2164 buffer control structures would have been reset, but the thread had
2165 no way to tell. The thread could even be midway through memcpy'ing
2166 to the buffer, which would mean that when resumed, it would clobber
2167 the trace buffer that had been set up for the new run.
2168
2169 - we can't rewrite/reuse the jump pads for new tracepoints
2170 safely. Say you do tstart while a thread is stopped midway through
2171 collecting. When the thread is later resumed, it finishes the
2172 collection, and returns to the jump pad, to execute the original
2173 instruction that was under the tracepoint jump at the time the
2174 older run had been started. If the jump pad had been rewritten
2175 since for something else in the new run, the thread would now
2176 execute the wrong / random instructions. */
2177
2178 static void
2179 linux_stabilize_threads (void)
2180 {
2181 struct thread_info *save_inferior;
2182 struct lwp_info *lwp_stuck;
2183
2184 lwp_stuck
2185 = (struct lwp_info *) find_inferior (&all_lwps,
2186 stuck_in_jump_pad_callback, NULL);
2187 if (lwp_stuck != NULL)
2188 {
2189 if (debug_threads)
2190 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2191 lwpid_of (lwp_stuck));
2192 return;
2193 }
2194
2195 save_inferior = current_inferior;
2196
2197 stabilizing_threads = 1;
2198
2199 /* Kick 'em all. */
2200 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2201
2202 /* Loop until all are stopped out of the jump pads. */
2203 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2204 {
2205 struct target_waitstatus ourstatus;
2206 struct lwp_info *lwp;
2207 int wstat;
2208
2209 /* Note that we go through the full wait event loop. While
2210 moving threads out of the jump pad, we need to be able to step
2211 over internal breakpoints and such. */
2212 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2213
2214 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2215 {
2216 lwp = get_thread_lwp (current_inferior);
2217
2218 /* Lock it. */
2219 lwp->suspended++;
2220
2221 if (ourstatus.value.sig != GDB_SIGNAL_0
2222 || current_inferior->last_resume_kind == resume_stop)
2223 {
2224 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2225 enqueue_one_deferred_signal (lwp, &wstat);
2226 }
2227 }
2228 }
2229
2230 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2231
2232 stabilizing_threads = 0;
2233
2234 current_inferior = save_inferior;
2235
2236 if (debug_threads)
2237 {
2238 lwp_stuck
2239 = (struct lwp_info *) find_inferior (&all_lwps,
2240 stuck_in_jump_pad_callback, NULL);
2241 if (lwp_stuck != NULL)
2242 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2243 lwpid_of (lwp_stuck));
2244 }
2245 }
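/* In outline, the stabilization above does (a simplified sketch, not
   verbatim code):

     for each LWP: ask it to finish its collect and leave the pad;
     while (some LWP is still running)
       {
         wait for the next event (the full linux_wait_1, so internal
           breakpoints keep working);
         if it was a clean stop, bump that LWP's suspend count;
         if it carried a real signal, defer the signal for later;
       }
     decrement every suspend count taken above;

   Any signal deferred here is re-reported once the LWP is safely out
   of the jump pad.  */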
2246
2247 /* Wait for process, returns status. */
2248
2249 static ptid_t
2250 linux_wait_1 (ptid_t ptid,
2251 struct target_waitstatus *ourstatus, int target_options)
2252 {
2253 int w;
2254 struct lwp_info *event_child;
2255 int options;
2256 int pid;
2257 int step_over_finished;
2258 int bp_explains_trap;
2259 int maybe_internal_trap;
2260 int report_to_gdb;
2261 int trace_event;
2262 int in_step_range;
2263
2264 /* Translate generic target options into linux options. */
2265 options = __WALL;
2266 if (target_options & TARGET_WNOHANG)
2267 options |= WNOHANG;
2268
2269 retry:
2270 bp_explains_trap = 0;
2271 trace_event = 0;
2272 in_step_range = 0;
2273 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2274
2275 /* If we were only supposed to resume one thread, only wait for
2276 that thread - if it's still alive. If it died, however - which
2277 can happen if we're coming from the thread death case below -
2278 then we need to make sure we restart the other threads. We could
2279 pick a thread at random or restart all; restarting all is less
2280 arbitrary. */
2281 if (!non_stop
2282 && !ptid_equal (cont_thread, null_ptid)
2283 && !ptid_equal (cont_thread, minus_one_ptid))
2284 {
2285 struct thread_info *thread;
2286
2287 thread = (struct thread_info *) find_inferior_id (&all_threads,
2288 cont_thread);
2289
2290 /* No stepping, no signal - unless one is pending already, of course. */
2291 if (thread == NULL)
2292 {
2293 struct thread_resume resume_info;
2294 resume_info.thread = minus_one_ptid;
2295 resume_info.kind = resume_continue;
2296 resume_info.sig = 0;
2297 linux_resume (&resume_info, 1);
2298 }
2299 else
2300 ptid = cont_thread;
2301 }
2302
2303 if (ptid_equal (step_over_bkpt, null_ptid))
2304 pid = linux_wait_for_event (ptid, &w, options);
2305 else
2306 {
2307 if (debug_threads)
2308 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2309 target_pid_to_str (step_over_bkpt));
2310 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2311 }
2312
2313 if (pid == 0) /* only if TARGET_WNOHANG */
2314 return null_ptid;
2315
2316 event_child = get_thread_lwp (current_inferior);
2317
2318 /* If we are waiting for a particular child, and it exited,
2319 linux_wait_for_event will return its exit status. Similarly if
2320 the last child exited. If this is not the last child, however,
2321 do not report it as exited until there is a 'thread exited' response
2322 available in the remote protocol. Instead, just wait for another event.
2323 This should be safe, because if the thread crashed we will already
2324 have reported the termination signal to GDB; that should stop any
2325 in-progress stepping operations, etc.
2326
2327 Report the exit status of the last thread to exit. This matches
2328 LinuxThreads' behavior. */
2329
2330 if (last_thread_of_process_p (current_inferior))
2331 {
2332 if (WIFEXITED (w) || WIFSIGNALED (w))
2333 {
2334 if (WIFEXITED (w))
2335 {
2336 ourstatus->kind = TARGET_WAITKIND_EXITED;
2337 ourstatus->value.integer = WEXITSTATUS (w);
2338
2339 if (debug_threads)
2340 fprintf (stderr,
2341 "\nChild exited with retcode = %x \n",
2342 WEXITSTATUS (w));
2343 }
2344 else
2345 {
2346 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2347 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2348
2349 if (debug_threads)
2350 fprintf (stderr,
2351 "\nChild terminated with signal = %x \n",
2352 WTERMSIG (w));
2353
2354 }
2355
2356 return ptid_of (event_child);
2357 }
2358 }
2359 else
2360 {
2361 if (!WIFSTOPPED (w))
2362 goto retry;
2363 }
2364
2365 /* If this event was not handled before, and is not a SIGTRAP, we
2366 report it. SIGILL and SIGSEGV are also treated as traps in case
2367 a breakpoint is inserted at the current PC. If this target does
2368 not support internal breakpoints at all, we also report the
2369 SIGTRAP without further processing; it's of no concern to us. */
2370 maybe_internal_trap
2371 = (supports_breakpoints ()
2372 && (WSTOPSIG (w) == SIGTRAP
2373 || ((WSTOPSIG (w) == SIGILL
2374 || WSTOPSIG (w) == SIGSEGV)
2375 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2376
2377 if (maybe_internal_trap)
2378 {
2379 /* Handle anything that requires bookkeeping before deciding to
2380 report the event or continue waiting. */
2381
2382 /* First check if we can explain the SIGTRAP with an internal
2383 breakpoint, or if we should possibly report the event to GDB.
2384 Do this before anything that may remove or insert a
2385 breakpoint. */
2386 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2387
2388 /* We have a SIGTRAP, possibly a step-over dance has just
2389 finished. If so, tweak the state machine accordingly,
2390 reinsert breakpoints and delete any reinsert (software
2391 single-step) breakpoints. */
2392 step_over_finished = finish_step_over (event_child);
2393
2394 /* Now invoke the callbacks of any internal breakpoints there. */
2395 check_breakpoints (event_child->stop_pc);
2396
2397 /* Handle tracepoint data collecting. This may overflow the
2398 trace buffer, and cause a tracing stop, removing
2399 breakpoints. */
2400 trace_event = handle_tracepoints (event_child);
2401
2402 if (bp_explains_trap)
2403 {
2404 /* If we stepped or ran into an internal breakpoint, we've
2405 already handled it. So next time we resume (from this
2406 PC), we should step over it. */
2407 if (debug_threads)
2408 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2409
2410 if (breakpoint_here (event_child->stop_pc))
2411 event_child->need_step_over = 1;
2412 }
2413 }
2414 else
2415 {
2416 /* We have some other signal, possibly a step-over dance was in
2417 progress, and it should be cancelled too. */
2418 step_over_finished = finish_step_over (event_child);
2419 }
2420
2421 /* We have all the data we need. Either report the event to GDB, or
2422 resume threads and keep waiting for more. */
2423
2424 /* If we're collecting a fast tracepoint, finish the collection and
2425 move out of the jump pad before delivering a signal. See
2426 linux_stabilize_threads. */
2427
2428 if (WIFSTOPPED (w)
2429 && WSTOPSIG (w) != SIGTRAP
2430 && supports_fast_tracepoints ()
2431 && agent_loaded_p ())
2432 {
2433 if (debug_threads)
2434 fprintf (stderr,
2435 "Got signal %d for LWP %ld. Check if we need "
2436 "to defer or adjust it.\n",
2437 WSTOPSIG (w), lwpid_of (event_child));
2438
2439 /* Allow debugging the jump pad itself. */
2440 if (current_inferior->last_resume_kind != resume_step
2441 && maybe_move_out_of_jump_pad (event_child, &w))
2442 {
2443 enqueue_one_deferred_signal (event_child, &w);
2444
2445 if (debug_threads)
2446 fprintf (stderr,
2447 "Signal %d for LWP %ld deferred (in jump pad)\n",
2448 WSTOPSIG (w), lwpid_of (event_child));
2449
2450 linux_resume_one_lwp (event_child, 0, 0, NULL);
2451 goto retry;
2452 }
2453 }
2454
2455 if (event_child->collecting_fast_tracepoint)
2456 {
2457 if (debug_threads)
2458 fprintf (stderr, "\
2459 LWP %ld was trying to move out of the jump pad (%d). \
2460 Check if we're already there.\n",
2461 lwpid_of (event_child),
2462 event_child->collecting_fast_tracepoint);
2463
2464 trace_event = 1;
2465
2466 event_child->collecting_fast_tracepoint
2467 = linux_fast_tracepoint_collecting (event_child, NULL);
2468
2469 if (event_child->collecting_fast_tracepoint != 1)
2470 {
2471 /* No longer need this breakpoint. */
2472 if (event_child->exit_jump_pad_bkpt != NULL)
2473 {
2474 if (debug_threads)
2475 fprintf (stderr,
2476 "No longer need exit-jump-pad bkpt; removing it. "
2477 "Stopping all threads momentarily.\n");
2478
2479 /* Other running threads could hit this breakpoint.
2480 We don't handle moribund locations like GDB does,
2481 instead we always pause all threads when removing
2482 breakpoints, so that any step-over or
2483 decr_pc_after_break adjustment is always taken
2484 care of while the breakpoint is still
2485 inserted. */
2486 stop_all_lwps (1, event_child);
2487 cancel_breakpoints ();
2488
2489 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2490 event_child->exit_jump_pad_bkpt = NULL;
2491
2492 unstop_all_lwps (1, event_child);
2493
2494 gdb_assert (event_child->suspended >= 0);
2495 }
2496 }
2497
2498 if (event_child->collecting_fast_tracepoint == 0)
2499 {
2500 if (debug_threads)
2501 fprintf (stderr,
2502 "fast tracepoint finished "
2503 "collecting successfully.\n");
2504
2505 /* We may have a deferred signal to report. */
2506 if (dequeue_one_deferred_signal (event_child, &w))
2507 {
2508 if (debug_threads)
2509 fprintf (stderr, "dequeued one signal.\n");
2510 }
2511 else
2512 {
2513 if (debug_threads)
2514 fprintf (stderr, "no deferred signals.\n");
2515
2516 if (stabilizing_threads)
2517 {
2518 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2519 ourstatus->value.sig = GDB_SIGNAL_0;
2520 return ptid_of (event_child);
2521 }
2522 }
2523 }
2524 }
2525
2526 /* Check whether GDB would be interested in this event. */
2527
2528 /* If GDB is not interested in this signal, don't stop other
2529 threads, and don't report it to GDB. Just resume the inferior
2530 right away. We do this for threading-related signals as well as
2531 any that GDB specifically requested we ignore. But never ignore
2532 SIGSTOP if we sent it ourselves, and do not ignore signals when
2533 stepping - they may require special handling to skip the signal
2534 handler. */
2535 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2536 thread library? */
2537 if (WIFSTOPPED (w)
2538 && current_inferior->last_resume_kind != resume_step
2539 && (
2540 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2541 (current_process ()->private->thread_db != NULL
2542 && (WSTOPSIG (w) == __SIGRTMIN
2543 || WSTOPSIG (w) == __SIGRTMIN + 1))
2544 ||
2545 #endif
2546 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2547 && !(WSTOPSIG (w) == SIGSTOP
2548 && current_inferior->last_resume_kind == resume_stop))))
2549 {
2550 siginfo_t info, *info_p;
2551
2552 if (debug_threads)
2553 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2554 WSTOPSIG (w), lwpid_of (event_child));
2555
2556 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
2557 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2558 info_p = &info;
2559 else
2560 info_p = NULL;
2561 linux_resume_one_lwp (event_child, event_child->stepping,
2562 WSTOPSIG (w), info_p);
2563 goto retry;
2564 }
2565
2566 /* Note that all addresses are always "out of the step range" when
2567 there's no range to begin with. */
2568 in_step_range = lwp_in_step_range (event_child);
2569
2570 /* If GDB wanted this thread to single step, and the thread is out
2571 of the step range, we always want to report the SIGTRAP, and let
2572 GDB handle it. Watchpoints should always be reported. So should
2573 signals we can't explain. A SIGTRAP we can't explain could be a
2574 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
2575 do, we'd be able to handle GDB breakpoints on top of internal
2576 breakpoints, by handling the internal breakpoint and still
2577 reporting the event to GDB. If we don't, we're out of luck; GDB
2578 won't see the breakpoint hit. */
2579 report_to_gdb = (!maybe_internal_trap
2580 || (current_inferior->last_resume_kind == resume_step
2581 && !in_step_range)
2582 || event_child->stopped_by_watchpoint
2583 || (!step_over_finished && !in_step_range
2584 && !bp_explains_trap && !trace_event)
2585 || (gdb_breakpoint_here (event_child->stop_pc)
2586 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2587 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
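  /* Roughly: report if the trap can't be one of ours; if GDB asked
     for a step and the thread left the step range; if a watchpoint
     triggered; if nothing explains the trap (no step-over just
     finished, not inside a step range, no gdbserver breakpoint, no
     tracepoint event); or if the thread is at a GDB breakpoint whose
     condition is true and that has no commands attached.  */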
2588
2589 run_breakpoint_commands (event_child->stop_pc);
2590
2591 /* We found no reason GDB would want us to stop. We either hit one
2592 of our own breakpoints, or finished an internal step GDB
2593 shouldn't know about. */
2594 if (!report_to_gdb)
2595 {
2596 if (debug_threads)
2597 {
2598 if (bp_explains_trap)
2599 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2600 if (step_over_finished)
2601 fprintf (stderr, "Step-over finished.\n");
2602 if (trace_event)
2603 fprintf (stderr, "Tracepoint event.\n");
2604 if (lwp_in_step_range (event_child))
2605 fprintf (stderr, "Range stepping pc 0x%s [0x%s, 0x%s).\n",
2606 paddress (event_child->stop_pc),
2607 paddress (event_child->step_range_start),
2608 paddress (event_child->step_range_end));
2609 }
2610
2611 /* We're not reporting this breakpoint to GDB, so apply the
2612 decr_pc_after_break adjustment to the inferior's regcache
2613 ourselves. */
2614
2615 if (the_low_target.set_pc != NULL)
2616 {
2617 struct regcache *regcache
2618 = get_thread_regcache (get_lwp_thread (event_child), 1);
2619 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2620 }
2621
2622 /* We may have finished stepping over a breakpoint. If so,
2623 we've stopped and suspended all LWPs momentarily except the
2624 stepping one. This is where we resume them all again. We're
2625 going to keep waiting, so use proceed, which handles stepping
2626 over the next breakpoint. */
2627 if (debug_threads)
2628 fprintf (stderr, "proceeding all threads.\n");
2629
2630 if (step_over_finished)
2631 unsuspend_all_lwps (event_child);
2632
2633 proceed_all_lwps ();
2634 goto retry;
2635 }
2636
2637 if (debug_threads)
2638 {
2639 if (current_inferior->last_resume_kind == resume_step)
2640 {
2641 if (event_child->step_range_start == event_child->step_range_end)
2642 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2643 else if (!lwp_in_step_range (event_child))
2644 fprintf (stderr, "Out of step range, reporting event.\n");
2645 }
2646 if (event_child->stopped_by_watchpoint)
2647 fprintf (stderr, "Stopped by watchpoint.\n");
2648 if (gdb_breakpoint_here (event_child->stop_pc))
2649 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2650 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2652 }
2653
2654 /* Alright, we're going to report a stop. */
2655
2656 if (!non_stop && !stabilizing_threads)
2657 {
2658 /* In all-stop, stop all threads. */
2659 stop_all_lwps (0, NULL);
2660
2661 /* If we're not waiting for a specific LWP, choose an event LWP
2662 from among those that have had events. Giving equal priority
2663 to all LWPs that have had events helps prevent
2664 starvation. */
2665 if (ptid_equal (ptid, minus_one_ptid))
2666 {
2667 event_child->status_pending_p = 1;
2668 event_child->status_pending = w;
2669
2670 select_event_lwp (&event_child);
2671
2672 event_child->status_pending_p = 0;
2673 w = event_child->status_pending;
2674 }
2675
2676 /* Now that we've selected our final event LWP, cancel any
2677 breakpoints in other LWPs that have hit a GDB breakpoint.
2678 See the comment in cancel_breakpoints_callback to find out
2679 why. */
2680 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2681
2682 /* If we were doing a step-over, all other threads but the stepping one
2683 had been paused in start_step_over, with their suspend counts
2684 incremented. We don't want to do a full unstop/unpause, because we're
2685 in all-stop mode (so we want threads stopped), but we still need to
2686 unsuspend the other threads, to decrement their `suspended' count
2687 back. */
2688 if (step_over_finished)
2689 unsuspend_all_lwps (event_child);
2690
2691 /* Stabilize threads (move out of jump pads). */
2692 stabilize_threads ();
2693 }
2694 else
2695 {
2696 /* If we just finished a step-over, then all threads had been
2697 momentarily paused. In all-stop, that's fine, we want
2698 threads stopped by now anyway. In non-stop, we need to
2699 re-resume threads that GDB wanted to be running. */
2700 if (step_over_finished)
2701 unstop_all_lwps (1, event_child);
2702 }
2703
2704 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2705
2706 if (current_inferior->last_resume_kind == resume_stop
2707 && WSTOPSIG (w) == SIGSTOP)
2708 {
2709 /* GDB requested this thread to stop with vCont;t, and it stopped
2710 cleanly, so report it as SIG0. The use of SIGSTOP is an
2711 implementation detail. */
2712 ourstatus->value.sig = GDB_SIGNAL_0;
2713 }
2714 else if (current_inferior->last_resume_kind == resume_stop
2715 && WSTOPSIG (w) != SIGSTOP)
2716 {
2717 /* GDB requested this thread to stop with vCont;t, but it stopped
2718 for some other reason. */
2719 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2720 }
2721 else
2722 {
2723 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2724 }
2725
2726 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2727
2728 if (debug_threads)
2729 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2730 target_pid_to_str (ptid_of (event_child)),
2731 ourstatus->kind,
2732 ourstatus->value.sig);
2733
2734 return ptid_of (event_child);
2735 }
2736
2737 /* Get rid of any pending event in the pipe. */
2738 static void
2739 async_file_flush (void)
2740 {
2741 int ret;
2742 char buf;
2743
2744 do
2745 ret = read (linux_event_pipe[0], &buf, 1);
2746 while (ret >= 0 || (ret == -1 && errno == EINTR));
2747 }
2748
2749 /* Put something in the pipe, so the event loop wakes up. */
2750 static void
2751 async_file_mark (void)
2752 {
2753 int ret;
2754
2755 async_file_flush ();
2756
2757 do
2758 ret = write (linux_event_pipe[1], "+", 1);
2759 while (ret == 0 || (ret == -1 && errno == EINTR));
2760
2761 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2762 be awakened anyway. */
2763 }
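/* async_file_flush and async_file_mark implement the classic
   "self-pipe" wake-up pattern: writing a byte to one end of
   linux_event_pipe makes the read end readable, which wakes up a
   select/poll based event loop.  A minimal standalone sketch of the
   pattern (hypothetical variable names, not gdbserver code):

     int fds[2];
     char c;

     pipe (fds);
     fcntl (fds[0], F_SETFL, O_NONBLOCK);
     fcntl (fds[1], F_SETFL, O_NONBLOCK);
     ...
     write (fds[1], "+", 1);             (mark: wake the event loop)
     ...
     while (read (fds[0], &c, 1) > 0)    (flush: drain stale wake-ups)
       ;

   Both ends must be non-blocking so that marking a full pipe or
   flushing an empty one returns immediately instead of blocking.  */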
2764
2765 static ptid_t
2766 linux_wait (ptid_t ptid,
2767 struct target_waitstatus *ourstatus, int target_options)
2768 {
2769 ptid_t event_ptid;
2770
2771 if (debug_threads)
2772 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2773
2774 /* Flush the async file first. */
2775 if (target_is_async_p ())
2776 async_file_flush ();
2777
2778 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2779
2780 /* If at least one stop was reported, there may be more. A single
2781 SIGCHLD can signal more than one child stop. */
2782 if (target_is_async_p ()
2783 && (target_options & TARGET_WNOHANG) != 0
2784 && !ptid_equal (event_ptid, null_ptid))
2785 async_file_mark ();
2786
2787 return event_ptid;
2788 }
2789
2790 /* Send a signal to an LWP. */
2791
2792 static int
2793 kill_lwp (unsigned long lwpid, int signo)
2794 {
2795 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2796 fails, then we are not using nptl threads and we should be using kill. */
2797
2798 #ifdef __NR_tkill
2799 {
2800 static int tkill_failed;
2801
2802 if (!tkill_failed)
2803 {
2804 int ret;
2805
2806 errno = 0;
2807 ret = syscall (__NR_tkill, lwpid, signo);
2808 if (errno != ENOSYS)
2809 return ret;
2810 tkill_failed = 1;
2811 }
2812 }
2813 #endif
2814
2815 return kill (lwpid, signo);
2816 }
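/* The distinction matters because kill () directs a signal at the
   whole thread group and lets the kernel pick a thread to handle it,
   while tkill directs it at one specific kernel thread.  gdbserver
   wants per-LWP delivery --- e.g., send_sigstop below relies on the
   SIGSTOP reaching exactly the LWP it is stopping --- hence the
   tkill-first strategy with a one-time fallback to kill () on kernels
   that lack __NR_tkill.  */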
2817
2818 void
2819 linux_stop_lwp (struct lwp_info *lwp)
2820 {
2821 send_sigstop (lwp);
2822 }
2823
2824 static void
2825 send_sigstop (struct lwp_info *lwp)
2826 {
2827 int pid;
2828
2829 pid = lwpid_of (lwp);
2830
2831 /* If we already have a pending stop signal for this process, don't
2832 send another. */
2833 if (lwp->stop_expected)
2834 {
2835 if (debug_threads)
2836 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2837
2838 return;
2839 }
2840
2841 if (debug_threads)
2842 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2843
2844 lwp->stop_expected = 1;
2845 kill_lwp (pid, SIGSTOP);
2846 }
2847
2848 static int
2849 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2850 {
2851 struct lwp_info *lwp = (struct lwp_info *) entry;
2852
2853 /* Ignore EXCEPT. */
2854 if (lwp == except)
2855 return 0;
2856
2857 if (lwp->stopped)
2858 return 0;
2859
2860 send_sigstop (lwp);
2861 return 0;
2862 }
2863
2864 /* Increment the suspend count of an LWP, and stop it, if not stopped
2865 yet. */
2866 static int
2867 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2868 void *except)
2869 {
2870 struct lwp_info *lwp = (struct lwp_info *) entry;
2871
2872 /* Ignore EXCEPT. */
2873 if (lwp == except)
2874 return 0;
2875
2876 lwp->suspended++;
2877
2878 return send_sigstop_callback (entry, except);
2879 }
2880
2881 static void
2882 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2883 {
2884 /* It's dead, really. */
2885 lwp->dead = 1;
2886
2887 /* Store the exit status for later. */
2888 lwp->status_pending_p = 1;
2889 lwp->status_pending = wstat;
2890
2891 /* Prevent trying to stop it. */
2892 lwp->stopped = 1;
2893
2894 /* No further stops are expected from a dead lwp. */
2895 lwp->stop_expected = 0;
2896 }
2897
2898 static void
2899 wait_for_sigstop (struct inferior_list_entry *entry)
2900 {
2901 struct lwp_info *lwp = (struct lwp_info *) entry;
2902 struct thread_info *saved_inferior;
2903 int wstat;
2904 ptid_t saved_tid;
2905 ptid_t ptid;
2906 int pid;
2907
2908 if (lwp->stopped)
2909 {
2910 if (debug_threads)
2911 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2912 lwpid_of (lwp));
2913 return;
2914 }
2915
2916 saved_inferior = current_inferior;
2917 if (saved_inferior != NULL)
2918 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2919 else
2920 saved_tid = null_ptid; /* avoid bogus unused warning */
2921
2922 ptid = lwp->head.id;
2923
2924 if (debug_threads)
2925 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2926
2927 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2928
2929 /* If we stopped with a non-SIGSTOP signal, save it for later
2930 and record the pending SIGSTOP. If the process exited, just
2931 return. */
2932 if (WIFSTOPPED (wstat))
2933 {
2934 if (debug_threads)
2935 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2936 lwpid_of (lwp), WSTOPSIG (wstat));
2937
2938 if (WSTOPSIG (wstat) != SIGSTOP)
2939 {
2940 if (debug_threads)
2941 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2942 lwpid_of (lwp), wstat);
2943
2944 lwp->status_pending_p = 1;
2945 lwp->status_pending = wstat;
2946 }
2947 }
2948 else
2949 {
2950 if (debug_threads)
2951 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2952
2953 lwp = find_lwp_pid (pid_to_ptid (pid));
2954 if (lwp)
2955 {
2956 /* Leave this status pending for the next time we're able to
2957 report it. In the mean time, we'll report this lwp as
2958 dead to GDB, so GDB doesn't try to read registers and
2959 memory from it. This can only happen if this was the
2960 last thread of the process; otherwise, PID is removed
2961 from the thread tables before linux_wait_for_event
2962 returns. */
2963 mark_lwp_dead (lwp, wstat);
2964 }
2965 }
2966
2967 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2968 current_inferior = saved_inferior;
2969 else
2970 {
2971 if (debug_threads)
2972 fprintf (stderr, "Previously current thread died.\n");
2973
2974 if (non_stop)
2975 {
2976 /* We can't change the current inferior behind GDB's back,
2977 otherwise, a subsequent command may apply to the wrong
2978 process. */
2979 current_inferior = NULL;
2980 }
2981 else
2982 {
2983 /* Set a valid thread as current. */
2984 set_desired_inferior (0);
2985 }
2986 }
2987 }
2988
2989 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2990 move it out, because we need to report the stop event to GDB. For
2991 example, if the user puts a breakpoint in the jump pad, it's
2992 because she wants to debug it. */
2993
2994 static int
2995 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2996 {
2997 struct lwp_info *lwp = (struct lwp_info *) entry;
2998 struct thread_info *thread = get_lwp_thread (lwp);
2999
3000 gdb_assert (lwp->suspended == 0);
3001 gdb_assert (lwp->stopped);
3002
3003 /* Allow debugging the jump pad, gdb_collect, etc. */
3004 return (supports_fast_tracepoints ()
3005 && agent_loaded_p ()
3006 && (gdb_breakpoint_here (lwp->stop_pc)
3007 || lwp->stopped_by_watchpoint
3008 || thread->last_resume_kind == resume_step)
3009 && linux_fast_tracepoint_collecting (lwp, NULL));
3010 }
3011
3012 static void
3013 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3014 {
3015 struct lwp_info *lwp = (struct lwp_info *) entry;
3016 struct thread_info *thread = get_lwp_thread (lwp);
3017 int *wstat;
3018
3019 gdb_assert (lwp->suspended == 0);
3020 gdb_assert (lwp->stopped);
3021
3022 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3023
3024 /* Allow debugging the jump pad, gdb_collect, etc. */
3025 if (!gdb_breakpoint_here (lwp->stop_pc)
3026 && !lwp->stopped_by_watchpoint
3027 && thread->last_resume_kind != resume_step
3028 && maybe_move_out_of_jump_pad (lwp, wstat))
3029 {
3030 if (debug_threads)
3031 fprintf (stderr,
3032 "LWP %ld needs stabilizing (in jump pad)\n",
3033 lwpid_of (lwp));
3034
3035 if (wstat)
3036 {
3037 lwp->status_pending_p = 0;
3038 enqueue_one_deferred_signal (lwp, wstat);
3039
3040 if (debug_threads)
3041 fprintf (stderr,
3042 "Signal %d for LWP %ld deferred "
3043 "(in jump pad)\n",
3044 WSTOPSIG (*wstat), lwpid_of (lwp));
3045 }
3046
3047 linux_resume_one_lwp (lwp, 0, 0, NULL);
3048 }
3049 else
3050 lwp->suspended++;
3051 }
3052
3053 static int
3054 lwp_running (struct inferior_list_entry *entry, void *data)
3055 {
3056 struct lwp_info *lwp = (struct lwp_info *) entry;
3057
3058 if (lwp->dead)
3059 return 0;
3060 if (lwp->stopped)
3061 return 0;
3062 return 1;
3063 }
3064
3065 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3066 If SUSPEND, then also increase the suspend count of every LWP,
3067 except EXCEPT. */
3068
3069 static void
3070 stop_all_lwps (int suspend, struct lwp_info *except)
3071 {
3072 /* Should not be called recursively. */
3073 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3074
3075 stopping_threads = (suspend
3076 ? STOPPING_AND_SUSPENDING_THREADS
3077 : STOPPING_THREADS);
3078
3079 if (suspend)
3080 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3081 else
3082 find_inferior (&all_lwps, send_sigstop_callback, except);
3083 for_each_inferior (&all_lwps, wait_for_sigstop);
3084 stopping_threads = NOT_STOPPING_THREADS;
3085 }
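/* Callers pick between the two flavors with SUSPEND, e.g.
   (illustrative calls only):

     stop_all_lwps (0, NULL);            all-stop pause; undone with
                                         unstop_all_lwps (0, ...)
     stop_all_lwps (1, event_child);     step-over/stabilization pause;
                                         undone with
                                         unstop_all_lwps (1, ...)

   The suspend counts taken by the second form are what
   unsuspend_all_lwps (or unstop_all_lwps with UNSUSPEND set)
   decrements back.  */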
3086
3087 /* Resume execution of the inferior process.
3088 If STEP is nonzero, single-step it.
3089 If SIGNAL is nonzero, give it that signal. */
3090
3091 static void
3092 linux_resume_one_lwp (struct lwp_info *lwp,
3093 int step, int signal, siginfo_t *info)
3094 {
3095 struct thread_info *saved_inferior;
3096 int fast_tp_collecting;
3097
3098 if (lwp->stopped == 0)
3099 return;
3100
3101 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3102
3103 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3104
3105 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3106 user used the "jump" command, or "set $pc = foo"). */
3107 if (lwp->stop_pc != get_pc (lwp))
3108 {
3109 /* Collecting 'while-stepping' actions doesn't make sense
3110 anymore. */
3111 release_while_stepping_state_list (get_lwp_thread (lwp));
3112 }
3113
3114 /* If we have pending signals or status, and a new signal, enqueue the
3115 signal. Also enqueue the signal if we are waiting to reinsert a
3116 breakpoint; it will be picked up again below. */
3117 if (signal != 0
3118 && (lwp->status_pending_p
3119 || lwp->pending_signals != NULL
3120 || lwp->bp_reinsert != 0
3121 || fast_tp_collecting))
3122 {
3123 struct pending_signals *p_sig;
3124 p_sig = xmalloc (sizeof (*p_sig));
3125 p_sig->prev = lwp->pending_signals;
3126 p_sig->signal = signal;
3127 if (info == NULL)
3128 memset (&p_sig->info, 0, sizeof (siginfo_t));
3129 else
3130 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3131 lwp->pending_signals = p_sig;
3132 }
3133
3134 if (lwp->status_pending_p)
3135 {
3136 if (debug_threads)
3137 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3138 " has pending status\n",
3139 lwpid_of (lwp), step ? "step" : "continue", signal,
3140 lwp->stop_expected ? "expected" : "not expected");
3141 return;
3142 }
3143
3144 saved_inferior = current_inferior;
3145 current_inferior = get_lwp_thread (lwp);
3146
3147 if (debug_threads)
3148 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3149 lwpid_of (lwp), step ? "step" : "continue", signal,
3150 lwp->stop_expected ? "expected" : "not expected");
3151
3152 /* This bit needs some thinking about. If we get a signal that
3153 we must report while a single-step reinsert is still pending,
3154 we often end up resuming the thread. It might be better to
3155 (ew) allow a stack of pending events; then we could be sure that
3156 the reinsert happened right away and not lose any signals.
3157
3158 Making this stack would also shrink the window in which breakpoints are
3159 uninserted (see comment in linux_wait_for_lwp) but not enough for
3160 complete correctness, so it won't solve that problem. It may be
3161 worthwhile just to solve this one, however. */
3162 if (lwp->bp_reinsert != 0)
3163 {
3164 if (debug_threads)
3165 fprintf (stderr, " pending reinsert at 0x%s\n",
3166 paddress (lwp->bp_reinsert));
3167
3168 if (can_hardware_single_step ())
3169 {
3170 if (fast_tp_collecting == 0)
3171 {
3172 if (step == 0)
3173 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3174 if (lwp->suspended)
3175 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3176 lwp->suspended);
3177 }
3178
3179 step = 1;
3180 }
3181
3182 /* Postpone any pending signal. It was enqueued above. */
3183 signal = 0;
3184 }
3185
3186 if (fast_tp_collecting == 1)
3187 {
3188 if (debug_threads)
3189 fprintf (stderr, "\
3190 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3191 lwpid_of (lwp));
3192
3193 /* Postpone any pending signal. It was enqueued above. */
3194 signal = 0;
3195 }
3196 else if (fast_tp_collecting == 2)
3197 {
3198 if (debug_threads)
3199 fprintf (stderr, "\
3200 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3201 lwpid_of (lwp));
3202
3203 if (can_hardware_single_step ())
3204 step = 1;
3205 else
3206 fatal ("moving out of jump pad single-stepping"
3207 " not implemented on this target");
3208
3209 /* Postpone any pending signal. It was enqueued above. */
3210 signal = 0;
3211 }
3212
3213 /* If we have while-stepping actions in this thread, set it stepping.
3214 If we have a signal to deliver, it may or may not be set to
3215 SIG_IGN, we don't know. Assume so, and allow collecting
3216 while-stepping into a signal handler. A possible smart thing to
3217 do would be to set an internal breakpoint at the signal return
3218 address, continue, and carry on catching this while-stepping
3219 action only when that breakpoint is hit. A future
3220 enhancement. */
3221 if (get_lwp_thread (lwp)->while_stepping != NULL
3222 && can_hardware_single_step ())
3223 {
3224 if (debug_threads)
3225 fprintf (stderr,
3226 "lwp %ld has a while-stepping action -> forcing step.\n",
3227 lwpid_of (lwp));
3228 step = 1;
3229 }
3230
3231 if (debug_threads && the_low_target.get_pc != NULL)
3232 {
3233 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3234 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3235 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3236 }
3237
3238 /* If we have pending signals, consume one unless we are trying to
3239 reinsert a breakpoint or we're trying to finish a fast tracepoint
3240 collect. */
3241 if (lwp->pending_signals != NULL
3242 && lwp->bp_reinsert == 0
3243 && fast_tp_collecting == 0)
3244 {
3245 struct pending_signals **p_sig;
3246
3247 p_sig = &lwp->pending_signals;
3248 while ((*p_sig)->prev != NULL)
3249 p_sig = &(*p_sig)->prev;
3250
3251 signal = (*p_sig)->signal;
3252 if ((*p_sig)->info.si_signo != 0)
3253 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3254 &(*p_sig)->info);
3255
3256 free (*p_sig);
3257 *p_sig = NULL;
3258 }
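  /* Note the asymmetry: new signals are pushed at the head of the
     pending_signals list, but the walk above follows the prev links
     all the way to the tail, so deliveries happen in FIFO order;
     signals go out in the order they were originally queued.  */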
3259
3260 if (the_low_target.prepare_to_resume != NULL)
3261 the_low_target.prepare_to_resume (lwp);
3262
3263 regcache_invalidate_thread (get_lwp_thread (lwp));
3264 errno = 0;
3265 lwp->stopped = 0;
3266 lwp->stopped_by_watchpoint = 0;
3267 lwp->stepping = step;
3268 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
3269 (PTRACE_TYPE_ARG3) 0,
3270 /* Coerce to a uintptr_t first to avoid potential gcc warning
3271 of coercing an 8 byte integer to a 4 byte pointer. */
3272 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3273
3274 current_inferior = saved_inferior;
3275 if (errno)
3276 {
3277 /* ESRCH from ptrace either means that the thread was already
3278 running (an error) or that it is gone (a race condition). If
3279 it's gone, we will get a notification the next time we wait,
3280 so we can ignore the error. We could differentiate these
3281 two, but it's tricky without waiting; the thread still exists
3282 as a zombie, so sending it signal 0 would succeed. So just
3283 ignore ESRCH. */
3284 if (errno == ESRCH)
3285 return;
3286
3287 perror_with_name ("ptrace");
3288 }
3289 }
3290
3291 struct thread_resume_array
3292 {
3293 struct thread_resume *resume;
3294 size_t n;
3295 };
3296
3297 /* This function is called once per thread. We look up the thread
3298 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3299 resume request.
3300
3301 This algorithm is O(threads * resume elements), but resume elements
3302 is small (and will remain small at least until GDB supports thread
3303 suspension). */
3304 static int
3305 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3306 {
3307 struct lwp_info *lwp;
3308 struct thread_info *thread;
3309 int ndx;
3310 struct thread_resume_array *r;
3311
3312 thread = (struct thread_info *) entry;
3313 lwp = get_thread_lwp (thread);
3314 r = arg;
3315
3316 for (ndx = 0; ndx < r->n; ndx++)
3317 {
3318 ptid_t ptid = r->resume[ndx].thread;
3319 if (ptid_equal (ptid, minus_one_ptid)
3320 || ptid_equal (ptid, entry->id)
3321 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3322 of PID'. */
3323 || (ptid_get_pid (ptid) == pid_of (lwp)
3324 && (ptid_is_pid (ptid)
3325 || ptid_get_lwp (ptid) == -1)))
3326 {
3327 if (r->resume[ndx].kind == resume_stop
3328 && thread->last_resume_kind == resume_stop)
3329 {
3330 if (debug_threads)
3331 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3332 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3333 ? "stopped"
3334 : "stopping",
3335 lwpid_of (lwp));
3336
3337 continue;
3338 }
3339
3340 lwp->resume = &r->resume[ndx];
3341 thread->last_resume_kind = lwp->resume->kind;
3342
3343 lwp->step_range_start = lwp->resume->step_range_start;
3344 lwp->step_range_end = lwp->resume->step_range_end;
3345
3346 /* If we had a deferred signal to report, dequeue one now.
3347 This can happen if LWP gets more than one signal while
3348 trying to get out of a jump pad. */
3349 if (lwp->stopped
3350 && !lwp->status_pending_p
3351 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3352 {
3353 lwp->status_pending_p = 1;
3354
3355 if (debug_threads)
3356 fprintf (stderr,
3357 "Dequeueing deferred signal %d for LWP %ld, "
3358 "leaving status pending.\n",
3359 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3360 }
3361
3362 return 0;
3363 }
3364 }
3365
3366 /* No resume action for this thread. */
3367 lwp->resume = NULL;
3368
3369 return 0;
3370 }
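/* For example, a `vCont;s:p5.5;c' resume request reaches this
   function as a two-element array: every thread is checked first
   against the step action for p5.5, then against the catch-all
   continue; the first match wins and is recorded in lwp->resume.
   (Illustrative packet; the vCont parsing itself lives elsewhere in
   gdbserver.)  */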
3371
3372
3373 /* Set *FLAG_P if this lwp has an interesting status pending. */
3374 static int
3375 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3376 {
3377 struct lwp_info *lwp = (struct lwp_info *) entry;
3378
3379 /* LWPs which will not be resumed are not interesting, because
3380 we might not wait for them next time through linux_wait. */
3381 if (lwp->resume == NULL)
3382 return 0;
3383
3384 if (lwp->status_pending_p)
3385 * (int *) flag_p = 1;
3386
3387 return 0;
3388 }
3389
3390 /* Return 1 if this lwp that GDB wants running is stopped at an
3391 internal breakpoint that we need to step over. It assumes that any
3392 required STOP_PC adjustment has already been propagated to the
3393 inferior's regcache. */
3394
3395 static int
3396 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3397 {
3398 struct lwp_info *lwp = (struct lwp_info *) entry;
3399 struct thread_info *thread;
3400 struct thread_info *saved_inferior;
3401 CORE_ADDR pc;
3402
3403 /* LWPs which will not be resumed are not interesting, because we
3404 might not wait for them next time through linux_wait. */
3405
3406 if (!lwp->stopped)
3407 {
3408 if (debug_threads)
3409 fprintf (stderr,
3410 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3411 lwpid_of (lwp));
3412 return 0;
3413 }
3414
3415 thread = get_lwp_thread (lwp);
3416
3417 if (thread->last_resume_kind == resume_stop)
3418 {
3419 if (debug_threads)
3420 fprintf (stderr,
3421 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3422 lwpid_of (lwp));
3423 return 0;
3424 }
3425
3426 gdb_assert (lwp->suspended >= 0);
3427
3428 if (lwp->suspended)
3429 {
3430 if (debug_threads)
3431 fprintf (stderr,
3432 "Need step over [LWP %ld]? Ignoring, suspended\n",
3433 lwpid_of (lwp));
3434 return 0;
3435 }
3436
3437 if (!lwp->need_step_over)
3438 {
3439 if (debug_threads)
3440 fprintf (stderr,
3441 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3442 }
3443
3444 if (lwp->status_pending_p)
3445 {
3446 if (debug_threads)
3447 fprintf (stderr,
3448 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3449 lwpid_of (lwp));
3450 return 0;
3451 }
3452
3453 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3454 or we have. */
3455 pc = get_pc (lwp);
3456
3457 /* If the PC has changed since we stopped, then don't do anything,
3458 and let the breakpoint/tracepoint be hit. This happens if, for
3459 instance, GDB handled the decr_pc_after_break subtraction itself,
3460 GDB is OOL stepping this thread, the user has issued a "jump"
3461 command, or the user has poked the thread's registers herself. */
3462 if (pc != lwp->stop_pc)
3463 {
3464 if (debug_threads)
3465 fprintf (stderr,
3466 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3467 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3468 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3469
3470 lwp->need_step_over = 0;
3471 return 0;
3472 }
3473
3474 saved_inferior = current_inferior;
3475 current_inferior = thread;
3476
3477 /* We can only step over breakpoints we know about. */
3478 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3479 {
3480 /* Don't step over a breakpoint that GDB expects to hit,
3481 though. If the condition is being evaluated on the target's side
3482 and it evaluates to false, step over this breakpoint as well. */
3483 if (gdb_breakpoint_here (pc)
3484 && gdb_condition_true_at_breakpoint (pc)
3485 && gdb_no_commands_at_breakpoint (pc))
3486 {
3487 if (debug_threads)
3488 fprintf (stderr,
3489 "Need step over [LWP %ld]? yes, but found"
3490 " GDB breakpoint at 0x%s; skipping step over\n",
3491 lwpid_of (lwp), paddress (pc));
3492
3493 current_inferior = saved_inferior;
3494 return 0;
3495 }
3496 else
3497 {
3498 if (debug_threads)
3499 fprintf (stderr,
3500 "Need step over [LWP %ld]? yes, "
3501 "found breakpoint at 0x%s\n",
3502 lwpid_of (lwp), paddress (pc));
3503
3504 /* We've found an lwp that needs stepping over --- return 1 so
3505 that find_inferior stops looking. */
3506 current_inferior = saved_inferior;
3507
3508 /* If the step over is cancelled, this is set again. */
3509 lwp->need_step_over = 0;
3510 return 1;
3511 }
3512 }
3513
3514 current_inferior = saved_inferior;
3515
3516 if (debug_threads)
3517 fprintf (stderr,
3518 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3519 lwpid_of (lwp), paddress (pc));
3520
3521 return 0;
3522 }
3523
3524 /* Start a step-over operation on LWP. When LWP is stopped at a
3525 breakpoint, to make progress, we need to get the breakpoint out
3526 of the way. If we let other threads run while we do that, they may
3527 pass by the breakpoint location and miss hitting it. To avoid
3528 that, a step-over momentarily stops all threads and single-steps
3529 LWP while the breakpoint is temporarily uninserted from the
3530 inferior. When the single-step finishes, we reinsert the
3531 breakpoint and let all threads that are supposed to be running
3532 run again.
3533
3534 On targets that don't support hardware single-step, we don't
3535 currently support full software single-stepping. Instead, we only
3536 support stepping over the thread event breakpoint, by asking the
3537 low target where to place a reinsert breakpoint. Since this
3538 routine assumes the breakpoint being stepped over is a thread event
3539 breakpoint, it usually assumes the return address of the current
3540 function is a good enough place to set the reinsert breakpoint. */
3541
3542 static int
3543 start_step_over (struct lwp_info *lwp)
3544 {
3545 struct thread_info *saved_inferior;
3546 CORE_ADDR pc;
3547 int step;
3548
3549 if (debug_threads)
3550 fprintf (stderr,
3551 "Starting step-over on LWP %ld. Stopping all threads\n",
3552 lwpid_of (lwp));
3553
3554 stop_all_lwps (1, lwp);
3555 gdb_assert (lwp->suspended == 0);
3556
3557 if (debug_threads)
3558 fprintf (stderr, "Done stopping all threads for step-over.\n");
3559
3560 /* Note, we should always reach here with an already adjusted PC,
3561 either by GDB (if we're resuming due to GDB's request), or by our
3562 caller, if we just finished handling an internal breakpoint GDB
3563 shouldn't care about. */
3564 pc = get_pc (lwp);
3565
3566 saved_inferior = current_inferior;
3567 current_inferior = get_lwp_thread (lwp);
3568
3569 lwp->bp_reinsert = pc;
3570 uninsert_breakpoints_at (pc);
3571 uninsert_fast_tracepoint_jumps_at (pc);
3572
3573 if (can_hardware_single_step ())
3574 {
3575 step = 1;
3576 }
3577 else
3578 {
3579 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3580 set_reinsert_breakpoint (raddr);
3581 step = 0;
3582 }
3583
3584 current_inferior = saved_inferior;
3585
3586 linux_resume_one_lwp (lwp, step, 0, NULL);
3587
3588 /* Require next event from this LWP. */
3589 step_over_bkpt = lwp->head.id;
3590 return 1;
3591 }
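/* Putting start_step_over and finish_step_over together, the dance is
   (a simplified sketch):

     stop_all_lwps (1, lwp);           stop and suspend everyone else
     uninsert breakpoint(s) at PC;     so LWP can execute the real insn
     single-step LWP                   (or set a reinsert breakpoint);
     ... linux_wait_1 blocks on this LWP via step_over_bkpt ...
     finish_step_over (lwp);           reinsert the breakpoint(s)
     unsuspend/unstop the others;

   While step_over_bkpt is non-null, linux_wait_1 does a blocking wait
   for an event from that LWP only.  */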
3592
3593 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3594 start_step_over, if still there, and delete any reinsert
3595 breakpoints we've set, on targets without hardware single-step. */
3596
3597 static int
3598 finish_step_over (struct lwp_info *lwp)
3599 {
3600 if (lwp->bp_reinsert != 0)
3601 {
3602 if (debug_threads)
3603 fprintf (stderr, "Finished step over.\n");
3604
3605 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3606 may be no breakpoint to reinsert there by now. */
3607 reinsert_breakpoints_at (lwp->bp_reinsert);
3608 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3609
3610 lwp->bp_reinsert = 0;
3611
3612 /* Delete any software-single-step reinsert breakpoints. No
3613 longer needed. We don't have to worry about other threads
3614 hitting this trap, and later not being able to explain it,
3615 because we were stepping over a breakpoint, and we hold all
3616 threads but LWP stopped while doing that. */
3617 if (!can_hardware_single_step ())
3618 delete_reinsert_breakpoints ();
3619
3620 step_over_bkpt = null_ptid;
3621 return 1;
3622 }
3623 else
3624 return 0;
3625 }
3626
3627 /* This function is called once per thread. We check the thread's resume
3628 request, which will tell us whether to resume, step, or leave the thread
3629 stopped; and what signal, if any, it should be sent.
3630
3631 For threads which we aren't explicitly told otherwise, we preserve
3632 the stepping flag; this is used for stepping over gdbserver-placed
3633 breakpoints.
3634
3635 If pending_flags was set in any thread, we queue any needed
3636 signals, since we won't actually resume. We already have a pending
3637 event to report, so we don't need to preserve any step requests;
3638 they should be re-issued if necessary. */
3639
3640 static int
3641 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3642 {
3643 struct lwp_info *lwp;
3644 struct thread_info *thread;
3645 int step;
3646 int leave_all_stopped = * (int *) arg;
3647 int leave_pending;
3648
3649 thread = (struct thread_info *) entry;
3650 lwp = get_thread_lwp (thread);
3651
3652 if (lwp->resume == NULL)
3653 return 0;
3654
3655 if (lwp->resume->kind == resume_stop)
3656 {
3657 if (debug_threads)
3658 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3659
3660 if (!lwp->stopped)
3661 {
3662 if (debug_threads)
3663 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3664
3665 /* Stop the thread, and wait for the event asynchronously,
3666 through the event loop. */
3667 send_sigstop (lwp);
3668 }
3669 else
3670 {
3671 if (debug_threads)
3672 fprintf (stderr, "already stopped LWP %ld\n",
3673 lwpid_of (lwp));
3674
3675 /* The LWP may have been stopped in an internal event that
3676 was not meant to be notified back to GDB (e.g., gdbserver
3677 breakpoint), so we should be reporting a stop event in
3678 this case too. */
3679
3680 /* If the thread already has a pending SIGSTOP, this is a
3681 no-op. Otherwise, something later will presumably resume
3682 the thread and this will cause it to cancel any pending
3683 operation, due to last_resume_kind == resume_stop. If
3684 the thread already has a pending status to report, we
3685 will still report it the next time we wait - see
3686 status_pending_p_callback. */
3687
3688 /* If we already have a pending signal to report, then
3689 there's no need to queue a SIGSTOP, as this means we're
3690 midway through moving the LWP out of the jump pad, and we
3691 will report the pending signal as soon as that is
3692 finished. */
3693 if (lwp->pending_signals_to_report == NULL)
3694 send_sigstop (lwp);
3695 }
3696
3697 /* For stop requests, we're done. */
3698 lwp->resume = NULL;
3699 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3700 return 0;
3701 }
3702
3703 /* If this thread, which is about to be resumed, has a pending status,
3704 then don't resume any threads - we can just report the pending
3705 status. Make sure to queue any signals that would otherwise be
3706 sent. In all-stop mode, we base this decision on whether *any*
3707 thread has a pending status. If there's a thread that needs the
3708 step-over-breakpoint dance, then don't resume any other thread
3709 but that particular one. */
3710 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3711
3712 if (!leave_pending)
3713 {
3714 if (debug_threads)
3715 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3716
3717 step = (lwp->resume->kind == resume_step);
3718 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3719 }
3720 else
3721 {
3722 if (debug_threads)
3723 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3724
3725 /* If we have a new signal, enqueue the signal. */
3726 if (lwp->resume->sig != 0)
3727 {
3728 struct pending_signals *p_sig;
3729 p_sig = xmalloc (sizeof (*p_sig));
3730 p_sig->prev = lwp->pending_signals;
3731 p_sig->signal = lwp->resume->sig;
3732 memset (&p_sig->info, 0, sizeof (siginfo_t));
3733
3734 /* If this is the same signal we were previously stopped by,
3735 make sure to queue its siginfo. We can ignore the return
3736 value of ptrace; if it fails, we'll skip
3737 PTRACE_SETSIGINFO. */
3738 if (WIFSTOPPED (lwp->last_status)
3739 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3740 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3741 &p_sig->info);
3742
3743 lwp->pending_signals = p_sig;
3744 }
3745 }
3746
3747 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3748 lwp->resume = NULL;
3749 return 0;
3750 }
3751
3752 static void
3753 linux_resume (struct thread_resume *resume_info, size_t n)
3754 {
3755 struct thread_resume_array array = { resume_info, n };
3756 struct lwp_info *need_step_over = NULL;
3757 int any_pending;
3758 int leave_all_stopped;
3759
3760 find_inferior (&all_threads, linux_set_resume_request, &array);
3761
3762 /* If there is a thread which would otherwise be resumed, which has
3763 a pending status, then don't resume any threads - we can just
3764 report the pending status. Make sure to queue any signals that
3765 would otherwise be sent. In non-stop mode, we'll apply this
3766 logic to each thread individually. We consume all pending events
3767 before considering whether to start a step-over (in all-stop). */
3768 any_pending = 0;
3769 if (!non_stop)
3770 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3771
3772 /* If there is a thread which would otherwise be resumed, which is
3773 stopped at a breakpoint that needs stepping over, then don't
3774 resume any threads - have it step over the breakpoint with all
3775 other threads stopped, then resume all threads again. Make sure
3776 to queue any signals that would otherwise be delivered or
3777 queued. */
3778 if (!any_pending && supports_breakpoints ())
3779 need_step_over
3780 = (struct lwp_info *) find_inferior (&all_lwps,
3781 need_step_over_p, NULL);
3782
3783 leave_all_stopped = (need_step_over != NULL || any_pending);
3784
3785 if (debug_threads)
3786 {
3787 if (need_step_over != NULL)
3788 fprintf (stderr, "Not resuming all, need step over\n");
3789 else if (any_pending)
3790 fprintf (stderr,
3791 "Not resuming, all-stop and found "
3792 "an LWP with pending status\n");
3793 else
3794 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3795 }
3796
3797 /* Even if we're leaving threads stopped, queue all signals we'd
3798 otherwise deliver. */
3799 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3800
3801 if (need_step_over)
3802 start_step_over (need_step_over);
3803 }
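
/* Editor's note: an illustrative sketch (not part of gdbserver) of
   driving linux_resume from a vCont-style request.  The field names
   of struct thread_resume (thread, kind, sig) are assumptions based
   on this source tree; the block is compiled out deliberately.  */
#if 0
static void
example_continue_all (int sig)
{
  struct thread_resume r;

  r.thread = minus_one_ptid;	/* Apply the request to all threads.  */
  r.kind = resume_continue;	/* As opposed to resume_step/resume_stop.  */
  r.sig = sig;			/* Signal to deliver; 0 means none.  */

  /* linux_resume records the request on each thread first
     (linux_set_resume_request), and only then resumes threads - or
     leaves them all stopped if a pending status or a needed step-over
     takes priority, as the code above shows.  */
  linux_resume (&r, 1);
}
#endif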
3804
3805 /* This function is called once per thread. We check the thread's
3806 last resume request, which will tell us whether to resume, step, or
3807 leave the thread stopped. Any signal the client requested to be
3808 delivered has already been enqueued at this point.
3809
3810 If any thread that GDB wants running is stopped at an internal
3811 breakpoint that needs stepping over, we start a step-over operation
3812 on that particular thread, and leave all others stopped. */
3813
3814 static int
3815 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3816 {
3817 struct lwp_info *lwp = (struct lwp_info *) entry;
3818 struct thread_info *thread;
3819 int step;
3820
3821 if (lwp == except)
3822 return 0;
3823
3824 if (debug_threads)
3825 fprintf (stderr,
3826 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3827
3828 if (!lwp->stopped)
3829 {
3830 if (debug_threads)
3831 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3832 return 0;
3833 }
3834
3835 thread = get_lwp_thread (lwp);
3836
3837 if (thread->last_resume_kind == resume_stop
3838 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3839 {
3840 if (debug_threads)
3841 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3842 lwpid_of (lwp));
3843 return 0;
3844 }
3845
3846 if (lwp->status_pending_p)
3847 {
3848 if (debug_threads)
3849 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3850 lwpid_of (lwp));
3851 return 0;
3852 }
3853
3854 gdb_assert (lwp->suspended >= 0);
3855
3856 if (lwp->suspended)
3857 {
3858 if (debug_threads)
3859 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3860 return 0;
3861 }
3862
3863 if (thread->last_resume_kind == resume_stop
3864 && lwp->pending_signals_to_report == NULL
3865 && lwp->collecting_fast_tracepoint == 0)
3866 {
3867 /* We haven't reported this LWP as stopped yet (otherwise, the
3868 last_status.kind check above would catch it, and we wouldn't
3869 reach here).  This LWP may have been momentarily paused by a
3870 stop_all_lwps call while handling, for example, another LWP's
3871 step-over. In that case, the pending expected SIGSTOP signal
3872 that was queued at vCont;t handling time will have already
3873 been consumed by wait_for_sigstop, and so we need to requeue
3874 another one here. Note that if the LWP already has a SIGSTOP
3875 pending, this is a no-op. */
3876
3877 if (debug_threads)
3878 fprintf (stderr,
3879 "Client wants LWP %ld to stop. "
3880 "Making sure it has a SIGSTOP pending\n",
3881 lwpid_of (lwp));
3882
3883 send_sigstop (lwp);
3884 }
3885
3886 step = thread->last_resume_kind == resume_step;
3887 linux_resume_one_lwp (lwp, step, 0, NULL);
3888 return 0;
3889 }
3890
3891 static int
3892 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3893 {
3894 struct lwp_info *lwp = (struct lwp_info *) entry;
3895
3896 if (lwp == except)
3897 return 0;
3898
3899 lwp->suspended--;
3900 gdb_assert (lwp->suspended >= 0);
3901
3902 return proceed_one_lwp (entry, except);
3903 }
3904
3905 /* When we finish a step-over, set threads running again. If there's
3906 another thread that may need a step-over, now's the time to start
3907 it. Eventually, we'll move all threads past their breakpoints. */
3908
3909 static void
3910 proceed_all_lwps (void)
3911 {
3912 struct lwp_info *need_step_over;
3913
3914 /* If there is a thread which would otherwise be resumed, which is
3915 stopped at a breakpoint that needs stepping over, then don't
3916 resume any threads - have it step over the breakpoint with all
3917 other threads stopped, then resume all threads again. */
3918
3919 if (supports_breakpoints ())
3920 {
3921 need_step_over
3922 = (struct lwp_info *) find_inferior (&all_lwps,
3923 need_step_over_p, NULL);
3924
3925 if (need_step_over != NULL)
3926 {
3927 if (debug_threads)
3928 fprintf (stderr, "proceed_all_lwps: found "
3929 "thread %ld needing a step-over\n",
3930 lwpid_of (need_step_over));
3931
3932 start_step_over (need_step_over);
3933 return;
3934 }
3935 }
3936
3937 if (debug_threads)
3938 fprintf (stderr, "Proceeding, no step-over needed\n");
3939
3940 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3941 }
3942
3943 /* Stopped LWPs that the client wanted to be running and that don't
3944 have pending statuses are set to run again, except for EXCEPT (if
3945 non-NULL). This undoes a stop_all_lwps call. */
3946
3947 static void
3948 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3949 {
3950 if (debug_threads)
3951 {
3952 if (except)
3953 fprintf (stderr,
3954 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3955 else
3956 fprintf (stderr,
3957 "unstopping all lwps\n");
3958 }
3959
3960 if (unsuspend)
3961 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3962 else
3963 find_inferior (&all_lwps, proceed_one_lwp, except);
3964 }
3965
3966
3967 #ifdef HAVE_LINUX_REGSETS
3968
3969 #define use_linux_regsets 1
3970
3971 /* Returns true if REGSET has been disabled. */
3972
3973 static int
3974 regset_disabled (struct regsets_info *info, struct regset_info *regset)
3975 {
3976 return (info->disabled_regsets != NULL
3977 && info->disabled_regsets[regset - info->regsets]);
3978 }
3979
3980 /* Disable REGSET. */
3981
3982 static void
3983 disable_regset (struct regsets_info *info, struct regset_info *regset)
3984 {
3985 int dr_offset;
3986
3987 dr_offset = regset - info->regsets;
3988 if (info->disabled_regsets == NULL)
3989 info->disabled_regsets = xcalloc (1, info->num_regsets);
3990 info->disabled_regsets[dr_offset] = 1;
3991 }
3992
3993 static int
3994 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
3995 struct regcache *regcache)
3996 {
3997 struct regset_info *regset;
3998 int saw_general_regs = 0;
3999 int pid;
4000 struct iovec iov;
4001
4002 regset = regsets_info->regsets;
4003
4004 pid = lwpid_of (get_thread_lwp (current_inferior));
4005 while (regset->size >= 0)
4006 {
4007 void *buf, *data;
4008 int nt_type, res;
4009
4010 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4011 {
4012 regset ++;
4013 continue;
4014 }
4015
4016 buf = xmalloc (regset->size);
4017
4018 nt_type = regset->nt_type;
4019 if (nt_type)
4020 {
4021 iov.iov_base = buf;
4022 iov.iov_len = regset->size;
4023 data = (void *) &iov;
4024 }
4025 else
4026 data = buf;
4027
4028 #ifndef __sparc__
4029 res = ptrace (regset->get_request, pid,
4030 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4031 #else
4032 res = ptrace (regset->get_request, pid, data, nt_type);
4033 #endif
4034 if (res < 0)
4035 {
4036 if (errno == EIO)
4037 {
4038 /* If we get EIO on a regset, do not try it again for
4039 this process mode. */
4040 disable_regset (regsets_info, regset);
4041 free (buf);
4042 continue;
4043 }
4044 else
4045 {
4046 char s[256];
4047 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4048 pid);
4049 perror (s);
4050 }
4051 }
4052 else if (regset->type == GENERAL_REGS)
4053 saw_general_regs = 1;
4054 regset->store_function (regcache, buf);
4055 regset ++;
4056 free (buf);
4057 }
4058 if (saw_general_regs)
4059 return 0;
4060 else
4061 return 1;
4062 }
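
/* Editor's note: a minimal standalone sketch of the iovec-based
   transfer used above when a regset has an NT_TYPE, i.e. the modern
   PTRACE_GETREGSET request.  Linux-specific; error handling trimmed;
   compiled out deliberately.  */
#if 0
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static long
example_fetch_gregs (pid_t pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* The kernel fills at most iov_len bytes and updates iov_len to
     the number actually transferred.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif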
4063
4064 static int
4065 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4066 struct regcache *regcache)
4067 {
4068 struct regset_info *regset;
4069 int saw_general_regs = 0;
4070 int pid;
4071 struct iovec iov;
4072
4073 regset = regsets_info->regsets;
4074
4075 pid = lwpid_of (get_thread_lwp (current_inferior));
4076 while (regset->size >= 0)
4077 {
4078 void *buf, *data;
4079 int nt_type, res;
4080
4081 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4082 {
4083 regset ++;
4084 continue;
4085 }
4086
4087 buf = xmalloc (regset->size);
4088
4089 /* First fill the buffer with the current register set contents,
4090 in case there are any items in the kernel's regset that are
4091 not in gdbserver's regcache. */
4092
4093 nt_type = regset->nt_type;
4094 if (nt_type)
4095 {
4096 iov.iov_base = buf;
4097 iov.iov_len = regset->size;
4098 data = (void *) &iov;
4099 }
4100 else
4101 data = buf;
4102
4103 #ifndef __sparc__
4104 res = ptrace (regset->get_request, pid,
4105 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4106 #else
4107 res = ptrace (regset->get_request, pid, data, nt_type);
4108 #endif
4109
4110 if (res == 0)
4111 {
4112 /* Then overlay our cached registers on that. */
4113 regset->fill_function (regcache, buf);
4114
4115 /* Only now do we write the register set. */
4116 #ifndef __sparc__
4117 res = ptrace (regset->set_request, pid,
4118 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4119 #else
4120 res = ptrace (regset->set_request, pid, data, nt_type);
4121 #endif
4122 }
4123
4124 if (res < 0)
4125 {
4126 if (errno == EIO)
4127 {
4128 /* If we get EIO on a regset, do not try it again for
4129 this process mode. */
4130 disable_regset (regsets_info, regset);
4131 free (buf);
4132 continue;
4133 }
4134 else if (errno == ESRCH)
4135 {
4136 /* At this point, ESRCH should mean the process is
4137 already gone, in which case we simply ignore attempts
4138 to change its registers. See also the related
4139 comment in linux_resume_one_lwp. */
4140 free (buf);
4141 return 0;
4142 }
4143 else
4144 {
4145 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4146 }
4147 }
4148 else if (regset->type == GENERAL_REGS)
4149 saw_general_regs = 1;
4150 regset ++;
4151 free (buf);
4152 }
4153 if (saw_general_regs)
4154 return 0;
4155 else
4156 return 1;
4157 }
4158
4159 #else /* !HAVE_LINUX_REGSETS */
4160
4161 #define use_linux_regsets 0
4162 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4163 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4164
4165 #endif
4166
4167 /* Return 1 if register REGNO is supported by one of the regset ptrace
4168 calls or 0 if it has to be transferred individually. */
4169
4170 static int
4171 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4172 {
4173 unsigned char mask = 1 << (regno % 8);
4174 size_t index = regno / 8;
4175
4176 return (use_linux_regsets
4177 && (regs_info->regset_bitmap == NULL
4178 || (regs_info->regset_bitmap[index] & mask) != 0));
4179 }
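
/* Editor's note: a worked example of the bitmap test above.  For
   regno == 10: index = 10 / 8 = 1 and mask = 1 << (10 % 8) = 0x04, so
   register 10 is handled by regsets iff bit 2 of regset_bitmap[1] is
   set - or unconditionally, if regset_bitmap is NULL.  */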
4180
4181 #ifdef HAVE_LINUX_USRREGS
4182
4183 int
4184 register_addr (const struct usrregs_info *usrregs, int regnum)
4185 {
4186 int addr;
4187
4188 if (regnum < 0 || regnum >= usrregs->num_regs)
4189 error ("Invalid register number %d.", regnum);
4190
4191 addr = usrregs->regmap[regnum];
4192
4193 return addr;
4194 }
4195
4196 /* Fetch one register. */
4197 static void
4198 fetch_register (const struct usrregs_info *usrregs,
4199 struct regcache *regcache, int regno)
4200 {
4201 CORE_ADDR regaddr;
4202 int i, size;
4203 char *buf;
4204 int pid;
4205
4206 if (regno >= usrregs->num_regs)
4207 return;
4208 if ((*the_low_target.cannot_fetch_register) (regno))
4209 return;
4210
4211 regaddr = register_addr (usrregs, regno);
4212 if (regaddr == -1)
4213 return;
4214
4215 size = ((register_size (regcache->tdesc, regno)
4216 + sizeof (PTRACE_XFER_TYPE) - 1)
4217 & -sizeof (PTRACE_XFER_TYPE));
4218 buf = alloca (size);
4219
4220 pid = lwpid_of (get_thread_lwp (current_inferior));
4221 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4222 {
4223 errno = 0;
4224 *(PTRACE_XFER_TYPE *) (buf + i) =
4225 ptrace (PTRACE_PEEKUSER, pid,
4226 /* Coerce to a uintptr_t first to avoid potential gcc warning
4227 about coercing an 8 byte integer to a 4 byte pointer. */
4228 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4229 regaddr += sizeof (PTRACE_XFER_TYPE);
4230 if (errno != 0)
4231 error ("reading register %d: %s", regno, strerror (errno));
4232 }
4233
4234 if (the_low_target.supply_ptrace_register)
4235 the_low_target.supply_ptrace_register (regcache, regno, buf);
4236 else
4237 supply_register (regcache, regno, buf);
4238 }
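
/* Editor's note: PTRACE_PEEKUSER returns the peeked word itself, so a
   return value of -1 is ambiguous; clearing errno before the call and
   testing it afterwards, as the loop above does, is the only reliable
   error check.  A minimal sketch, compiled out deliberately: */
#if 0
#include <errno.h>
#include <sys/ptrace.h>

static long
example_peek_user (pid_t pid, long offset, int *errp)
{
  long val;

  errno = 0;
  val = ptrace (PTRACE_PEEKUSER, pid, (void *) offset, 0);
  *errp = errno;		/* Nonzero only on a real failure.  */
  return val;
}
#endif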
4239
4240 /* Store one register. */
4241 static void
4242 store_register (const struct usrregs_info *usrregs,
4243 struct regcache *regcache, int regno)
4244 {
4245 CORE_ADDR regaddr;
4246 int i, size;
4247 char *buf;
4248 int pid;
4249
4250 if (regno >= usrregs->num_regs)
4251 return;
4252 if ((*the_low_target.cannot_store_register) (regno))
4253 return;
4254
4255 regaddr = register_addr (usrregs, regno);
4256 if (regaddr == -1)
4257 return;
4258
4259 size = ((register_size (regcache->tdesc, regno)
4260 + sizeof (PTRACE_XFER_TYPE) - 1)
4261 & -sizeof (PTRACE_XFER_TYPE));
4262 buf = alloca (size);
4263 memset (buf, 0, size);
4264
4265 if (the_low_target.collect_ptrace_register)
4266 the_low_target.collect_ptrace_register (regcache, regno, buf);
4267 else
4268 collect_register (regcache, regno, buf);
4269
4270 pid = lwpid_of (get_thread_lwp (current_inferior));
4271 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4272 {
4273 errno = 0;
4274 ptrace (PTRACE_POKEUSER, pid,
4275 /* Coerce to a uintptr_t first to avoid potential gcc warning
4276 about coercing an 8 byte integer to a 4 byte pointer. */
4277 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4278 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4279 if (errno != 0)
4280 {
4281 /* At this point, ESRCH should mean the process is
4282 already gone, in which case we simply ignore attempts
4283 to change its registers. See also the related
4284 comment in linux_resume_one_lwp. */
4285 if (errno == ESRCH)
4286 return;
4287
4288 if ((*the_low_target.cannot_store_register) (regno) == 0)
4289 error ("writing register %d: %s", regno, strerror (errno));
4290 }
4291 regaddr += sizeof (PTRACE_XFER_TYPE);
4292 }
4293 }
4294
4295 /* Fetch all registers, or just one, from the child process.
4296 If REGNO is -1, do this for all registers, skipping any that are
4297 assumed to have been retrieved by regsets_fetch_inferior_registers,
4298 unless ALL is non-zero.
4299 Otherwise, REGNO specifies which register (so we can save time). */
4300 static void
4301 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4302 struct regcache *regcache, int regno, int all)
4303 {
4304 struct usrregs_info *usr = regs_info->usrregs;
4305
4306 if (regno == -1)
4307 {
4308 for (regno = 0; regno < usr->num_regs; regno++)
4309 if (all || !linux_register_in_regsets (regs_info, regno))
4310 fetch_register (usr, regcache, regno);
4311 }
4312 else
4313 fetch_register (usr, regcache, regno);
4314 }
4315
4316 /* Store our register values back into the inferior.
4317 If REGNO is -1, do this for all registers, skipping any that are
4318 assumed to have been saved by regsets_store_inferior_registers,
4319 unless ALL is non-zero.
4320 Otherwise, REGNO specifies which register (so we can save time). */
4321 static void
4322 usr_store_inferior_registers (const struct regs_info *regs_info,
4323 struct regcache *regcache, int regno, int all)
4324 {
4325 struct usrregs_info *usr = regs_info->usrregs;
4326
4327 if (regno == -1)
4328 {
4329 for (regno = 0; regno < usr->num_regs; regno++)
4330 if (all || !linux_register_in_regsets (regs_info, regno))
4331 store_register (usr, regcache, regno);
4332 }
4333 else
4334 store_register (usr, regcache, regno);
4335 }
4336
4337 #else /* !HAVE_LINUX_USRREGS */
4338
4339 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4340 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4341
4342 #endif
4343
4344
4345 void
4346 linux_fetch_registers (struct regcache *regcache, int regno)
4347 {
4348 int use_regsets;
4349 int all = 0;
4350 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4351
4352 if (regno == -1)
4353 {
4354 if (the_low_target.fetch_register != NULL
4355 && regs_info->usrregs != NULL)
4356 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4357 (*the_low_target.fetch_register) (regcache, regno);
4358
4359 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4360 if (regs_info->usrregs != NULL)
4361 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4362 }
4363 else
4364 {
4365 if (the_low_target.fetch_register != NULL
4366 && (*the_low_target.fetch_register) (regcache, regno))
4367 return;
4368
4369 use_regsets = linux_register_in_regsets (regs_info, regno);
4370 if (use_regsets)
4371 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4372 regcache);
4373 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4374 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4375 }
4376 }
4377
4378 void
4379 linux_store_registers (struct regcache *regcache, int regno)
4380 {
4381 int use_regsets;
4382 int all = 0;
4383 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4384
4385 if (regno == -1)
4386 {
4387 all = regsets_store_inferior_registers (regs_info->regsets_info,
4388 regcache);
4389 if (regs_info->usrregs != NULL)
4390 usr_store_inferior_registers (regs_info, regcache, regno, all);
4391 }
4392 else
4393 {
4394 use_regsets = linux_register_in_regsets (regs_info, regno);
4395 if (use_regsets)
4396 all = regsets_store_inferior_registers (regs_info->regsets_info,
4397 regcache);
4398 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4399 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4400 }
4401 }
4402
4403
4404 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4405 to debugger memory starting at MYADDR. */
4406
4407 static int
4408 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4409 {
4410 int pid = lwpid_of (get_thread_lwp (current_inferior));
4411 register PTRACE_XFER_TYPE *buffer;
4412 register CORE_ADDR addr;
4413 register int count;
4414 char filename[64];
4415 register int i;
4416 int ret;
4417 int fd;
4418
4419 /* Try using /proc. Don't bother for one word. */
4420 if (len >= 3 * sizeof (long))
4421 {
4422 int bytes;
4423
4424 /* We could keep this file open and cache it - possibly one per
4425 thread. That requires some juggling, but is even faster. */
4426 sprintf (filename, "/proc/%d/mem", pid);
4427 fd = open (filename, O_RDONLY | O_LARGEFILE);
4428 if (fd == -1)
4429 goto no_proc;
4430
4431 /* If pread64 is available, use it. It's faster if the kernel
4432 supports it (only one syscall), and it's 64-bit safe even on
4433 32-bit platforms (for instance, SPARC debugging a SPARC64
4434 application). */
4435 #ifdef HAVE_PREAD64
4436 bytes = pread64 (fd, myaddr, len, memaddr);
4437 #else
4438 bytes = -1;
4439 if (lseek (fd, memaddr, SEEK_SET) != -1)
4440 bytes = read (fd, myaddr, len);
4441 #endif
4442
4443 close (fd);
4444 if (bytes == len)
4445 return 0;
4446
4447 /* Some data was read; we'll try to get the rest with ptrace. */
4448 if (bytes > 0)
4449 {
4450 memaddr += bytes;
4451 myaddr += bytes;
4452 len -= bytes;
4453 }
4454 }
4455
4456 no_proc:
4457 /* Round starting address down to longword boundary. */
4458 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4459 /* Round ending address up; get number of longwords that makes. */
4460 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4461 / sizeof (PTRACE_XFER_TYPE));
4462 /* Allocate buffer of that many longwords. */
4463 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4464
4465 /* Read all the longwords. */
4466 errno = 0;
4467 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4468 {
4469 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4470 about coercing an 8 byte integer to a 4 byte pointer. */
4471 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4472 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4473 (PTRACE_TYPE_ARG4) 0);
4474 if (errno)
4475 break;
4476 }
4477 ret = errno;
4478
4479 /* Copy appropriate bytes out of the buffer. */
4480 if (i > 0)
4481 {
4482 i *= sizeof (PTRACE_XFER_TYPE);
4483 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4484 memcpy (myaddr,
4485 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4486 i < len ? i : len);
4487 }
4488
4489 return ret;
4490 }
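
/* Editor's note: a standalone sketch of the fast path above - reading
   a traced process's memory through /proc/PID/mem.  The target must
   already be ptrace-attached and stopped for the kernel to permit the
   read.  Linux-specific; compiled out deliberately.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
example_proc_mem_read (pid_t pid, off_t addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof filename, "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;
  /* pread combines the seek and the read, like the pread64 branch.  */
  n = pread (fd, buf, len, addr);
  close (fd);
  return n;
}
#endif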
4491
4492 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4493 memory at MEMADDR. On failure (cannot write to the inferior)
4494 returns the value of errno. Always succeeds if LEN is zero. */
4495
4496 static int
4497 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4498 {
4499 register int i;
4500 /* Round starting address down to longword boundary. */
4501 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4502 /* Round ending address up; get number of longwords that makes. */
4503 register int count
4504 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4505 / sizeof (PTRACE_XFER_TYPE);
4506
4507 /* Allocate buffer of that many longwords. */
4508 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4509 alloca (count * sizeof (PTRACE_XFER_TYPE));
4510
4511 int pid = lwpid_of (get_thread_lwp (current_inferior));
4512
4513 if (len == 0)
4514 {
4515 /* Zero length write always succeeds. */
4516 return 0;
4517 }
4518
4519 if (debug_threads)
4520 {
4521 /* Dump up to four bytes. */
4522 unsigned int val = * (unsigned int *) myaddr;
4523 if (len == 1)
4524 val = val & 0xff;
4525 else if (len == 2)
4526 val = val & 0xffff;
4527 else if (len == 3)
4528 val = val & 0xffffff;
4529 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4530 val, (long)memaddr);
4531 }
4532
4533 /* Fill start and end extra bytes of buffer with existing memory data. */
4534
4535 errno = 0;
4536 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4537 about coercing an 8 byte integer to a 4 byte pointer. */
4538 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4539 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4540 (PTRACE_TYPE_ARG4) 0);
4541 if (errno)
4542 return errno;
4543
4544 if (count > 1)
4545 {
4546 errno = 0;
4547 buffer[count - 1]
4548 = ptrace (PTRACE_PEEKTEXT, pid,
4549 /* Coerce to a uintptr_t first to avoid potential gcc warning
4550 about coercing an 8 byte integer to a 4 byte pointer. */
4551 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4552 * sizeof (PTRACE_XFER_TYPE)),
4553 (PTRACE_TYPE_ARG4) 0);
4554 if (errno)
4555 return errno;
4556 }
4557
4558 /* Copy data to be written over corresponding part of buffer. */
4559
4560 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4561 myaddr, len);
4562
4563 /* Write the entire buffer. */
4564
4565 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4566 {
4567 errno = 0;
4568 ptrace (PTRACE_POKETEXT, pid,
4569 /* Coerce to a uintptr_t first to avoid potential gcc warning
4570 about coercing an 8 byte integer to a 4 byte pointer. */
4571 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4572 (PTRACE_TYPE_ARG4) buffer[i]);
4573 if (errno)
4574 return errno;
4575 }
4576
4577 return 0;
4578 }
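
/* Editor's note: the word-alignment dance above in miniature.  ptrace
   transfers whole words, so writing at byte granularity means reading
   the containing word, splicing the new bytes in, and poking the word
   back.  Sketch for a single byte; compiled out deliberately.  */
#if 0
#include <errno.h>
#include <sys/ptrace.h>

static int
example_poke_byte (pid_t pid, unsigned long addr, unsigned char byte)
{
  unsigned long word_addr = addr & ~(unsigned long) (sizeof (long) - 1);
  union { long word; unsigned char bytes[sizeof (long)]; } u;

  errno = 0;
  u.word = ptrace (PTRACE_PEEKTEXT, pid, (void *) word_addr, 0);
  if (errno != 0)
    return errno;
  u.bytes[addr - word_addr] = byte;	/* Splice in the new byte.  */
  if (ptrace (PTRACE_POKETEXT, pid, (void *) word_addr,
	      (void *) u.word) != 0)
    return errno;
  return 0;
}
#endif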
4579
4580 static void
4581 linux_look_up_symbols (void)
4582 {
4583 #ifdef USE_THREAD_DB
4584 struct process_info *proc = current_process ();
4585
4586 if (proc->private->thread_db != NULL)
4587 return;
4588
4589 /* If the kernel supports tracing clones, then we don't need to
4590 use the magic thread event breakpoint to learn about
4591 threads. */
4592 thread_db_init (!linux_supports_traceclone ());
4593 #endif
4594 }
4595
4596 static void
4597 linux_request_interrupt (void)
4598 {
4599 extern unsigned long signal_pid;
4600
4601 if (!ptid_equal (cont_thread, null_ptid)
4602 && !ptid_equal (cont_thread, minus_one_ptid))
4603 {
4604 struct lwp_info *lwp;
4605 int lwpid;
4606
4607 lwp = get_thread_lwp (current_inferior);
4608 lwpid = lwpid_of (lwp);
4609 kill_lwp (lwpid, SIGINT);
4610 }
4611 else
4612 kill_lwp (signal_pid, SIGINT);
4613 }
4614
4615 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4616 to debugger memory starting at MYADDR. */
4617
4618 static int
4619 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4620 {
4621 char filename[PATH_MAX];
4622 int fd, n;
4623 int pid = lwpid_of (get_thread_lwp (current_inferior));
4624
4625 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4626
4627 fd = open (filename, O_RDONLY);
4628 if (fd < 0)
4629 return -1;
4630
4631 if (offset != (CORE_ADDR) 0
4632 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4633 n = -1;
4634 else
4635 n = read (fd, myaddr, len);
4636
4637 close (fd);
4638
4639 return n;
4640 }
4641
4642 /* These breakpoint and watchpoint related wrapper functions simply
4643 pass on the function call if the target has registered a
4644 corresponding function. */
4645
4646 static int
4647 linux_insert_point (char type, CORE_ADDR addr, int len)
4648 {
4649 if (the_low_target.insert_point != NULL)
4650 return the_low_target.insert_point (type, addr, len);
4651 else
4652 /* Unsupported (see target.h). */
4653 return 1;
4654 }
4655
4656 static int
4657 linux_remove_point (char type, CORE_ADDR addr, int len)
4658 {
4659 if (the_low_target.remove_point != NULL)
4660 return the_low_target.remove_point (type, addr, len);
4661 else
4662 /* Unsupported (see target.h). */
4663 return 1;
4664 }
4665
4666 static int
4667 linux_stopped_by_watchpoint (void)
4668 {
4669 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4670
4671 return lwp->stopped_by_watchpoint;
4672 }
4673
4674 static CORE_ADDR
4675 linux_stopped_data_address (void)
4676 {
4677 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4678
4679 return lwp->stopped_data_address;
4680 }
4681
4682 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4683 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4684 && defined(PT_TEXT_END_ADDR)
4685
4686 /* This is only used for targets that define PT_TEXT_ADDR,
4687 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, presumably
4688 the target has different ways of acquiring this information, like
4689 loadmaps. */
4690
4691 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4692 to tell gdb about. */
4693
4694 static int
4695 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4696 {
4697 unsigned long text, text_end, data;
4698 int pid = lwpid_of (get_thread_lwp (current_inferior));
4699
4700 errno = 0;
4701
4702 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4703 (PTRACE_TYPE_ARG4) 0);
4704 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4705 (PTRACE_TYPE_ARG4) 0);
4706 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4707 (PTRACE_TYPE_ARG4) 0);
4708
4709 if (errno == 0)
4710 {
4711 /* Both text and data offsets produced at compile-time (and so
4712 used by gdb) are relative to the beginning of the program,
4713 with the data segment immediately following the text segment.
4714 However, the actual runtime layout in memory may put the data
4715 somewhere else, so when we send gdb a data base-address, we
4716 use the real data base address and subtract the compile-time
4717 data base-address from it (which is just the length of the
4718 text segment). BSS immediately follows data in both
4719 cases. */
4720 *text_p = text;
4721 *data_p = data - (text_end - text);
4722
4723 return 1;
4724 }
4725 return 0;
4726 }
4727 #endif
4728
4729 static int
4730 linux_qxfer_osdata (const char *annex,
4731 unsigned char *readbuf, unsigned const char *writebuf,
4732 CORE_ADDR offset, int len)
4733 {
4734 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4735 }
4736
4737 /* Convert a native/host siginfo object into/from the siginfo in the
4738 layout of the inferior's architecture. */
4739
4740 static void
4741 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4742 {
4743 int done = 0;
4744
4745 if (the_low_target.siginfo_fixup != NULL)
4746 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4747
4748 /* If there was no callback, or the callback didn't do anything,
4749 then just do a straight memcpy. */
4750 if (!done)
4751 {
4752 if (direction == 1)
4753 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4754 else
4755 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4756 }
4757 }
4758
4759 static int
4760 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4761 unsigned const char *writebuf, CORE_ADDR offset, int len)
4762 {
4763 int pid;
4764 siginfo_t siginfo;
4765 char inf_siginfo[sizeof (siginfo_t)];
4766
4767 if (current_inferior == NULL)
4768 return -1;
4769
4770 pid = lwpid_of (get_thread_lwp (current_inferior));
4771
4772 if (debug_threads)
4773 fprintf (stderr, "%s siginfo for lwp %d.\n",
4774 readbuf != NULL ? "Reading" : "Writing",
4775 pid);
4776
4777 if (offset >= sizeof (siginfo))
4778 return -1;
4779
4780 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4781 return -1;
4782
4783 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4784 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4785 inferior with a 64-bit GDBSERVER should look the same as debugging it
4786 with a 32-bit GDBSERVER, we need to convert it. */
4787 siginfo_fixup (&siginfo, inf_siginfo, 0);
4788
4789 if (offset + len > sizeof (siginfo))
4790 len = sizeof (siginfo) - offset;
4791
4792 if (readbuf != NULL)
4793 memcpy (readbuf, inf_siginfo + offset, len);
4794 else
4795 {
4796 memcpy (inf_siginfo + offset, writebuf, len);
4797
4798 /* Convert back to ptrace layout before flushing it out. */
4799 siginfo_fixup (&siginfo, inf_siginfo, 1);
4800
4801 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4802 return -1;
4803 }
4804
4805 return len;
4806 }
4807
4808 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4809 it lets us notice when children change state; and it acts as the
4810 handler for the sigsuspend in my_waitpid. */
4811
4812 static void
4813 sigchld_handler (int signo)
4814 {
4815 int old_errno = errno;
4816
4817 if (debug_threads)
4818 {
4819 do
4820 {
4821 /* fprintf is not async-signal-safe, so call write
4822 directly. */
4823 if (write (2, "sigchld_handler\n",
4824 sizeof ("sigchld_handler\n") - 1) < 0)
4825 break; /* just ignore */
4826 } while (0);
4827 }
4828
4829 if (target_is_async_p ())
4830 async_file_mark (); /* trigger a linux_wait */
4831
4832 errno = old_errno;
4833 }
4834
4835 static int
4836 linux_supports_non_stop (void)
4837 {
4838 return 1;
4839 }
4840
4841 static int
4842 linux_async (int enable)
4843 {
4844 int previous = (linux_event_pipe[0] != -1);
4845
4846 if (debug_threads)
4847 fprintf (stderr, "linux_async (%d), previous=%d\n",
4848 enable, previous);
4849
4850 if (previous != enable)
4851 {
4852 sigset_t mask;
4853 sigemptyset (&mask);
4854 sigaddset (&mask, SIGCHLD);
4855
4856 sigprocmask (SIG_BLOCK, &mask, NULL);
4857
4858 if (enable)
4859 {
4860 if (pipe (linux_event_pipe) == -1)
4861 fatal ("creating event pipe failed.");
4862
4863 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4864 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4865
4866 /* Register the event loop handler. */
4867 add_file_handler (linux_event_pipe[0],
4868 handle_target_event, NULL);
4869
4870 /* Always trigger a linux_wait. */
4871 async_file_mark ();
4872 }
4873 else
4874 {
4875 delete_file_handler (linux_event_pipe[0]);
4876
4877 close (linux_event_pipe[0]);
4878 close (linux_event_pipe[1]);
4879 linux_event_pipe[0] = -1;
4880 linux_event_pipe[1] = -1;
4881 }
4882
4883 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4884 }
4885
4886 return previous;
4887 }
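
/* Editor's note: the event pipe managed above is the classic
   "self-pipe trick" - the only async-signal-safe way to wake an event
   loop from a signal handler.  A minimal sketch, independent of
   gdbserver's event-loop API; compiled out deliberately.  */
#if 0
#include <unistd.h>

static int example_pipe[2];	/* Both ends set O_NONBLOCK, as above.  */

/* Called from the SIGCHLD handler; write is async-signal-safe where
   fprintf is not.  A full pipe just means a wakeup is already queued.  */
static void
example_mark (void)
{
  if (write (example_pipe[1], "+", 1) < 0)
    ;				/* Ignore: a wakeup is already pending.  */
}

/* The event loop polls example_pipe[0] for readability, drains it,
   and then calls waitpid with WNOHANG to collect the real events.  */
#endif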
4888
4889 static int
4890 linux_start_non_stop (int nonstop)
4891 {
4892 /* Register or unregister from event-loop accordingly. */
4893 linux_async (nonstop);
4894 return 0;
4895 }
4896
4897 static int
4898 linux_supports_multi_process (void)
4899 {
4900 return 1;
4901 }
4902
4903 static int
4904 linux_supports_disable_randomization (void)
4905 {
4906 #ifdef HAVE_PERSONALITY
4907 return 1;
4908 #else
4909 return 0;
4910 #endif
4911 }
4912
4913 static int
4914 linux_supports_agent (void)
4915 {
4916 return 1;
4917 }
4918
4919 static int
4920 linux_supports_range_stepping (void)
4921 {
4922 if (*the_low_target.supports_range_stepping == NULL)
4923 return 0;
4924
4925 return (*the_low_target.supports_range_stepping) ();
4926 }
4927
4928 /* Enumerate spufs IDs for process PID. */
4929 static int
4930 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4931 {
4932 int pos = 0;
4933 int written = 0;
4934 char path[128];
4935 DIR *dir;
4936 struct dirent *entry;
4937
4938 sprintf (path, "/proc/%ld/fd", pid);
4939 dir = opendir (path);
4940 if (!dir)
4941 return -1;
4942
4943 rewinddir (dir);
4944 while ((entry = readdir (dir)) != NULL)
4945 {
4946 struct stat st;
4947 struct statfs stfs;
4948 int fd;
4949
4950 fd = atoi (entry->d_name);
4951 if (!fd)
4952 continue;
4953
4954 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4955 if (stat (path, &st) != 0)
4956 continue;
4957 if (!S_ISDIR (st.st_mode))
4958 continue;
4959
4960 if (statfs (path, &stfs) != 0)
4961 continue;
4962 if (stfs.f_type != SPUFS_MAGIC)
4963 continue;
4964
4965 if (pos >= offset && pos + 4 <= offset + len)
4966 {
4967 *(unsigned int *)(buf + pos - offset) = fd;
4968 written += 4;
4969 }
4970 pos += 4;
4971 }
4972
4973 closedir (dir);
4974 return written;
4975 }
4976
4977 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4978 object type, using the /proc file system. */
4979 static int
4980 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4981 unsigned const char *writebuf,
4982 CORE_ADDR offset, int len)
4983 {
4984 long pid = lwpid_of (get_thread_lwp (current_inferior));
4985 char buf[128];
4986 int fd = 0;
4987 int ret = 0;
4988
4989 if (!writebuf && !readbuf)
4990 return -1;
4991
4992 if (!*annex)
4993 {
4994 if (!readbuf)
4995 return -1;
4996 else
4997 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4998 }
4999
5000 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5001 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5002 if (fd <= 0)
5003 return -1;
5004
5005 if (offset != 0
5006 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5007 {
5008 close (fd);
5009 return 0;
5010 }
5011
5012 if (writebuf)
5013 ret = write (fd, writebuf, (size_t) len);
5014 else
5015 ret = read (fd, readbuf, (size_t) len);
5016
5017 close (fd);
5018 return ret;
5019 }
5020
5021 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5022 struct target_loadseg
5023 {
5024 /* Core address to which the segment is mapped. */
5025 Elf32_Addr addr;
5026 /* VMA recorded in the program header. */
5027 Elf32_Addr p_vaddr;
5028 /* Size of this segment in memory. */
5029 Elf32_Word p_memsz;
5030 };
5031
5032 # if defined PT_GETDSBT
5033 struct target_loadmap
5034 {
5035 /* Protocol version number, must be zero. */
5036 Elf32_Word version;
5037 /* Pointer to the DSBT table, its size, and the DSBT index. */
5038 unsigned *dsbt_table;
5039 unsigned dsbt_size, dsbt_index;
5040 /* Number of segments in this map. */
5041 Elf32_Word nsegs;
5042 /* The actual memory map. */
5043 struct target_loadseg segs[/*nsegs*/];
5044 };
5045 # define LINUX_LOADMAP PT_GETDSBT
5046 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5047 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5048 # else
5049 struct target_loadmap
5050 {
5051 /* Protocol version number, must be zero. */
5052 Elf32_Half version;
5053 /* Number of segments in this map. */
5054 Elf32_Half nsegs;
5055 /* The actual memory map. */
5056 struct target_loadseg segs[/*nsegs*/];
5057 };
5058 # define LINUX_LOADMAP PTRACE_GETFDPIC
5059 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5060 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5061 # endif
5062
5063 static int
5064 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5065 unsigned char *myaddr, unsigned int len)
5066 {
5067 int pid = lwpid_of (get_thread_lwp (current_inferior));
5068 int addr = -1;
5069 struct target_loadmap *data = NULL;
5070 unsigned int actual_length, copy_length;
5071
5072 if (strcmp (annex, "exec") == 0)
5073 addr = (int) LINUX_LOADMAP_EXEC;
5074 else if (strcmp (annex, "interp") == 0)
5075 addr = (int) LINUX_LOADMAP_INTERP;
5076 else
5077 return -1;
5078
5079 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5080 return -1;
5081
5082 if (data == NULL)
5083 return -1;
5084
5085 actual_length = sizeof (struct target_loadmap)
5086 + sizeof (struct target_loadseg) * data->nsegs;
5087
5088 if (offset < 0 || offset > actual_length)
5089 return -1;
5090
5091 copy_length = actual_length - offset < len ? actual_length - offset : len;
5092 memcpy (myaddr, (char *) data + offset, copy_length);
5093 return copy_length;
5094 }
5095 #else
5096 # define linux_read_loadmap NULL
5097 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5098
5099 static void
5100 linux_process_qsupported (const char *query)
5101 {
5102 if (the_low_target.process_qsupported != NULL)
5103 the_low_target.process_qsupported (query);
5104 }
5105
5106 static int
5107 linux_supports_tracepoints (void)
5108 {
5109 if (*the_low_target.supports_tracepoints == NULL)
5110 return 0;
5111
5112 return (*the_low_target.supports_tracepoints) ();
5113 }
5114
5115 static CORE_ADDR
5116 linux_read_pc (struct regcache *regcache)
5117 {
5118 if (the_low_target.get_pc == NULL)
5119 return 0;
5120
5121 return (*the_low_target.get_pc) (regcache);
5122 }
5123
5124 static void
5125 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5126 {
5127 gdb_assert (the_low_target.set_pc != NULL);
5128
5129 (*the_low_target.set_pc) (regcache, pc);
5130 }
5131
5132 static int
5133 linux_thread_stopped (struct thread_info *thread)
5134 {
5135 return get_thread_lwp (thread)->stopped;
5136 }
5137
5138 /* This exposes stop-all-threads functionality to other modules. */
5139
5140 static void
5141 linux_pause_all (int freeze)
5142 {
5143 stop_all_lwps (freeze, NULL);
5144 }
5145
5146 /* This exposes unstop-all-threads functionality to other gdbserver
5147 modules. */
5148
5149 static void
5150 linux_unpause_all (int unfreeze)
5151 {
5152 unstop_all_lwps (unfreeze, NULL);
5153 }
5154
5155 static int
5156 linux_prepare_to_access_memory (void)
5157 {
5158 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5159 running LWP. */
5160 if (non_stop)
5161 linux_pause_all (1);
5162 return 0;
5163 }
5164
5165 static void
5166 linux_done_accessing_memory (void)
5167 {
5168 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5169 running LWP. */
5170 if (non_stop)
5171 linux_unpause_all (1);
5172 }
5173
5174 static int
5175 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5176 CORE_ADDR collector,
5177 CORE_ADDR lockaddr,
5178 ULONGEST orig_size,
5179 CORE_ADDR *jump_entry,
5180 CORE_ADDR *trampoline,
5181 ULONGEST *trampoline_size,
5182 unsigned char *jjump_pad_insn,
5183 ULONGEST *jjump_pad_insn_size,
5184 CORE_ADDR *adjusted_insn_addr,
5185 CORE_ADDR *adjusted_insn_addr_end,
5186 char *err)
5187 {
5188 return (*the_low_target.install_fast_tracepoint_jump_pad)
5189 (tpoint, tpaddr, collector, lockaddr, orig_size,
5190 jump_entry, trampoline, trampoline_size,
5191 jjump_pad_insn, jjump_pad_insn_size,
5192 adjusted_insn_addr, adjusted_insn_addr_end,
5193 err);
5194 }
5195
5196 static struct emit_ops *
5197 linux_emit_ops (void)
5198 {
5199 if (the_low_target.emit_ops != NULL)
5200 return (*the_low_target.emit_ops) ();
5201 else
5202 return NULL;
5203 }
5204
5205 static int
5206 linux_get_min_fast_tracepoint_insn_len (void)
5207 {
5208 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5209 }
5210
5211 /* Extract *PHDR_MEMADDR and *NUM_PHDR from the inferior's auxv. Return 0 on success. */
5212
5213 static int
5214 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5215 CORE_ADDR *phdr_memaddr, int *num_phdr)
5216 {
5217 char filename[PATH_MAX];
5218 int fd;
5219 const int auxv_size = is_elf64
5220 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5221 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5222
5223 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5224
5225 fd = open (filename, O_RDONLY);
5226 if (fd < 0)
5227 return 1;
5228
5229 *phdr_memaddr = 0;
5230 *num_phdr = 0;
5231 while (read (fd, buf, auxv_size) == auxv_size
5232 && (*phdr_memaddr == 0 || *num_phdr == 0))
5233 {
5234 if (is_elf64)
5235 {
5236 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5237
5238 switch (aux->a_type)
5239 {
5240 case AT_PHDR:
5241 *phdr_memaddr = aux->a_un.a_val;
5242 break;
5243 case AT_PHNUM:
5244 *num_phdr = aux->a_un.a_val;
5245 break;
5246 }
5247 }
5248 else
5249 {
5250 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5251
5252 switch (aux->a_type)
5253 {
5254 case AT_PHDR:
5255 *phdr_memaddr = aux->a_un.a_val;
5256 break;
5257 case AT_PHNUM:
5258 *num_phdr = aux->a_un.a_val;
5259 break;
5260 }
5261 }
5262 }
5263
5264 close (fd);
5265
5266 if (*phdr_memaddr == 0 || *num_phdr == 0)
5267 {
5268 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5269 "phdr_memaddr = %ld, phdr_num = %d",
5270 (long) *phdr_memaddr, *num_phdr);
5271 return 2;
5272 }
5273
5274 return 0;
5275 }
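
/* Editor's note: the /proc/PID/auxv scan above is needed because the
   vector belongs to *another* process.  For a process's own vector,
   glibc (2.16 and later) offers getauxval; a sketch, compiled out
   deliberately: */
#if 0
#include <stdio.h>
#include <sys/auxv.h>

static void
example_own_phdrs (void)
{
  unsigned long phdr = getauxval (AT_PHDR);	/* Program header table.  */
  unsigned long phnum = getauxval (AT_PHNUM);	/* Number of entries.  */

  printf ("phdrs at %#lx, %lu entries\n", phdr, phnum);
}
#endif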
5276
5277 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5278
5279 static CORE_ADDR
5280 get_dynamic (const int pid, const int is_elf64)
5281 {
5282 CORE_ADDR phdr_memaddr, relocation;
5283 int num_phdr, i;
5284 unsigned char *phdr_buf;
5285 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5286
5287 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5288 return 0;
5289
5290 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5291 phdr_buf = alloca (num_phdr * phdr_size);
5292
5293 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5294 return 0;
5295
5296 /* Compute relocation: it is expected to be 0 for "regular" executables,
5297 non-zero for PIE ones. */
5298 relocation = -1;
5299 for (i = 0; relocation == -1 && i < num_phdr; i++)
5300 if (is_elf64)
5301 {
5302 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5303
5304 if (p->p_type == PT_PHDR)
5305 relocation = phdr_memaddr - p->p_vaddr;
5306 }
5307 else
5308 {
5309 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5310
5311 if (p->p_type == PT_PHDR)
5312 relocation = phdr_memaddr - p->p_vaddr;
5313 }
5314
5315 if (relocation == -1)
5316 {
5317 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
5318 all real-world executables, including PIE executables, always have
5319 PT_PHDR present. PT_PHDR is absent from some shared libraries and
5320 from fpc (Free Pascal 2.4) binaries, but those neither need nor
5321 provide DT_DEBUG anyway (fpc binaries are statically linked).
5322
5323 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
5324
5325 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
5326
5327 return 0;
5328 }
5329
5330 for (i = 0; i < num_phdr; i++)
5331 {
5332 if (is_elf64)
5333 {
5334 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5335
5336 if (p->p_type == PT_DYNAMIC)
5337 return p->p_vaddr + relocation;
5338 }
5339 else
5340 {
5341 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5342
5343 if (p->p_type == PT_DYNAMIC)
5344 return p->p_vaddr + relocation;
5345 }
5346 }
5347
5348 return 0;
5349 }
5350
5351 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5352 can be 0 if the inferior does not yet have the library list initialized.
5353 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5354 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5355
5356 static CORE_ADDR
5357 get_r_debug (const int pid, const int is_elf64)
5358 {
5359 CORE_ADDR dynamic_memaddr;
5360 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5361 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5362 CORE_ADDR map = -1;
5363
5364 dynamic_memaddr = get_dynamic (pid, is_elf64);
5365 if (dynamic_memaddr == 0)
5366 return map;
5367
5368 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5369 {
5370 if (is_elf64)
5371 {
5372 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5373 #ifdef DT_MIPS_RLD_MAP
5374 union
5375 {
5376 Elf64_Xword map;
5377 unsigned char buf[sizeof (Elf64_Xword)];
5378 }
5379 rld_map;
5380
5381 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5382 {
5383 if (linux_read_memory (dyn->d_un.d_val,
5384 rld_map.buf, sizeof (rld_map.buf)) == 0)
5385 return rld_map.map;
5386 else
5387 break;
5388 }
5389 #endif /* DT_MIPS_RLD_MAP */
5390
5391 if (dyn->d_tag == DT_DEBUG && map == -1)
5392 map = dyn->d_un.d_val;
5393
5394 if (dyn->d_tag == DT_NULL)
5395 break;
5396 }
5397 else
5398 {
5399 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5400 #ifdef DT_MIPS_RLD_MAP
5401 union
5402 {
5403 Elf32_Word map;
5404 unsigned char buf[sizeof (Elf32_Word)];
5405 }
5406 rld_map;
5407
5408 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5409 {
5410 if (linux_read_memory (dyn->d_un.d_val,
5411 rld_map.buf, sizeof (rld_map.buf)) == 0)
5412 return rld_map.map;
5413 else
5414 break;
5415 }
5416 #endif /* DT_MIPS_RLD_MAP */
5417
5418 if (dyn->d_tag == DT_DEBUG && map == -1)
5419 map = dyn->d_un.d_val;
5420
5421 if (dyn->d_tag == DT_NULL)
5422 break;
5423 }
5424
5425 dynamic_memaddr += dyn_size;
5426 }
5427
5428 return map;
5429 }
5430
5431 /* Read one pointer from MEMADDR in the inferior. */
5432
5433 static int
5434 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5435 {
5436 int ret;
5437
5438 /* Go through a union so this works on either big or little endian
5439 hosts, when the inferior's pointer size is smaller than the size
5440 of CORE_ADDR. It is assumed the inferior's endianness is the
5441 same as the superior's. */
5442 union
5443 {
5444 CORE_ADDR core_addr;
5445 unsigned int ui;
5446 unsigned char uc;
5447 } addr;
5448
5449 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5450 if (ret == 0)
5451 {
5452 if (ptr_size == sizeof (CORE_ADDR))
5453 *ptr = addr.core_addr;
5454 else if (ptr_size == sizeof (unsigned int))
5455 *ptr = addr.ui;
5456 else
5457 gdb_assert_not_reached ("unhandled pointer size");
5458 }
5459 return ret;
5460 }
5461
5462 struct link_map_offsets
5463 {
5464 /* Offset and size of r_debug.r_version. */
5465 int r_version_offset;
5466
5467 /* Offset and size of r_debug.r_map. */
5468 int r_map_offset;
5469
5470 /* Offset to l_addr field in struct link_map. */
5471 int l_addr_offset;
5472
5473 /* Offset to l_name field in struct link_map. */
5474 int l_name_offset;
5475
5476 /* Offset to l_ld field in struct link_map. */
5477 int l_ld_offset;
5478
5479 /* Offset to l_next field in struct link_map. */
5480 int l_next_offset;
5481
5482 /* Offset to l_prev field in struct link_map. */
5483 int l_prev_offset;
5484 };
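
/* Editor's note: the hard-coded tables below mirror glibc's struct
   r_debug and struct link_map layouts.  On a native LP64 glibc host
   the same numbers could be computed with offsetof - a sketch,
   compiled out because the *inferior's* ABI may differ from the
   host's, which is exactly why gdbserver hard-codes both variants: */
#if 0
#include <link.h>
#include <stddef.h>

enum
{
  example_r_map_offset = offsetof (struct r_debug, r_map),    /* 8 on LP64.  */
  example_l_addr_offset = offsetof (struct link_map, l_addr), /* 0.  */
  example_l_next_offset = offsetof (struct link_map, l_next)  /* 24.  */
};
#endif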
5485
5486 /* Construct qXfer:libraries-svr4:read reply. */
5487
5488 static int
5489 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5490 unsigned const char *writebuf,
5491 CORE_ADDR offset, int len)
5492 {
5493 char *document;
5494 unsigned document_len;
5495 struct process_info_private *const priv = current_process ()->private;
5496 char filename[PATH_MAX];
5497 int pid, is_elf64;
5498
5499 static const struct link_map_offsets lmo_32bit_offsets =
5500 {
5501 0, /* r_version offset. */
5502 4, /* r_debug.r_map offset. */
5503 0, /* l_addr offset in link_map. */
5504 4, /* l_name offset in link_map. */
5505 8, /* l_ld offset in link_map. */
5506 12, /* l_next offset in link_map. */
5507 16 /* l_prev offset in link_map. */
5508 };
5509
5510 static const struct link_map_offsets lmo_64bit_offsets =
5511 {
5512 0, /* r_version offset. */
5513 8, /* r_debug.r_map offset. */
5514 0, /* l_addr offset in link_map. */
5515 8, /* l_name offset in link_map. */
5516 16, /* l_ld offset in link_map. */
5517 24, /* l_next offset in link_map. */
5518 32 /* l_prev offset in link_map. */
5519 };
5520 const struct link_map_offsets *lmo;
5521 unsigned int machine;
5522 int ptr_size;
5523 CORE_ADDR lm_addr = 0, lm_prev = 0;
5524 int allocated = 1024;
5525 char *p;
5526 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5527 int header_done = 0;
5528
5529 if (writebuf != NULL)
5530 return -2;
5531 if (readbuf == NULL)
5532 return -1;
5533
5534 pid = lwpid_of (get_thread_lwp (current_inferior));
5535 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5536 is_elf64 = elf_64_file_p (filename, &machine);
5537 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5538 ptr_size = is_elf64 ? 8 : 4;
5539
5540 while (annex[0] != '\0')
5541 {
5542 const char *sep;
5543 CORE_ADDR *addrp;
5544 int len;
5545
5546 sep = strchr (annex, '=');
5547 if (sep == NULL)
5548 break;
5549
5550 len = sep - annex;
5551 if (len == 5 && strncmp (annex, "start", 5) == 0)
5552 addrp = &lm_addr;
5553 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5554 addrp = &lm_prev;
5555 else
5556 {
5557 annex = strchr (sep, ';');
5558 if (annex == NULL)
5559 break;
5560 annex++;
5561 continue;
5562 }
5563
5564 annex = decode_address_to_semicolon (addrp, sep + 1);
5565 }
5566
5567 if (lm_addr == 0)
5568 {
5569 int r_version = 0;
5570
5571 if (priv->r_debug == 0)
5572 priv->r_debug = get_r_debug (pid, is_elf64);
5573
5574 /* We failed to find DT_DEBUG. That situation will not change
5575 for this inferior - do not retry it. Report it to GDB as
5576 E01; see GDB's solib-svr4.c for the reasons. */
5577 if (priv->r_debug == (CORE_ADDR) -1)
5578 return -1;
5579
5580 if (priv->r_debug != 0)
5581 {
5582 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5583 (unsigned char *) &r_version,
5584 sizeof (r_version)) != 0
5585 || r_version != 1)
5586 {
5587 warning ("unexpected r_debug version %d", r_version);
5588 }
5589 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5590 &lm_addr, ptr_size) != 0)
5591 {
5592 warning ("unable to read r_map from 0x%lx",
5593 (long) priv->r_debug + lmo->r_map_offset);
5594 }
5595 }
5596 }
5597
5598 document = xmalloc (allocated);
5599 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5600 p = document + strlen (document);
5601
5602 while (lm_addr
5603 && read_one_ptr (lm_addr + lmo->l_name_offset,
5604 &l_name, ptr_size) == 0
5605 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5606 &l_addr, ptr_size) == 0
5607 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5608 &l_ld, ptr_size) == 0
5609 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5610 &l_prev, ptr_size) == 0
5611 && read_one_ptr (lm_addr + lmo->l_next_offset,
5612 &l_next, ptr_size) == 0)
5613 {
5614 unsigned char libname[PATH_MAX];
5615
5616 if (lm_prev != l_prev)
5617 {
5618 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5619 (long) lm_prev, (long) l_prev);
5620 break;
5621 }
5622
5623 /* Ignore the first entry even if it has a valid name, as it
5624 corresponds to the main executable. The first entry should not be
5625 skipped if the dynamic loader was loaded late by a static executable
5626 (see the solib-svr4.c parameter ignore_first). But in that case the
5627 main executable does not have PT_DYNAMIC present, and this function
5628 has already exited above due to a failed get_r_debug. */
5629 if (lm_prev == 0)
5630 {
5631 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5632 p = p + strlen (p);
5633 }
5634 else
5635 {
5636 /* Not checking for error because reading may stop before
5637 we've got PATH_MAX worth of characters. */
5638 libname[0] = '\0';
5639 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5640 libname[sizeof (libname) - 1] = '\0';
5641 if (libname[0] != '\0')
5642 {
5643 /* 6x the size for xml_escape_text below. */
5644 size_t len = 6 * strlen ((char *) libname);
5645 char *name;
5646
5647 if (!header_done)
5648 {
5649 /* Terminate `<library-list-svr4'. */
5650 *p++ = '>';
5651 header_done = 1;
5652 }
5653
5654 while (allocated < p - document + len + 200)
5655 {
5656 /* Expand to guarantee sufficient storage. */
5657 uintptr_t document_len = p - document;
5658
5659 document = xrealloc (document, 2 * allocated);
5660 allocated *= 2;
5661 p = document + document_len;
5662 }
5663
5664 name = xml_escape_text ((char *) libname);
5665 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5666 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5667 name, (unsigned long) lm_addr,
5668 (unsigned long) l_addr, (unsigned long) l_ld);
5669 free (name);
5670 }
5671 }
5672
5673 lm_prev = lm_addr;
5674 lm_addr = l_next;
5675 }
5676
5677 if (!header_done)
5678 {
5679 /* Empty list; terminate `<library-list-svr4'. */
5680 strcpy (p, "/>");
5681 }
5682 else
5683 strcpy (p, "</library-list-svr4>");
5684
5685 document_len = strlen (document);
5686 if (offset < document_len)
5687 document_len -= offset;
5688 else
5689 document_len = 0;
5690 if (len > document_len)
5691 len = document_len;
5692
5693 memcpy (readbuf, document + offset, len);
5694 xfree (document);
5695
5696 return len;
5697 }
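
/* Editor's note: the annex parsed at the top of the function above is
   a semicolon-separated list of NAME=HEXADDR pairs; the recognized
   names are "start" (the link-map entry to begin from) and "prev"
   (the expected l_prev of that entry), e.g. "start=7f0000001000;prev=0".
   An empty annex means "walk the whole list from r_debug.r_map".  */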
5698
5699 #ifdef HAVE_LINUX_BTRACE
5700
5701 /* Enable branch tracing. */
5702
5703 static struct btrace_target_info *
5704 linux_low_enable_btrace (ptid_t ptid)
5705 {
5706 struct btrace_target_info *tinfo;
5707
5708 tinfo = linux_enable_btrace (ptid);
5709
5710 if (tinfo != NULL)
5711 {
5712 struct thread_info *thread = find_thread_ptid (ptid);
5713 struct regcache *regcache = get_thread_regcache (thread, 0);
5714
5715 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5716 }
5717
5718 return tinfo;
5719 }
5720
5721 /* Read branch trace data as a btrace XML document. */
5722
5723 static void
5724 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5725 int type)
5726 {
5727 VEC (btrace_block_s) *btrace;
5728 struct btrace_block *block;
5729 int i;
5730
5731 btrace = linux_read_btrace (tinfo, type);
5732
5733 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5734 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
5735
5736 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
5737 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5738 paddress (block->begin), paddress (block->end));
5739
5740 buffer_grow_str (buffer, "</btrace>\n");
5741
5742 VEC_free (btrace_block_s, btrace);
5743 }
5744 #endif /* HAVE_LINUX_BTRACE */
5745
5746 static struct target_ops linux_target_ops = {
5747 linux_create_inferior,
5748 linux_attach,
5749 linux_kill,
5750 linux_detach,
5751 linux_mourn,
5752 linux_join,
5753 linux_thread_alive,
5754 linux_resume,
5755 linux_wait,
5756 linux_fetch_registers,
5757 linux_store_registers,
5758 linux_prepare_to_access_memory,
5759 linux_done_accessing_memory,
5760 linux_read_memory,
5761 linux_write_memory,
5762 linux_look_up_symbols,
5763 linux_request_interrupt,
5764 linux_read_auxv,
5765 linux_insert_point,
5766 linux_remove_point,
5767 linux_stopped_by_watchpoint,
5768 linux_stopped_data_address,
5769 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5770 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5771 && defined(PT_TEXT_END_ADDR)
5772 linux_read_offsets,
5773 #else
5774 NULL,
5775 #endif
5776 #ifdef USE_THREAD_DB
5777 thread_db_get_tls_address,
5778 #else
5779 NULL,
5780 #endif
5781 linux_qxfer_spu,
5782 hostio_last_error_from_errno,
5783 linux_qxfer_osdata,
5784 linux_xfer_siginfo,
5785 linux_supports_non_stop,
5786 linux_async,
5787 linux_start_non_stop,
5788 linux_supports_multi_process,
5789 #ifdef USE_THREAD_DB
5790 thread_db_handle_monitor_command,
5791 #else
5792 NULL,
5793 #endif
5794 linux_common_core_of_thread,
5795 linux_read_loadmap,
5796 linux_process_qsupported,
5797 linux_supports_tracepoints,
5798 linux_read_pc,
5799 linux_write_pc,
5800 linux_thread_stopped,
5801 NULL,
5802 linux_pause_all,
5803 linux_unpause_all,
5804 linux_cancel_breakpoints,
5805 linux_stabilize_threads,
5806 linux_install_fast_tracepoint_jump_pad,
5807 linux_emit_ops,
5808 linux_supports_disable_randomization,
5809 linux_get_min_fast_tracepoint_insn_len,
5810 linux_qxfer_libraries_svr4,
5811 linux_supports_agent,
5812 #ifdef HAVE_LINUX_BTRACE
5813 linux_supports_btrace,
5814 linux_low_enable_btrace,
5815 linux_disable_btrace,
5816 linux_low_read_btrace,
5817 #else
5818 NULL,
5819 NULL,
5820 NULL,
5821 NULL,
5822 #endif
5823 linux_supports_range_stepping,
5824 };
5825
5826 static void
5827 linux_init_signals (void)
5828 {
5829 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5830 to find what the cancel signal actually is. */
5831 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5832 signal (__SIGRTMIN+1, SIG_IGN);
5833 #endif
5834 }
5835
5836 #ifdef HAVE_LINUX_REGSETS
5837 void
5838 initialize_regsets_info (struct regsets_info *info)
5839 {
5840 for (info->num_regsets = 0;
5841 info->regsets[info->num_regsets].size >= 0;
5842 info->num_regsets++)
5843 ;
5844 }
5845 #endif
5846
5847 void
5848 initialize_low (void)
5849 {
5850 struct sigaction sigchld_action;
5851 memset (&sigchld_action, 0, sizeof (sigchld_action));
5852 set_target_ops (&linux_target_ops);
5853 set_breakpoint_data (the_low_target.breakpoint,
5854 the_low_target.breakpoint_len);
5855 linux_init_signals ();
5856 linux_ptrace_init_warnings ();
5857
5858 sigchld_action.sa_handler = sigchld_handler;
5859 sigemptyset (&sigchld_action.sa_mask);
5860 sigchld_action.sa_flags = SA_RESTART;
5861 sigaction (SIGCHLD, &sigchld_action, NULL);
5862
5863 initialize_low_arch ();
5864 }