/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
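
/* A quick sanity sketch of the encoding above (illustrative, not part
   of the original source): SIGTRAP is 5 on Linux, so

     int status = W_STOPCODE (SIGTRAP);  // (5 << 8) | 0x7f == 0x57f
     WIFSTOPPED (status)                 // nonzero
     WSTOPSIG (status)                   // == SIGTRAP

   dequeue_one_deferred_signal below relies on this round-trip when it
   re-synthesizes a wait status from a queued signal number.  */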

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
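
/* A usage sketch for the list above (hypothetical PID, not from the
   original source): when a clone child's initial SIGSTOP is reported
   to waitpid before the PTRACE_EVENT_CLONE event that announces the
   child, linux_low_filter_event parks the status here, and
   handle_extended_wait claims it later:

     add_to_pid_list (&stopped_pids, 4711, wstat);  // early stop seen

     int status;
     if (pull_pid_from_list (&stopped_pids, 4711, &status))
       ...  // no need to wait for the child again
   */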

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  Store the machine
   type in *MACHINE (EM_NONE if not an ELF file).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }

  *machine = EM_NONE;
  return -1;
}
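
/* For reference (ELF facts assumed from the spec, not stated in this
   file): the e_ident bytes tested above are the fixed magic
   "\177ELF", and e_ident[EI_CLASS] gives the file class:

     ELFCLASS32 == 1   32-bit objects
     ELFCLASS64 == 2   64-bit objects

   so the comparison against ELFCLASS64 yields 1 for a 64-bit ELF
   header and 0 for a 32-bit one.  */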

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
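
/* A sketch of the wait status this function decodes (layout per
   ptrace(2), assumed for illustration): a PTRACE_EVENT_CLONE stop is
   reported as

     wstat == (PTRACE_EVENT_CLONE << 16) | (SIGTRAP << 8) | 0x7f;

   so WIFSTOPPED (wstat) and WSTOPSIG (wstat) == SIGTRAP both hold,
   and linux_ptrace_get_extended_event recovers the event from the
   high bits (wstat >> 16).  */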

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && !linux_is_extended_waitstatus (lwp->last_status))
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
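
/* A worked example of the adjustment (i386 numbers, assumed for
   illustration): decr_pc_after_break is 1 and the breakpoint
   instruction (int3) is one byte.  With a breakpoint at 0x8048500
   hit under PTRACE_CONT, the kernel reports $eip == 0x8048501, and
   get_stop_pc returns 0x8048500, the address GDB expects.  Under
   PTRACE_SINGLESTEP (lwp->stepping set), no adjustment is made.  */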

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

char *
linux_attach_fail_reason_string (ptid_t ptid, int err)
{
  static char *reason_string;
  struct buffer buffer;
  char *warnings;
  long lwpid = ptid_get_lwp (ptid);

  xfree (reason_string);

  buffer_init (&buffer);
  linux_ptrace_attach_fail_reason (lwpid, &buffer);
  buffer_grow_str0 (&buffer, "");
  warnings = buffer_finish (&buffer);
  if (warnings[0] != '\0')
    reason_string = xstrprintf ("%s (%d), %s",
				strerror (err), err, warnings);
  else
    reason_string = xstrprintf ("%s (%d)",
				strerror (err), err);
  xfree (warnings);
  return reason_string;
}

/* Attach to an inferior process.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  int new_threads_found;
	  int iterations = 0;

	  while (iterations < 2)
	    {
	      struct dirent *dp;

	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  unsigned long lwp;
		  ptid_t ptid;

		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  ptid = ptid_build (pid, lwp, 0);

		  /* Is this a new thread?  */
		  if (lwp != 0 && find_thread_ptid (ptid) == NULL)
		    {
		      int err;

		      if (debug_threads)
			debug_printf ("Found new lwp %ld\n", lwp);

		      err = linux_attach_lwp (ptid);
		      if (err != 0)
			warning ("Cannot attach to lwp %ld: %s",
				 lwp,
				 linux_attach_fail_reason_string (ptid, err));

		      new_threads_found++;
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get the pending signal of THREAD, for detaching purposes.  This is
   the signal the thread last stopped for, which we need to deliver to
   the thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
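
/* A note on ordering (a sketch, not original to this file): signals
   are pushed at the head of the list but drained from the tail, so
   deferred signals are reported FIFO.  E.g. after enqueueing SIGUSR1
   then SIGUSR2:

     pending_signals_to_report:  SIGUSR2 --prev--> SIGUSR1

   dequeue_one_deferred_signal walks the prev chain to the end and
   reports SIGUSR1 first.  */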

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	debug_printf ("CB: Push back breakpoint for %s\n",
		      target_pid_to_str (ptid_of (current_thread)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_thread = saved_thread;
      return 1;
    }
  else
    {
      if (debug_threads)
	debug_printf ("CB: No breakpoint found at %s for [%s]\n",
		      paddress (lwp->stop_pc),
		      target_pid_to_str (ptid_of (current_thread)));
    }

  current_thread = saved_thread;
  return 0;
}

/* Return true if the event in LP may have been caused by a
   breakpoint.  */

static int
lp_status_maybe_breakpoint (struct lwp_info *lp)
{
  return (lp->status_pending_p
	  && WIFSTOPPED (lp->status_pending)
	  && (WSTOPSIG (lp->status_pending) == SIGTRAP
	      /* SIGILL and SIGSEGV are also treated as traps in case a
		 breakpoint is inserted at the current PC.  */
	      || WSTOPSIG (lp->status_pending) == SIGILL
	      || WSTOPSIG (lp->status_pending) == SIGSEGV));
}

/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we should,
   or NULL otherwise.  */

1760 static struct lwp_info *
1761 linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
1762 {
1763 struct lwp_info *child;
1764 struct thread_info *thread;
1765
1766 child = find_lwp_pid (pid_to_ptid (lwpid));
1767
1768 /* If we didn't find a process, one of two things presumably happened:
1769 - A process we started and then detached from has exited. Ignore it.
1770 - A process we are controlling has forked and the new child's stop
1771 was reported to us by the kernel. Save its PID. */
1772 if (child == NULL && WIFSTOPPED (wstat))
1773 {
1774 add_to_pid_list (&stopped_pids, lwpid, wstat);
1775 return NULL;
1776 }
1777 else if (child == NULL)
1778 return NULL;
1779
1780 thread = get_lwp_thread (child);
1781
1782 child->stopped = 1;
1783
1784 child->last_status = wstat;
1785
1786 if (WIFSTOPPED (wstat))
1787 {
1788 struct process_info *proc;
1789
1790 /* Architecture-specific setup after inferior is running. This
1791 needs to happen after we have attached to the inferior and it
1792 is stopped for the first time, but before we access any
1793 inferior registers. */
1794 proc = find_process_pid (pid_of (thread));
1795 if (proc->private->new_inferior)
1796 {
1797 struct thread_info *saved_thread;
1798
1799 saved_thread = current_thread;
1800 current_thread = thread;
1801
1802 the_low_target.arch_setup ();
1803
1804 current_thread = saved_thread;
1805
1806 proc->private->new_inferior = 0;
1807 }
1808 }
1809
1810 /* Store the STOP_PC, with adjustment applied. This depends on the
1811 architecture being defined already (so that CHILD has a valid
1812 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1813 not). */
1814 if (WIFSTOPPED (wstat))
1815 {
1816 if (debug_threads
1817 && the_low_target.get_pc != NULL)
1818 {
1819 struct thread_info *saved_thread;
1820 struct regcache *regcache;
1821 CORE_ADDR pc;
1822
1823 saved_thread = current_thread;
1824 current_thread = thread;
1825 regcache = get_thread_regcache (current_thread, 1);
1826 pc = (*the_low_target.get_pc) (regcache);
1827 debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
1828 current_thread = saved_thread;
1829 }
1830
1831 child->stop_pc = get_stop_pc (child);
1832 }
1833
1834 /* Fetch the possibly triggered data watchpoint info and store it in
1835 CHILD.
1836
1837 On some archs, like x86, that use debug registers to set
1838 watchpoints, it's possible that the way to know which watched
1839 address trapped, is to check the register that is used to select
1840 which address to watch. Problem is, between setting the
1841 watchpoint and reading back which data address trapped, the user
1842 may change the set of watchpoints, and, as a consequence, GDB
1843 changes the debug registers in the inferior. To avoid reading
1844 back a stale stopped-data-address when that happens, we cache in
1845 LP the fact that a watchpoint trapped, and the corresponding data
1846 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1847 changes the debug registers meanwhile, we have the cached data we
1848 can rely on. */
1849
1850 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
1851 {
1852 if (the_low_target.stopped_by_watchpoint == NULL)
1853 {
1854 child->stopped_by_watchpoint = 0;
1855 }
1856 else
1857 {
1858 struct thread_info *saved_thread;
1859
1860 saved_thread = current_thread;
1861 current_thread = thread;
1862
1863 child->stopped_by_watchpoint
1864 = the_low_target.stopped_by_watchpoint ();
1865
1866 if (child->stopped_by_watchpoint)
1867 {
1868 if (the_low_target.stopped_data_address != NULL)
1869 child->stopped_data_address
1870 = the_low_target.stopped_data_address ();
1871 else
1872 child->stopped_data_address = 0;
1873 }
1874
1875 current_thread = saved_thread;
1876 }
1877 }
1878
1879 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1880 {
1881 linux_enable_event_reporting (lwpid);
1882 child->must_set_ptrace_flags = 0;
1883 }
1884
1885 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1886 && linux_is_extended_waitstatus (wstat))
1887 {
1888 handle_extended_wait (child, wstat);
1889 return NULL;
1890 }
1891
1892 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1893 && child->stop_expected)
1894 {
1895 if (debug_threads)
1896 debug_printf ("Expected stop.\n");
1897 child->stop_expected = 0;
1898
1899 if (thread->last_resume_kind == resume_stop)
1900 {
1901 /* We want to report the stop to the core. Treat the
1902 SIGSTOP as a normal event. */
1903 }
1904 else if (stopping_threads != NOT_STOPPING_THREADS)
1905 {
1906 /* Stopping threads. We don't want this SIGSTOP to end up
1907 pending in the FILTER_PTID handling below. */
1908 return NULL;
1909 }
1910 else
1911 {
1912 /* Filter out the event. */
1913 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1914 return NULL;
1915 }
1916 }
1917
1918 /* Check if the thread has exited. */
1919 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
1920 && num_lwps (pid_of (thread)) > 1)
1921 {
1922 if (debug_threads)
1923 debug_printf ("LLW: %d exited.\n", lwpid);
1924
1925 /* If there is at least one more LWP, then the exit signal
1926 was not the end of the debugged application and should be
1927 ignored. */
1928 delete_lwp (child);
1929 return NULL;
1930 }
1931
1932 if (!ptid_match (ptid_of (thread), filter_ptid))
1933 {
1934 if (debug_threads)
1935 debug_printf ("LWP %d got an event %06x, leaving pending.\n",
1936 lwpid, wstat);
1937
1938 if (WIFSTOPPED (wstat))
1939 {
1940 child->status_pending_p = 1;
1941 child->status_pending = wstat;
1942
1943 if (WSTOPSIG (wstat) != SIGSTOP)
1944 {
1945 		  /* Cancel breakpoint hits.  The breakpoint may be
1946 		     removed before we fetch events from this process to
1947 		     report to the core.  It is best not to assume the
1948 		     moribund breakpoints heuristic always handles these
1949 		     cases --- it could be that too many events go through
1950 		     to the core before this one is handled.  All-stop
1951 		     always cancels breakpoint hits in all threads.  */
1952 if (non_stop
1953 && lp_status_maybe_breakpoint (child)
1954 && cancel_breakpoint (child))
1955 {
1956 /* Throw away the SIGTRAP. */
1957 child->status_pending_p = 0;
1958
1959 if (debug_threads)
1960 debug_printf ("LLW: LWP %d hit a breakpoint while"
1961 " waiting for another process;"
1962 " cancelled it\n", lwpid);
1963 }
1964 }
1965 }
1966 else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
1967 {
1968 if (debug_threads)
1969 debug_printf ("LLWE: process %d exited while fetching "
1970 "event from another LWP\n", lwpid);
1971
1972 	  /* This was the last lwp in the process.  Since events are
1973 	     serialized to GDB core, we can't report this one right
1974 	     now.  But GDB core and the other target layers will want
1975 	     to be notified about the exit code/signal, so leave the
1976 	     status pending for the next time we're able to report
1977 	     it.  */
1978 mark_lwp_dead (child, wstat);
1979 }
1980
1981 return NULL;
1982 }
1983
1984 return child;
1985 }
1986
1987 /* When the event-loop is doing a step-over, this points at the thread
1988 being stepped. */
1989 ptid_t step_over_bkpt;
1990
1991 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1992 match FILTER_PTID (leaving others pending). The PTIDs can be:
1993 minus_one_ptid, to specify any child; a pid PTID, specifying all
1994 lwps of a thread group; or a PTID representing a single lwp. Store
1995    the stop status through the status pointer WSTATP.  OPTIONS is
1996    passed to the waitpid call.  Return 0 if no event was found and
1997    OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
1998    were found.  Return the PID of the stopped child otherwise.  */
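/* For example (illustrative; this mirrors the call made in
   wait_for_sigstop further below), a blocking wait that leaves every
   event pending for later looks like:

     int wstat, ret;

     ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
					  &wstat, __WALL);

   and RET is -1 once there are no unwaited-for children left.  */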
1999
2000 static int
2001 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2002 int *wstatp, int options)
2003 {
2004 struct thread_info *event_thread;
2005 struct lwp_info *event_child, *requested_child;
2006 sigset_t block_mask, prev_mask;
2007
2008 retry:
2009 /* N.B. event_thread points to the thread_info struct that contains
2010 event_child. Keep them in sync. */
2011 event_thread = NULL;
2012 event_child = NULL;
2013 requested_child = NULL;
2014
2015 /* Check for a lwp with a pending status. */
2016
2017 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2018 {
2019 event_thread = (struct thread_info *)
2020 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2021 if (event_thread != NULL)
2022 event_child = get_thread_lwp (event_thread);
2023 if (debug_threads && event_thread)
2024 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2025 }
2026 else if (!ptid_equal (filter_ptid, null_ptid))
2027 {
2028 requested_child = find_lwp_pid (filter_ptid);
2029
2030 if (stopping_threads == NOT_STOPPING_THREADS
2031 && requested_child->status_pending_p
2032 && requested_child->collecting_fast_tracepoint)
2033 {
2034 enqueue_one_deferred_signal (requested_child,
2035 &requested_child->status_pending);
2036 requested_child->status_pending_p = 0;
2037 requested_child->status_pending = 0;
2038 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2039 }
2040
2041 if (requested_child->suspended
2042 && requested_child->status_pending_p)
2043 {
2044 internal_error (__FILE__, __LINE__,
2045 "requesting an event out of a"
2046 " suspended child?");
2047 }
2048
2049 if (requested_child->status_pending_p)
2050 {
2051 event_child = requested_child;
2052 event_thread = get_lwp_thread (event_child);
2053 }
2054 }
2055
2056 if (event_child != NULL)
2057 {
2058 if (debug_threads)
2059 debug_printf ("Got an event from pending child %ld (%04x)\n",
2060 lwpid_of (event_thread), event_child->status_pending);
2061 *wstatp = event_child->status_pending;
2062 event_child->status_pending_p = 0;
2063 event_child->status_pending = 0;
2064 current_thread = event_thread;
2065 return lwpid_of (event_thread);
2066 }
2067
2068 /* But if we don't find a pending event, we'll have to wait.
2069
2070 We only enter this loop if no process has a pending wait status.
2071 Thus any action taken in response to a wait status inside this
2072 loop is responding as soon as we detect the status, not after any
2073 pending events. */
2074
2075 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2076 all signals while here. */
2077 sigfillset (&block_mask);
2078 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2079
2080 while (event_child == NULL)
2081 {
2082 pid_t ret = 0;
2083
2084       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2085 	 quirks:
2086
2087 - If the thread group leader exits while other threads in the
2088 thread group still exist, waitpid(TGID, ...) hangs. That
2089 waitpid won't return an exit status until the other threads
2090 in the group are reaped.
2091
2092 - When a non-leader thread execs, that thread just vanishes
2093 without reporting an exit (so we'd hang if we waited for it
2094 explicitly in that case). The exec event is reported to
2095 the TGID pid (although we don't currently enable exec
2096 events). */
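	 /* Hence the overall shape of this loop: poll every child
	    with waitpid (-1, ..., WNOHANG) until it runs dry, and
	    block in sigsuspend further below (with SIGCHLD blocked in
	    the meantime) instead of ever blocking in waitpid
	    itself.  */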
2097 errno = 0;
2098 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2099
2100 if (debug_threads)
2101 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2102 ret, errno ? strerror (errno) : "ERRNO-OK");
2103
2104 if (ret > 0)
2105 {
2106 if (debug_threads)
2107 {
2108 debug_printf ("LLW: waitpid %ld received %s\n",
2109 (long) ret, status_to_str (*wstatp));
2110 }
2111
2112 event_child = linux_low_filter_event (filter_ptid,
2113 ret, *wstatp);
2114 if (event_child != NULL)
2115 {
2116 /* We got an event to report to the core. */
2117 event_thread = get_lwp_thread (event_child);
2118 break;
2119 }
2120
2121 /* Retry until nothing comes out of waitpid. A single
2122 SIGCHLD can indicate more than one child stopped. */
2123 continue;
2124 }
2125
2126 /* Check for zombie thread group leaders. Those can't be reaped
2127 until all other threads in the thread group are. */
2128 check_zombie_leaders ();
2129
2130 /* If there are no resumed children left in the set of LWPs we
2131 want to wait for, bail. We can't just block in
2132 waitpid/sigsuspend, because lwps might have been left stopped
2133 in trace-stop state, and we'd be stuck forever waiting for
2134 their status to change (which would only happen if we resumed
2135 them). Even if WNOHANG is set, this return code is preferred
2136 over 0 (below), as it is more detailed. */
2137 if ((find_inferior (&all_threads,
2138 not_stopped_callback,
2139 &wait_ptid) == NULL))
2140 {
2141 if (debug_threads)
2142 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2143 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2144 return -1;
2145 }
2146
2147 /* No interesting event to report to the caller. */
2148 if ((options & WNOHANG))
2149 {
2150 if (debug_threads)
2151 debug_printf ("WNOHANG set, no event found\n");
2152
2153 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2154 return 0;
2155 }
2156
2157 /* Block until we get an event reported with SIGCHLD. */
2158 if (debug_threads)
2159 debug_printf ("sigsuspend'ing\n");
2160
2161 sigsuspend (&prev_mask);
2162 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2163 goto retry;
2164 }
2165
2166 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2167
2168 current_thread = event_thread;
2169
2170 /* Check for thread exit. */
2171 if (! WIFSTOPPED (*wstatp))
2172 {
2173 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2174
2175 if (debug_threads)
2176 	debug_printf ("LWP %ld is the last lwp of process %d.  "
2177 		      "Process exiting.\n",
2178 		      lwpid_of (event_thread), pid_of (event_thread));
2179 return lwpid_of (event_thread);
2180 }
2181
2182 return lwpid_of (event_thread);
2183 }
2184
2185 /* Wait for an event from child(ren) PTID. PTIDs can be:
2186 minus_one_ptid, to specify any child; a pid PTID, specifying all
2187 lwps of a thread group; or a PTID representing a single lwp. Store
2188    the stop status through the status pointer WSTATP.  OPTIONS is
2189    passed to the waitpid call.  Return 0 if no event was found and
2190    OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
2191    were found.  Return the PID of the stopped child otherwise.  */
2192
2193 static int
2194 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2195 {
2196 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2197 }
2198
2199 /* Count the LWP's that have had events. */
2200
2201 static int
2202 count_events_callback (struct inferior_list_entry *entry, void *data)
2203 {
2204 struct thread_info *thread = (struct thread_info *) entry;
2205 struct lwp_info *lp = get_thread_lwp (thread);
2206 int *count = data;
2207
2208 gdb_assert (count != NULL);
2209
2210 /* Count only resumed LWPs that have a SIGTRAP event pending that
2211 should be reported to GDB. */
2212 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2213 && thread->last_resume_kind != resume_stop
2214 && lp_status_maybe_breakpoint (lp)
2215 && !breakpoint_inserted_here (lp->stop_pc))
2216 (*count)++;
2217
2218 return 0;
2219 }
2220
2221 /* Select the LWP (if any) that is currently being single-stepped. */
2222
2223 static int
2224 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2225 {
2226 struct thread_info *thread = (struct thread_info *) entry;
2227 struct lwp_info *lp = get_thread_lwp (thread);
2228
2229 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2230 && thread->last_resume_kind == resume_step
2231 && lp->status_pending_p)
2232 return 1;
2233 else
2234 return 0;
2235 }
2236
2237 /* Select the Nth LWP that has had a SIGTRAP event that should be
2238 reported to GDB. */
2239
2240 static int
2241 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2242 {
2243 struct thread_info *thread = (struct thread_info *) entry;
2244 struct lwp_info *lp = get_thread_lwp (thread);
2245 int *selector = data;
2246
2247 gdb_assert (selector != NULL);
2248
2249 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2250 if (thread->last_resume_kind != resume_stop
2251 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2252 && lp_status_maybe_breakpoint (lp)
2253 && !breakpoint_inserted_here (lp->stop_pc))
2254 if ((*selector)-- == 0)
2255 return 1;
2256
2257 return 0;
2258 }
2259
2260 static int
2261 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2262 {
2263 struct thread_info *thread = (struct thread_info *) entry;
2264 struct lwp_info *lp = get_thread_lwp (thread);
2265 struct lwp_info *event_lp = data;
2266
2267 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2268 if (lp == event_lp)
2269 return 0;
2270
2271 /* If a LWP other than the LWP that we're reporting an event for has
2272 hit a GDB breakpoint (as opposed to some random trap signal),
2273 then just arrange for it to hit it again later. We don't keep
2274 the SIGTRAP status and don't forward the SIGTRAP signal to the
2275 LWP. We will handle the current event, eventually we will resume
2276 all LWPs, and this one will get its breakpoint trap again.
2277
2278 If we do not do this, then we run the risk that the user will
2279 delete or disable the breakpoint, but the LWP will have already
2280 tripped on it. */
2281
2282 if (thread->last_resume_kind != resume_stop
2283 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2284 && lp_status_maybe_breakpoint (lp)
2285 && !lp->stepping
2286 && !lp->stopped_by_watchpoint
2287 && cancel_breakpoint (lp))
2288 /* Throw away the SIGTRAP. */
2289 lp->status_pending_p = 0;
2290
2291 return 0;
2292 }
2293
2294 static void
2295 linux_cancel_breakpoints (void)
2296 {
2297 find_inferior (&all_threads, cancel_breakpoints_callback, NULL);
2298 }
2299
2300 /* Select one LWP out of those that have events pending. */
2301
2302 static void
2303 select_event_lwp (struct lwp_info **orig_lp)
2304 {
2305 int num_events = 0;
2306 int random_selector;
2307 struct thread_info *event_thread;
2308
2309 /* Give preference to any LWP that is being single-stepped. */
2310 event_thread
2311 = (struct thread_info *) find_inferior (&all_threads,
2312 select_singlestep_lwp_callback,
2313 NULL);
2314 if (event_thread != NULL)
2315 {
2316 if (debug_threads)
2317 debug_printf ("SEL: Select single-step %s\n",
2318 target_pid_to_str (ptid_of (event_thread)));
2319 }
2320 else
2321 {
2322 /* No single-stepping LWP. Select one at random, out of those
2323 which have had SIGTRAP events. */
2324
2325 /* First see how many SIGTRAP events we have. */
2326 find_inferior (&all_threads, count_events_callback, &num_events);
2327
2328 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2329 random_selector = (int)
2330 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
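      /* rand () / (RAND_MAX + 1.0) lies in [0, 1), so the product is
	 a roughly uniform pick from 0 .. num_events - 1.  */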
2331
2332 if (debug_threads && num_events > 1)
2333 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2334 num_events, random_selector);
2335
2336 event_thread
2337 = (struct thread_info *) find_inferior (&all_threads,
2338 select_event_lwp_callback,
2339 &random_selector);
2340 }
2341
2342 if (event_thread != NULL)
2343 {
2344 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2345
2346 /* Switch the event LWP. */
2347 *orig_lp = event_lp;
2348 }
2349 }
2350
2351 /* Decrement the suspend count of an LWP. */
2352
2353 static int
2354 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2355 {
2356 struct thread_info *thread = (struct thread_info *) entry;
2357 struct lwp_info *lwp = get_thread_lwp (thread);
2358
2359 /* Ignore EXCEPT. */
2360 if (lwp == except)
2361 return 0;
2362
2363 lwp->suspended--;
2364
2365 gdb_assert (lwp->suspended >= 0);
2366 return 0;
2367 }
2368
2369 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2370 NULL. */
2371
2372 static void
2373 unsuspend_all_lwps (struct lwp_info *except)
2374 {
2375 find_inferior (&all_threads, unsuspend_one_lwp, except);
2376 }
2377
2378 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2379 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2380 void *data);
2381 static int lwp_running (struct inferior_list_entry *entry, void *data);
2382 static ptid_t linux_wait_1 (ptid_t ptid,
2383 struct target_waitstatus *ourstatus,
2384 int target_options);
2385
2386 /* Stabilize threads (move out of jump pads).
2387
2388 If a thread is midway collecting a fast tracepoint, we need to
2389 finish the collection and move it out of the jump pad before
2390 reporting the signal.
2391
2392 This avoids recursion while collecting (when a signal arrives
2393 midway, and the signal handler itself collects), which would trash
2394 the trace buffer. In case the user set a breakpoint in a signal
2395 handler, this avoids the backtrace showing the jump pad, etc..
2396 Most importantly, there are certain things we can't do safely if
2397    threads are stopped in a jump pad (or in its callees).  For
2398 example:
2399
2400    - starting a new trace run.  A thread still collecting the
2401    previous run could trash the trace buffer when resumed.  The trace
2402    buffer control structures would have been reset but the thread had
2403    no way to tell.  The thread could even be midway through memcpy'ing
2404    to the buffer, which would mean that when resumed, it would clobber
2405    the trace buffer that had been set up for a new run.
2406
2407 - we can't rewrite/reuse the jump pads for new tracepoints
2408    safely.  Say you do tstart while a thread is stopped midway
2409    through collecting.  When the thread is later resumed, it finishes
2410    the collection, and returns to the jump pad, to execute the
2411    original instruction that was under the tracepoint jump at the
2412    time the older run had been started.  If the jump pad has since
2413    been rewritten for something else in the new run, the thread would
2414    now execute the wrong / random instructions.  */
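/* In outline, the stabilization below: (1) kick every thread out of
   its jump pad via move_out_of_jump_pad_callback, (2) keep pumping
   the full wait loop until no LWP is still running, deferring any
   interesting signals as they arrive, and (3) drop the suspend
   counts again so the threads end up merely stopped.  */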
2415
2416 static void
2417 linux_stabilize_threads (void)
2418 {
2419 struct thread_info *saved_thread;
2420 struct thread_info *thread_stuck;
2421
2422 thread_stuck
2423 = (struct thread_info *) find_inferior (&all_threads,
2424 stuck_in_jump_pad_callback,
2425 NULL);
2426 if (thread_stuck != NULL)
2427 {
2428 if (debug_threads)
2429 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2430 lwpid_of (thread_stuck));
2431 return;
2432 }
2433
2434 saved_thread = current_thread;
2435
2436 stabilizing_threads = 1;
2437
2438 /* Kick 'em all. */
2439 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2440
2441 /* Loop until all are stopped out of the jump pads. */
2442 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2443 {
2444 struct target_waitstatus ourstatus;
2445 struct lwp_info *lwp;
2446 int wstat;
2447
2448       /* Note that we go through the full wait event loop.  While
2449 	 moving threads out of the jump pad, we need to be able to step
2450 	 over internal breakpoints and such.  */
2451 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2452
2453 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2454 {
2455 lwp = get_thread_lwp (current_thread);
2456
2457 /* Lock it. */
2458 lwp->suspended++;
2459
2460 if (ourstatus.value.sig != GDB_SIGNAL_0
2461 || current_thread->last_resume_kind == resume_stop)
2462 {
2463 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2464 enqueue_one_deferred_signal (lwp, &wstat);
2465 }
2466 }
2467 }
2468
2469 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2470
2471 stabilizing_threads = 0;
2472
2473 current_thread = saved_thread;
2474
2475 if (debug_threads)
2476 {
2477 thread_stuck
2478 = (struct thread_info *) find_inferior (&all_threads,
2479 stuck_in_jump_pad_callback,
2480 NULL);
2481 if (thread_stuck != NULL)
2482 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2483 lwpid_of (thread_stuck));
2484 }
2485 }
2486
2487 /* Wait for process, returns status. */
2488
2489 static ptid_t
2490 linux_wait_1 (ptid_t ptid,
2491 struct target_waitstatus *ourstatus, int target_options)
2492 {
2493 int w;
2494 struct lwp_info *event_child;
2495 int options;
2496 int pid;
2497 int step_over_finished;
2498 int bp_explains_trap;
2499 int maybe_internal_trap;
2500 int report_to_gdb;
2501 int trace_event;
2502 int in_step_range;
2503
2504 if (debug_threads)
2505 {
2506 debug_enter ();
2507 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2508 }
2509
2510 /* Translate generic target options into linux options. */
2511 options = __WALL;
2512 if (target_options & TARGET_WNOHANG)
2513 options |= WNOHANG;
2514
2515 retry:
2516 bp_explains_trap = 0;
2517 trace_event = 0;
2518 in_step_range = 0;
2519 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2520
2521 if (ptid_equal (step_over_bkpt, null_ptid))
2522 pid = linux_wait_for_event (ptid, &w, options);
2523 else
2524 {
2525 if (debug_threads)
2526 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2527 target_pid_to_str (step_over_bkpt));
2528 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2529 }
2530
2531 if (pid == 0)
2532 {
2533 gdb_assert (target_options & TARGET_WNOHANG);
2534
2535 if (debug_threads)
2536 {
2537 debug_printf ("linux_wait_1 ret = null_ptid, "
2538 "TARGET_WAITKIND_IGNORE\n");
2539 debug_exit ();
2540 }
2541
2542 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2543 return null_ptid;
2544 }
2545 else if (pid == -1)
2546 {
2547 if (debug_threads)
2548 {
2549 debug_printf ("linux_wait_1 ret = null_ptid, "
2550 "TARGET_WAITKIND_NO_RESUMED\n");
2551 debug_exit ();
2552 }
2553
2554 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2555 return null_ptid;
2556 }
2557
2558 event_child = get_thread_lwp (current_thread);
2559
2560 /* linux_wait_for_event only returns an exit status for the last
2561 child of a process. Report it. */
2562 if (WIFEXITED (w) || WIFSIGNALED (w))
2563 {
2564 if (WIFEXITED (w))
2565 {
2566 ourstatus->kind = TARGET_WAITKIND_EXITED;
2567 ourstatus->value.integer = WEXITSTATUS (w);
2568
2569 if (debug_threads)
2570 {
2571 debug_printf ("linux_wait_1 ret = %s, exited with "
2572 "retcode %d\n",
2573 target_pid_to_str (ptid_of (current_thread)),
2574 WEXITSTATUS (w));
2575 debug_exit ();
2576 }
2577 }
2578 else
2579 {
2580 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2581 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2582
2583 if (debug_threads)
2584 {
2585 debug_printf ("linux_wait_1 ret = %s, terminated with "
2586 "signal %d\n",
2587 target_pid_to_str (ptid_of (current_thread)),
2588 WTERMSIG (w));
2589 debug_exit ();
2590 }
2591 }
2592
2593 return ptid_of (current_thread);
2594 }
2595
2596 /* If this event was not handled before, and is not a SIGTRAP, we
2597 report it. SIGILL and SIGSEGV are also treated as traps in case
2598 a breakpoint is inserted at the current PC. If this target does
2599 not support internal breakpoints at all, we also report the
2600 SIGTRAP without further processing; it's of no concern to us. */
2601 maybe_internal_trap
2602 = (supports_breakpoints ()
2603 && (WSTOPSIG (w) == SIGTRAP
2604 || ((WSTOPSIG (w) == SIGILL
2605 || WSTOPSIG (w) == SIGSEGV)
2606 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2607
2608 if (maybe_internal_trap)
2609 {
2610 /* Handle anything that requires bookkeeping before deciding to
2611 report the event or continue waiting. */
2612
2613 /* First check if we can explain the SIGTRAP with an internal
2614 breakpoint, or if we should possibly report the event to GDB.
2615 Do this before anything that may remove or insert a
2616 breakpoint. */
2617 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2618
2619 /* We have a SIGTRAP, possibly a step-over dance has just
2620 finished. If so, tweak the state machine accordingly,
2621 reinsert breakpoints and delete any reinsert (software
2622 single-step) breakpoints. */
2623 step_over_finished = finish_step_over (event_child);
2624
2625 /* Now invoke the callbacks of any internal breakpoints there. */
2626 check_breakpoints (event_child->stop_pc);
2627
2628 /* Handle tracepoint data collecting. This may overflow the
2629 trace buffer, and cause a tracing stop, removing
2630 breakpoints. */
2631 trace_event = handle_tracepoints (event_child);
2632
2633 if (bp_explains_trap)
2634 {
2635 /* If we stepped or ran into an internal breakpoint, we've
2636 already handled it. So next time we resume (from this
2637 PC), we should step over it. */
2638 if (debug_threads)
2639 debug_printf ("Hit a gdbserver breakpoint.\n");
2640
2641 if (breakpoint_here (event_child->stop_pc))
2642 event_child->need_step_over = 1;
2643 }
2644 }
2645 else
2646 {
2647 /* We have some other signal, possibly a step-over dance was in
2648 progress, and it should be cancelled too. */
2649 step_over_finished = finish_step_over (event_child);
2650 }
2651
2652 /* We have all the data we need. Either report the event to GDB, or
2653 resume threads and keep waiting for more. */
2654
2655 /* If we're collecting a fast tracepoint, finish the collection and
2656 move out of the jump pad before delivering a signal. See
2657 linux_stabilize_threads. */
2658
2659 if (WIFSTOPPED (w)
2660 && WSTOPSIG (w) != SIGTRAP
2661 && supports_fast_tracepoints ()
2662 && agent_loaded_p ())
2663 {
2664 if (debug_threads)
2665 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2666 "to defer or adjust it.\n",
2667 WSTOPSIG (w), lwpid_of (current_thread));
2668
2669 /* Allow debugging the jump pad itself. */
2670 if (current_thread->last_resume_kind != resume_step
2671 && maybe_move_out_of_jump_pad (event_child, &w))
2672 {
2673 enqueue_one_deferred_signal (event_child, &w);
2674
2675 if (debug_threads)
2676 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2677 WSTOPSIG (w), lwpid_of (current_thread));
2678
2679 linux_resume_one_lwp (event_child, 0, 0, NULL);
2680 goto retry;
2681 }
2682 }
2683
2684 if (event_child->collecting_fast_tracepoint)
2685 {
2686 if (debug_threads)
2687 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2688 "Check if we're already there.\n",
2689 lwpid_of (current_thread),
2690 event_child->collecting_fast_tracepoint);
2691
2692 trace_event = 1;
2693
2694 event_child->collecting_fast_tracepoint
2695 = linux_fast_tracepoint_collecting (event_child, NULL);
2696
2697 if (event_child->collecting_fast_tracepoint != 1)
2698 {
2699 /* No longer need this breakpoint. */
2700 if (event_child->exit_jump_pad_bkpt != NULL)
2701 {
2702 if (debug_threads)
2703 		debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
2704 			      "Stopping all threads momentarily.\n");
2705
2706 /* Other running threads could hit this breakpoint.
2707 We don't handle moribund locations like GDB does,
2708 instead we always pause all threads when removing
2709 breakpoints, so that any step-over or
2710 decr_pc_after_break adjustment is always taken
2711 care of while the breakpoint is still
2712 inserted. */
2713 stop_all_lwps (1, event_child);
2714 cancel_breakpoints ();
2715
2716 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2717 event_child->exit_jump_pad_bkpt = NULL;
2718
2719 unstop_all_lwps (1, event_child);
2720
2721 gdb_assert (event_child->suspended >= 0);
2722 }
2723 }
2724
2725 if (event_child->collecting_fast_tracepoint == 0)
2726 {
2727 if (debug_threads)
2728 debug_printf ("fast tracepoint finished "
2729 "collecting successfully.\n");
2730
2731 /* We may have a deferred signal to report. */
2732 if (dequeue_one_deferred_signal (event_child, &w))
2733 {
2734 if (debug_threads)
2735 debug_printf ("dequeued one signal.\n");
2736 }
2737 else
2738 {
2739 if (debug_threads)
2740 debug_printf ("no deferred signals.\n");
2741
2742 if (stabilizing_threads)
2743 {
2744 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2745 ourstatus->value.sig = GDB_SIGNAL_0;
2746
2747 if (debug_threads)
2748 {
2749 debug_printf ("linux_wait_1 ret = %s, stopped "
2750 "while stabilizing threads\n",
2751 target_pid_to_str (ptid_of (current_thread)));
2752 debug_exit ();
2753 }
2754
2755 return ptid_of (current_thread);
2756 }
2757 }
2758 }
2759 }
2760
2761 /* Check whether GDB would be interested in this event. */
2762
2763 /* If GDB is not interested in this signal, don't stop other
2764 threads, and don't report it to GDB. Just resume the inferior
2765 right away. We do this for threading-related signals as well as
2766 any that GDB specifically requested we ignore. But never ignore
2767 SIGSTOP if we sent it ourselves, and do not ignore signals when
2768 stepping - they may require special handling to skip the signal
2769 handler. */
2770 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2771 thread library? */
2772 if (WIFSTOPPED (w)
2773 && current_thread->last_resume_kind != resume_step
2774 && (
2775 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2776 (current_process ()->private->thread_db != NULL
2777 && (WSTOPSIG (w) == __SIGRTMIN
2778 || WSTOPSIG (w) == __SIGRTMIN + 1))
2779 ||
2780 #endif
2781 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2782 && !(WSTOPSIG (w) == SIGSTOP
2783 && current_thread->last_resume_kind == resume_stop))))
2784 {
2785 siginfo_t info, *info_p;
2786
2787 if (debug_threads)
2788 debug_printf ("Ignored signal %d for LWP %ld.\n",
2789 WSTOPSIG (w), lwpid_of (current_thread));
2790
2791 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2792 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2793 info_p = &info;
2794 else
2795 info_p = NULL;
2796 linux_resume_one_lwp (event_child, event_child->stepping,
2797 WSTOPSIG (w), info_p);
2798 goto retry;
2799 }
2800
2801 /* Note that all addresses are always "out of the step range" when
2802 there's no range to begin with. */
2803 in_step_range = lwp_in_step_range (event_child);
2804
2805 /* If GDB wanted this thread to single step, and the thread is out
2806 of the step range, we always want to report the SIGTRAP, and let
2807 GDB handle it. Watchpoints should always be reported. So should
2808 signals we can't explain. A SIGTRAP we can't explain could be a
2809      GDB breakpoint --- we may or may not support Z0 breakpoints.  If
2810      we do, we're able to handle GDB breakpoints on top of internal
2811      breakpoints, by handling the internal breakpoint and still
2812      reporting the event to GDB.  If we don't, we're out of luck; GDB
2813      won't see the breakpoint hit.  */
2814 report_to_gdb = (!maybe_internal_trap
2815 || (current_thread->last_resume_kind == resume_step
2816 && !in_step_range)
2817 || event_child->stopped_by_watchpoint
2818 || (!step_over_finished && !in_step_range
2819 && !bp_explains_trap && !trace_event)
2820 || (gdb_breakpoint_here (event_child->stop_pc)
2821 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2822 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
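  /* In words: report when the trap wasn't one of ours to begin with;
     when GDB asked for a single-step and we left the step range; when
     a watchpoint triggered; when nothing at all explains the trap; or
     when we sit on a GDB breakpoint whose condition holds and which
     has no commands to run on the target's side.  */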
2823
2824 run_breakpoint_commands (event_child->stop_pc);
2825
2826 /* We found no reason GDB would want us to stop. We either hit one
2827 of our own breakpoints, or finished an internal step GDB
2828 shouldn't know about. */
2829 if (!report_to_gdb)
2830 {
2831 if (debug_threads)
2832 {
2833 if (bp_explains_trap)
2834 debug_printf ("Hit a gdbserver breakpoint.\n");
2835 if (step_over_finished)
2836 debug_printf ("Step-over finished.\n");
2837 if (trace_event)
2838 debug_printf ("Tracepoint event.\n");
2839 if (lwp_in_step_range (event_child))
2840 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2841 paddress (event_child->stop_pc),
2842 paddress (event_child->step_range_start),
2843 paddress (event_child->step_range_end));
2844 }
2845
2846 /* We're not reporting this breakpoint to GDB, so apply the
2847 decr_pc_after_break adjustment to the inferior's regcache
2848 ourselves. */
2849
2850 if (the_low_target.set_pc != NULL)
2851 {
2852 struct regcache *regcache
2853 = get_thread_regcache (current_thread, 1);
2854 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2855 }
2856
2857 /* We may have finished stepping over a breakpoint. If so,
2858 we've stopped and suspended all LWPs momentarily except the
2859 stepping one. This is where we resume them all again. We're
2860 going to keep waiting, so use proceed, which handles stepping
2861 over the next breakpoint. */
2862 if (debug_threads)
2863 debug_printf ("proceeding all threads.\n");
2864
2865 if (step_over_finished)
2866 unsuspend_all_lwps (event_child);
2867
2868 proceed_all_lwps ();
2869 goto retry;
2870 }
2871
2872 if (debug_threads)
2873 {
2874 if (current_thread->last_resume_kind == resume_step)
2875 {
2876 if (event_child->step_range_start == event_child->step_range_end)
2877 debug_printf ("GDB wanted to single-step, reporting event.\n");
2878 else if (!lwp_in_step_range (event_child))
2879 debug_printf ("Out of step range, reporting event.\n");
2880 }
2881 if (event_child->stopped_by_watchpoint)
2882 debug_printf ("Stopped by watchpoint.\n");
2883 if (gdb_breakpoint_here (event_child->stop_pc))
2884 debug_printf ("Stopped by GDB breakpoint.\n");
2885       /* Already inside the debug_threads block; no need to re-check.  */
2886       debug_printf ("Hit a non-gdbserver trap event.\n");
2887 }
2888
2889 /* Alright, we're going to report a stop. */
2890
2891 if (!non_stop && !stabilizing_threads)
2892 {
2893 /* In all-stop, stop all threads. */
2894 stop_all_lwps (0, NULL);
2895
2896 /* If we're not waiting for a specific LWP, choose an event LWP
2897 from among those that have had events. Giving equal priority
2898 to all LWPs that have had events helps prevent
2899 starvation. */
2900 if (ptid_equal (ptid, minus_one_ptid))
2901 {
2902 event_child->status_pending_p = 1;
2903 event_child->status_pending = w;
2904
2905 select_event_lwp (&event_child);
2906
2907 /* current_thread and event_child must stay in sync. */
2908 current_thread = get_lwp_thread (event_child);
2909
2910 event_child->status_pending_p = 0;
2911 w = event_child->status_pending;
2912 }
2913
2914 /* Now that we've selected our final event LWP, cancel any
2915 breakpoints in other LWPs that have hit a GDB breakpoint.
2916 See the comment in cancel_breakpoints_callback to find out
2917 why. */
2918 find_inferior (&all_threads, cancel_breakpoints_callback, event_child);
2919
2920       /* If we were doing a step-over, all other threads but the stepping one
2921 had been paused in start_step_over, with their suspend counts
2922 incremented. We don't want to do a full unstop/unpause, because we're
2923 in all-stop mode (so we want threads stopped), but we still need to
2924 unsuspend the other threads, to decrement their `suspended' count
2925 back. */
2926 if (step_over_finished)
2927 unsuspend_all_lwps (event_child);
2928
2929 /* Stabilize threads (move out of jump pads). */
2930 stabilize_threads ();
2931 }
2932 else
2933 {
2934 /* If we just finished a step-over, then all threads had been
2935 momentarily paused. In all-stop, that's fine, we want
2936 threads stopped by now anyway. In non-stop, we need to
2937 re-resume threads that GDB wanted to be running. */
2938 if (step_over_finished)
2939 unstop_all_lwps (1, event_child);
2940 }
2941
2942 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2943
2944 if (current_thread->last_resume_kind == resume_stop
2945 && WSTOPSIG (w) == SIGSTOP)
2946 {
2947       /* A thread that has been requested to stop by GDB with vCont;t
2948 	 stopped cleanly, so report it as GDB_SIGNAL_0.  The use of
2949 SIGSTOP is an implementation detail. */
2950 ourstatus->value.sig = GDB_SIGNAL_0;
2951 }
2952 else if (current_thread->last_resume_kind == resume_stop
2953 && WSTOPSIG (w) != SIGSTOP)
2954 {
2955 /* A thread that has been requested to stop by GDB with vCont;t,
2956 	 but it stopped for some other reason.  */
2957 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2958 }
2959 else
2960 {
2961 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2962 }
2963
2964 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2965
2966 if (debug_threads)
2967 {
2968 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2969 target_pid_to_str (ptid_of (current_thread)),
2970 ourstatus->kind, ourstatus->value.sig);
2971 debug_exit ();
2972 }
2973
2974 return ptid_of (current_thread);
2975 }
2976
2977 /* Get rid of any pending event in the pipe. */
2978 static void
2979 async_file_flush (void)
2980 {
2981 int ret;
2982 char buf;
2983
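  /* The event pipe is put in non-blocking mode when async mode is
     enabled, so once drained the read below fails with EAGAIN and
     the loop exits.  */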
2984 do
2985 ret = read (linux_event_pipe[0], &buf, 1);
2986 while (ret >= 0 || (ret == -1 && errno == EINTR));
2987 }
2988
2989 /* Put something in the pipe, so the event loop wakes up. */
2990 static void
2991 async_file_mark (void)
2992 {
2993 int ret;
2994
2995 async_file_flush ();
2996
2997 do
2998 ret = write (linux_event_pipe[1], "+", 1);
2999 while (ret == 0 || (ret == -1 && errno == EINTR));
3000
3001 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3002 be awakened anyway. */
3003 }
3004
3005 static ptid_t
3006 linux_wait (ptid_t ptid,
3007 struct target_waitstatus *ourstatus, int target_options)
3008 {
3009 ptid_t event_ptid;
3010
3011 /* Flush the async file first. */
3012 if (target_is_async_p ())
3013 async_file_flush ();
3014
3015 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3016
3017 /* If at least one stop was reported, there may be more. A single
3018 SIGCHLD can signal more than one child stop. */
3019 if (target_is_async_p ()
3020 && (target_options & TARGET_WNOHANG) != 0
3021 && !ptid_equal (event_ptid, null_ptid))
3022 async_file_mark ();
3023
3024 return event_ptid;
3025 }
3026
3027 /* Send a signal to an LWP. */
3028
3029 static int
3030 kill_lwp (unsigned long lwpid, int signo)
3031 {
3032 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3033 fails, then we are not using nptl threads and we should be using kill. */
3034
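  /* Note the probe below is cached in a static flag: the first
     ENOSYS permanently switches this process over to plain kill.  */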
3035 #ifdef __NR_tkill
3036 {
3037 static int tkill_failed;
3038
3039 if (!tkill_failed)
3040 {
3041 int ret;
3042
3043 errno = 0;
3044 ret = syscall (__NR_tkill, lwpid, signo);
3045 if (errno != ENOSYS)
3046 return ret;
3047 tkill_failed = 1;
3048 }
3049 }
3050 #endif
3051
3052 return kill (lwpid, signo);
3053 }
3054
3055 void
3056 linux_stop_lwp (struct lwp_info *lwp)
3057 {
3058 send_sigstop (lwp);
3059 }
3060
3061 static void
3062 send_sigstop (struct lwp_info *lwp)
3063 {
3064 int pid;
3065
3066 pid = lwpid_of (get_lwp_thread (lwp));
3067
3068 /* If we already have a pending stop signal for this process, don't
3069 send another. */
3070 if (lwp->stop_expected)
3071 {
3072 if (debug_threads)
3073 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3074
3075 return;
3076 }
3077
3078 if (debug_threads)
3079 debug_printf ("Sending sigstop to lwp %d\n", pid);
3080
3081 lwp->stop_expected = 1;
3082 kill_lwp (pid, SIGSTOP);
3083 }
3084
3085 static int
3086 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3087 {
3088 struct thread_info *thread = (struct thread_info *) entry;
3089 struct lwp_info *lwp = get_thread_lwp (thread);
3090
3091 /* Ignore EXCEPT. */
3092 if (lwp == except)
3093 return 0;
3094
3095 if (lwp->stopped)
3096 return 0;
3097
3098 send_sigstop (lwp);
3099 return 0;
3100 }
3101
3102 /* Increment the suspend count of an LWP, and stop it, if not stopped
3103 yet. */
3104 static int
3105 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3106 void *except)
3107 {
3108 struct thread_info *thread = (struct thread_info *) entry;
3109 struct lwp_info *lwp = get_thread_lwp (thread);
3110
3111 /* Ignore EXCEPT. */
3112 if (lwp == except)
3113 return 0;
3114
3115 lwp->suspended++;
3116
3117 return send_sigstop_callback (entry, except);
3118 }
3119
3120 static void
3121 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3122 {
3123 /* It's dead, really. */
3124 lwp->dead = 1;
3125
3126 /* Store the exit status for later. */
3127 lwp->status_pending_p = 1;
3128 lwp->status_pending = wstat;
3129
3130 /* Prevent trying to stop it. */
3131 lwp->stopped = 1;
3132
3133 /* No further stops are expected from a dead lwp. */
3134 lwp->stop_expected = 0;
3135 }
3136
3137 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3138
3139 static void
3140 wait_for_sigstop (void)
3141 {
3142 struct thread_info *saved_thread;
3143 ptid_t saved_tid;
3144 int wstat;
3145 int ret;
3146
3147 saved_thread = current_thread;
3148 if (saved_thread != NULL)
3149 saved_tid = saved_thread->entry.id;
3150 else
3151 saved_tid = null_ptid; /* avoid bogus unused warning */
3152
3153 if (debug_threads)
3154 debug_printf ("wait_for_sigstop: pulling events\n");
3155
3156 /* Passing NULL_PTID as filter indicates we want all events to be
3157 left pending. Eventually this returns when there are no
3158 unwaited-for children left. */
3159 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3160 &wstat, __WALL);
3161 gdb_assert (ret == -1);
3162
3163 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3164 current_thread = saved_thread;
3165 else
3166 {
3167 if (debug_threads)
3168 debug_printf ("Previously current thread died.\n");
3169
3170 if (non_stop)
3171 {
3172 /* We can't change the current inferior behind GDB's back,
3173 otherwise, a subsequent command may apply to the wrong
3174 process. */
3175 current_thread = NULL;
3176 }
3177 else
3178 {
3179 /* Set a valid thread as current. */
3180 set_desired_thread (0);
3181 }
3182 }
3183 }
3184
3185 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3186 move it out, because we need to report the stop event to GDB. For
3187 example, if the user puts a breakpoint in the jump pad, it's
3188 because she wants to debug it. */
3189
3190 static int
3191 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3192 {
3193 struct thread_info *thread = (struct thread_info *) entry;
3194 struct lwp_info *lwp = get_thread_lwp (thread);
3195
3196 gdb_assert (lwp->suspended == 0);
3197 gdb_assert (lwp->stopped);
3198
3199 /* Allow debugging the jump pad, gdb_collect, etc.. */
3200 return (supports_fast_tracepoints ()
3201 && agent_loaded_p ()
3202 && (gdb_breakpoint_here (lwp->stop_pc)
3203 || lwp->stopped_by_watchpoint
3204 || thread->last_resume_kind == resume_step)
3205 && linux_fast_tracepoint_collecting (lwp, NULL));
3206 }
3207
3208 static void
3209 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3210 {
3211 struct thread_info *thread = (struct thread_info *) entry;
3212 struct lwp_info *lwp = get_thread_lwp (thread);
3213 int *wstat;
3214
3215 gdb_assert (lwp->suspended == 0);
3216 gdb_assert (lwp->stopped);
3217
3218 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3219
3220 /* Allow debugging the jump pad, gdb_collect, etc. */
3221 if (!gdb_breakpoint_here (lwp->stop_pc)
3222 && !lwp->stopped_by_watchpoint
3223 && thread->last_resume_kind != resume_step
3224 && maybe_move_out_of_jump_pad (lwp, wstat))
3225 {
3226 if (debug_threads)
3227 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3228 lwpid_of (thread));
3229
3230 if (wstat)
3231 {
3232 lwp->status_pending_p = 0;
3233 enqueue_one_deferred_signal (lwp, wstat);
3234
3235 if (debug_threads)
3236 debug_printf ("Signal %d for LWP %ld deferred "
3237 "(in jump pad)\n",
3238 WSTOPSIG (*wstat), lwpid_of (thread));
3239 }
3240
3241 linux_resume_one_lwp (lwp, 0, 0, NULL);
3242 }
3243 else
3244 lwp->suspended++;
3245 }
3246
3247 static int
3248 lwp_running (struct inferior_list_entry *entry, void *data)
3249 {
3250 struct thread_info *thread = (struct thread_info *) entry;
3251 struct lwp_info *lwp = get_thread_lwp (thread);
3252
3253 if (lwp->dead)
3254 return 0;
3255 if (lwp->stopped)
3256 return 0;
3257 return 1;
3258 }
3259
3260 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3261 If SUSPEND, then also increase the suspend count of every LWP,
3262 except EXCEPT. */
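/* A note on pairing (see the callers in linux_wait_1 and
   start_step_over): a plain stop is undone with unstop_all_lwps,
   while a stop-and-suspend must additionally be balanced with
   unsuspend_all_lwps so that the suspend counts return to zero.  */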
3263
3264 static void
3265 stop_all_lwps (int suspend, struct lwp_info *except)
3266 {
3267 /* Should not be called recursively. */
3268 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3269
3270 if (debug_threads)
3271 {
3272 debug_enter ();
3273 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3274 suspend ? "stop-and-suspend" : "stop",
3275 except != NULL
3276 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3277 : "none");
3278 }
3279
3280 stopping_threads = (suspend
3281 ? STOPPING_AND_SUSPENDING_THREADS
3282 : STOPPING_THREADS);
3283
3284 if (suspend)
3285 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3286 else
3287 find_inferior (&all_threads, send_sigstop_callback, except);
3288 wait_for_sigstop ();
3289 stopping_threads = NOT_STOPPING_THREADS;
3290
3291 if (debug_threads)
3292 {
3293 debug_printf ("stop_all_lwps done, setting stopping_threads "
3294 "back to !stopping\n");
3295 debug_exit ();
3296 }
3297 }
3298
3299 /* Resume execution of the inferior process.
3300 If STEP is nonzero, single-step it.
3301 If SIGNAL is nonzero, give it that signal. */
3302
3303 static void
3304 linux_resume_one_lwp (struct lwp_info *lwp,
3305 int step, int signal, siginfo_t *info)
3306 {
3307 struct thread_info *thread = get_lwp_thread (lwp);
3308 struct thread_info *saved_thread;
3309 int fast_tp_collecting;
3310
3311 if (lwp->stopped == 0)
3312 return;
3313
3314 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3315
3316 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3317
3318 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3319 user used the "jump" command, or "set $pc = foo"). */
3320 if (lwp->stop_pc != get_pc (lwp))
3321 {
3322 /* Collecting 'while-stepping' actions doesn't make sense
3323 anymore. */
3324 release_while_stepping_state_list (thread);
3325 }
3326
3327 /* If we have pending signals or status, and a new signal, enqueue the
3328 signal. Also enqueue the signal if we are waiting to reinsert a
3329 breakpoint; it will be picked up again below. */
3330 if (signal != 0
3331 && (lwp->status_pending_p
3332 || lwp->pending_signals != NULL
3333 || lwp->bp_reinsert != 0
3334 || fast_tp_collecting))
3335 {
3336 struct pending_signals *p_sig;
3337 p_sig = xmalloc (sizeof (*p_sig));
3338 p_sig->prev = lwp->pending_signals;
3339 p_sig->signal = signal;
3340 if (info == NULL)
3341 memset (&p_sig->info, 0, sizeof (siginfo_t));
3342 else
3343 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3344 lwp->pending_signals = p_sig;
3345 }
3346
3347 if (lwp->status_pending_p)
3348 {
3349 if (debug_threads)
3350 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3351 " has pending status\n",
3352 lwpid_of (thread), step ? "step" : "continue", signal,
3353 lwp->stop_expected ? "expected" : "not expected");
3354 return;
3355 }
3356
3357 saved_thread = current_thread;
3358 current_thread = thread;
3359
3360 if (debug_threads)
3361 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3362 lwpid_of (thread), step ? "step" : "continue", signal,
3363 lwp->stop_expected ? "expected" : "not expected");
3364
3365 /* This bit needs some thinking about. If we get a signal that
3366 we must report while a single-step reinsert is still pending,
3367 we often end up resuming the thread. It might be better to
3368 (ew) allow a stack of pending events; then we could be sure that
3369 the reinsert happened right away and not lose any signals.
3370
3371 Making this stack would also shrink the window in which breakpoints are
3372 uninserted (see comment in linux_wait_for_lwp) but not enough for
3373 complete correctness, so it won't solve that problem. It may be
3374 worthwhile just to solve this one, however. */
3375 if (lwp->bp_reinsert != 0)
3376 {
3377 if (debug_threads)
3378 debug_printf (" pending reinsert at 0x%s\n",
3379 paddress (lwp->bp_reinsert));
3380
3381 if (can_hardware_single_step ())
3382 {
3383 if (fast_tp_collecting == 0)
3384 {
3385 if (step == 0)
3386 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3387 if (lwp->suspended)
3388 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3389 lwp->suspended);
3390 }
3391
3392 step = 1;
3393 }
3394
3395 /* Postpone any pending signal. It was enqueued above. */
3396 signal = 0;
3397 }
3398
3399 if (fast_tp_collecting == 1)
3400 {
3401 if (debug_threads)
3402 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3403 " (exit-jump-pad-bkpt)\n",
3404 lwpid_of (thread));
3405
3406 /* Postpone any pending signal. It was enqueued above. */
3407 signal = 0;
3408 }
3409 else if (fast_tp_collecting == 2)
3410 {
3411 if (debug_threads)
3412 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3413 " single-stepping\n",
3414 lwpid_of (thread));
3415
3416 if (can_hardware_single_step ())
3417 step = 1;
3418 else
3419 {
3420 internal_error (__FILE__, __LINE__,
3421 "moving out of jump pad single-stepping"
3422 " not implemented on this target");
3423 }
3424
3425 /* Postpone any pending signal. It was enqueued above. */
3426 signal = 0;
3427 }
3428
3429   /* If we have while-stepping actions in this thread, set it stepping.
3430 If we have a signal to deliver, it may or may not be set to
3431 SIG_IGN, we don't know. Assume so, and allow collecting
3432 while-stepping into a signal handler. A possible smart thing to
3433 do would be to set an internal breakpoint at the signal return
3434 address, continue, and carry on catching this while-stepping
3435 action only when that breakpoint is hit. A future
3436 enhancement. */
3437 if (thread->while_stepping != NULL
3438 && can_hardware_single_step ())
3439 {
3440 if (debug_threads)
3441 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3442 lwpid_of (thread));
3443 step = 1;
3444 }
3445
3446 if (debug_threads && the_low_target.get_pc != NULL)
3447 {
3448 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3449 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3450 debug_printf (" resuming from pc 0x%lx\n", (long) pc);
3451 }
3452
3453 /* If we have pending signals, consume one unless we are trying to
3454 reinsert a breakpoint or we're trying to finish a fast tracepoint
3455 collect. */
3456 if (lwp->pending_signals != NULL
3457 && lwp->bp_reinsert == 0
3458 && fast_tp_collecting == 0)
3459 {
3460 struct pending_signals **p_sig;
3461
3462 p_sig = &lwp->pending_signals;
3463 while ((*p_sig)->prev != NULL)
3464 p_sig = &(*p_sig)->prev;
3465
3466 signal = (*p_sig)->signal;
3467 if ((*p_sig)->info.si_signo != 0)
3468 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3469 &(*p_sig)->info);
3470
3471 free (*p_sig);
3472 *p_sig = NULL;
3473 }
3474
3475 if (the_low_target.prepare_to_resume != NULL)
3476 the_low_target.prepare_to_resume (lwp);
3477
3478 regcache_invalidate_thread (thread);
3479 errno = 0;
3480 lwp->stopped = 0;
3481 lwp->stopped_by_watchpoint = 0;
3482 lwp->stepping = step;
3483 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3484 (PTRACE_TYPE_ARG3) 0,
3485 /* Coerce to a uintptr_t first to avoid potential gcc warning
3486 of coercing an 8 byte integer to a 4 byte pointer. */
3487 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3488
3489 current_thread = saved_thread;
3490 if (errno)
3491 {
3492 /* ESRCH from ptrace either means that the thread was already
3493 running (an error) or that it is gone (a race condition). If
3494 it's gone, we will get a notification the next time we wait,
3495 so we can ignore the error. We could differentiate these
3496 two, but it's tricky without waiting; the thread still exists
3497 as a zombie, so sending it signal 0 would succeed. So just
3498 ignore ESRCH. */
3499 if (errno == ESRCH)
3500 return;
3501
3502 perror_with_name ("ptrace");
3503 }
3504 }
3505
3506 struct thread_resume_array
3507 {
3508 struct thread_resume *resume;
3509 size_t n;
3510 };
3511
3512 /* This function is called once per thread via find_inferior.
3513 ARG is a pointer to a thread_resume_array struct.
3514 We look up the thread specified by ENTRY in ARG, and mark the thread
3515 with a pointer to the appropriate resume request.
3516
3517    This algorithm is O(threads * resume elements), but the number of
3518    resume elements is small (and will remain small at least until GDB
3519    supports thread suspension).  */
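/* A sketch of the expected call pattern (the actual caller is
   linux_resume, further below in this file):

     struct thread_resume_array array = { resume_info, n };

     find_inferior (&all_threads, linux_set_resume_request, &array);
*/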
3520
3521 static int
3522 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3523 {
3524 struct thread_info *thread = (struct thread_info *) entry;
3525 struct lwp_info *lwp = get_thread_lwp (thread);
3526 int ndx;
3527 struct thread_resume_array *r;
3528
3529 r = arg;
3530
3531 for (ndx = 0; ndx < r->n; ndx++)
3532 {
3533 ptid_t ptid = r->resume[ndx].thread;
3534 if (ptid_equal (ptid, minus_one_ptid)
3535 || ptid_equal (ptid, entry->id)
3536 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3537 of PID'. */
3538 || (ptid_get_pid (ptid) == pid_of (thread)
3539 && (ptid_is_pid (ptid)
3540 || ptid_get_lwp (ptid) == -1)))
3541 {
3542 if (r->resume[ndx].kind == resume_stop
3543 && thread->last_resume_kind == resume_stop)
3544 {
3545 if (debug_threads)
3546 debug_printf ("already %s LWP %ld at GDB's request\n",
3547 (thread->last_status.kind
3548 == TARGET_WAITKIND_STOPPED)
3549 ? "stopped"
3550 : "stopping",
3551 lwpid_of (thread));
3552
3553 continue;
3554 }
3555
3556 lwp->resume = &r->resume[ndx];
3557 thread->last_resume_kind = lwp->resume->kind;
3558
3559 lwp->step_range_start = lwp->resume->step_range_start;
3560 lwp->step_range_end = lwp->resume->step_range_end;
3561
3562 /* If we had a deferred signal to report, dequeue one now.
3563 This can happen if LWP gets more than one signal while
3564 trying to get out of a jump pad. */
3565 if (lwp->stopped
3566 && !lwp->status_pending_p
3567 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3568 {
3569 lwp->status_pending_p = 1;
3570
3571 if (debug_threads)
3572 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3573 "leaving status pending.\n",
3574 WSTOPSIG (lwp->status_pending),
3575 lwpid_of (thread));
3576 }
3577
3578 return 0;
3579 }
3580 }
3581
3582 /* No resume action for this thread. */
3583 lwp->resume = NULL;
3584
3585 return 0;
3586 }
3587
3588 /* find_inferior callback for linux_resume.
3589 Set *FLAG_P if this lwp has an interesting status pending. */
3590
3591 static int
3592 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3593 {
3594 struct thread_info *thread = (struct thread_info *) entry;
3595 struct lwp_info *lwp = get_thread_lwp (thread);
3596
3597 /* LWPs which will not be resumed are not interesting, because
3598 we might not wait for them next time through linux_wait. */
3599 if (lwp->resume == NULL)
3600 return 0;
3601
3602 if (lwp->status_pending_p)
3603 * (int *) flag_p = 1;
3604
3605 return 0;
3606 }
3607
3608 /* Return 1 if this lwp that GDB wants running is stopped at an
3609 internal breakpoint that we need to step over. It assumes that any
3610 required STOP_PC adjustment has already been propagated to the
3611 inferior's regcache. */
3612
3613 static int
3614 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3615 {
3616 struct thread_info *thread = (struct thread_info *) entry;
3617 struct lwp_info *lwp = get_thread_lwp (thread);
3618 struct thread_info *saved_thread;
3619 CORE_ADDR pc;
3620
3621 /* LWPs which will not be resumed are not interesting, because we
3622 might not wait for them next time through linux_wait. */
3623
3624 if (!lwp->stopped)
3625 {
3626 if (debug_threads)
3627 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3628 lwpid_of (thread));
3629 return 0;
3630 }
3631
3632 if (thread->last_resume_kind == resume_stop)
3633 {
3634 if (debug_threads)
3635 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3636 " stopped\n",
3637 lwpid_of (thread));
3638 return 0;
3639 }
3640
3641 gdb_assert (lwp->suspended >= 0);
3642
3643 if (lwp->suspended)
3644 {
3645 if (debug_threads)
3646 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3647 lwpid_of (thread));
3648 return 0;
3649 }
3650
3651 if (!lwp->need_step_over)
3652 {
3653 if (debug_threads)
3654 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3655 }
3656
3657 if (lwp->status_pending_p)
3658 {
3659 if (debug_threads)
3660 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3661 " status.\n",
3662 lwpid_of (thread));
3663 return 0;
3664 }
3665
3666 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3667 or we have. */
3668 pc = get_pc (lwp);
3669
3670 /* If the PC has changed since we stopped, then don't do anything,
3671 and let the breakpoint/tracepoint be hit. This happens if, for
3672 instance, GDB handled the decr_pc_after_break subtraction itself,
3673 GDB is OOL stepping this thread, or the user has issued a "jump"
3674      command, or poked the thread's registers herself.  */
3675 if (pc != lwp->stop_pc)
3676 {
3677 if (debug_threads)
3678 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3679 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3680 lwpid_of (thread),
3681 paddress (lwp->stop_pc), paddress (pc));
3682
3683 lwp->need_step_over = 0;
3684 return 0;
3685 }
3686
3687 saved_thread = current_thread;
3688 current_thread = thread;
3689
3690 /* We can only step over breakpoints we know about. */
3691 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3692 {
3693       /* Don't step over a breakpoint that GDB expects to hit, though.
3694 	 If the condition is being evaluated on the target's side and it
3695 	 evaluates to false, step over this breakpoint as well.  */
3696 if (gdb_breakpoint_here (pc)
3697 && gdb_condition_true_at_breakpoint (pc)
3698 && gdb_no_commands_at_breakpoint (pc))
3699 {
3700 if (debug_threads)
3701 debug_printf ("Need step over [LWP %ld]? yes, but found"
3702 " GDB breakpoint at 0x%s; skipping step over\n",
3703 lwpid_of (thread), paddress (pc));
3704
3705 current_thread = saved_thread;
3706 return 0;
3707 }
3708 else
3709 {
3710 if (debug_threads)
3711 debug_printf ("Need step over [LWP %ld]? yes, "
3712 "found breakpoint at 0x%s\n",
3713 lwpid_of (thread), paddress (pc));
3714
3715 /* We've found an lwp that needs stepping over --- return 1 so
3716 that find_inferior stops looking. */
3717 current_thread = saved_thread;
3718
3719 /* If the step over is cancelled, this is set again. */
3720 lwp->need_step_over = 0;
3721 return 1;
3722 }
3723 }
3724
3725 current_thread = saved_thread;
3726
3727 if (debug_threads)
3728 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3729 " at 0x%s\n",
3730 lwpid_of (thread), paddress (pc));
3731
3732 return 0;
3733 }
3734
3735 /* Start a step-over operation on LWP.  When LWP is stopped at a
3736    breakpoint, to make progress, we need to move the breakpoint out
3737    of the way.  If we let other threads run while we do that, they
3738    may pass by the breakpoint location and miss hitting it.  To
3739    avoid that, a step-over momentarily stops all threads while LWP
3740    is single-stepped with the breakpoint temporarily uninserted from
3741    the inferior.  When the single-step finishes, we reinsert the
3742    breakpoint and let all threads that are supposed to be running
3743    run again.
3744
3745 On targets that don't support hardware single-step, we don't
3746 currently support full software single-stepping. Instead, we only
3747 support stepping over the thread event breakpoint, by asking the
3748    low target where to place a reinsert breakpoint.  Since this
3749    routine assumes the breakpoint being stepped over is a thread
3750    event breakpoint, the return address of the current function is
3751    usually a good enough place to set the reinsert breakpoint. */
3752
3753 static int
3754 start_step_over (struct lwp_info *lwp)
3755 {
3756 struct thread_info *thread = get_lwp_thread (lwp);
3757 struct thread_info *saved_thread;
3758 CORE_ADDR pc;
3759 int step;
3760
3761 if (debug_threads)
3762 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3763 lwpid_of (thread));
3764
3765 stop_all_lwps (1, lwp);
3766 gdb_assert (lwp->suspended == 0);
3767
3768 if (debug_threads)
3769 debug_printf ("Done stopping all threads for step-over.\n");
3770
3771 /* Note, we should always reach here with an already adjusted PC,
3772 either by GDB (if we're resuming due to GDB's request), or by our
3773 caller, if we just finished handling an internal breakpoint GDB
3774 shouldn't care about. */
3775 pc = get_pc (lwp);
3776
3777 saved_thread = current_thread;
3778 current_thread = thread;
3779
3780 lwp->bp_reinsert = pc;
3781 uninsert_breakpoints_at (pc);
3782 uninsert_fast_tracepoint_jumps_at (pc);
3783
3784 if (can_hardware_single_step ())
3785 {
3786 step = 1;
3787 }
3788 else
3789 {
3790 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3791 set_reinsert_breakpoint (raddr);
3792 step = 0;
3793 }
3794
3795 current_thread = saved_thread;
3796
3797 linux_resume_one_lwp (lwp, step, 0, NULL);
3798
3799 /* Require next event from this LWP. */
3800 step_over_bkpt = thread->entry.id;
3801 return 1;
3802 }
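
/* In other words, on a hardware single-step target, a step-over is
   roughly this sequence:

     stop_all_lwps (1, lwp);                   // freeze all other threads
     uninsert_breakpoints_at (pc);             // lift the breakpoint at PC
     linux_resume_one_lwp (lwp, 1, 0, NULL);   // PTRACE_SINGLESTEP
     ... wait for the step to complete ...
     finish_step_over (lwp);                   // reinsert, see below

   with the caller then setting the other threads running again.  On
   targets without hardware single-step, the single-step is replaced
   by a run to a reinsert breakpoint placed by the low target.  */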
3803
3804 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3805 start_step_over, if still there, and delete any reinsert
3806    breakpoints we've set, on targets without hardware single-step. */
3807
3808 static int
3809 finish_step_over (struct lwp_info *lwp)
3810 {
3811 if (lwp->bp_reinsert != 0)
3812 {
3813 if (debug_threads)
3814 debug_printf ("Finished step over.\n");
3815
3816 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3817 may be no breakpoint to reinsert there by now. */
3818 reinsert_breakpoints_at (lwp->bp_reinsert);
3819 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3820
3821 lwp->bp_reinsert = 0;
3822
3823 /* Delete any software-single-step reinsert breakpoints. No
3824 longer needed. We don't have to worry about other threads
3825 hitting this trap, and later not being able to explain it,
3826 because we were stepping over a breakpoint, and we hold all
3827 threads but LWP stopped while doing that. */
3828 if (!can_hardware_single_step ())
3829 delete_reinsert_breakpoints ();
3830
3831 step_over_bkpt = null_ptid;
3832 return 1;
3833 }
3834 else
3835 return 0;
3836 }
3837
3838 /* This function is called once per thread. We check the thread's resume
3839 request, which will tell us whether to resume, step, or leave the thread
3840 stopped; and what signal, if any, it should be sent.
3841
3842 For threads which we aren't explicitly told otherwise, we preserve
3843 the stepping flag; this is used for stepping over gdbserver-placed
3844 breakpoints.
3845
3846 If pending_flags was set in any thread, we queue any needed
3847 signals, since we won't actually resume. We already have a pending
3848 event to report, so we don't need to preserve any step requests;
3849 they should be re-issued if necessary. */
3850
3851 static int
3852 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3853 {
3854 struct thread_info *thread = (struct thread_info *) entry;
3855 struct lwp_info *lwp = get_thread_lwp (thread);
3856 int step;
3857 int leave_all_stopped = * (int *) arg;
3858 int leave_pending;
3859
3860 if (lwp->resume == NULL)
3861 return 0;
3862
3863 if (lwp->resume->kind == resume_stop)
3864 {
3865 if (debug_threads)
3866 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3867
3868 if (!lwp->stopped)
3869 {
3870 if (debug_threads)
3871 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3872
3873 /* Stop the thread, and wait for the event asynchronously,
3874 through the event loop. */
3875 send_sigstop (lwp);
3876 }
3877 else
3878 {
3879 if (debug_threads)
3880 debug_printf ("already stopped LWP %ld\n",
3881 lwpid_of (thread));
3882
3883 /* The LWP may have been stopped in an internal event that
3884 was not meant to be notified back to GDB (e.g., gdbserver
3885 breakpoint), so we should be reporting a stop event in
3886 this case too. */
3887
3888 /* If the thread already has a pending SIGSTOP, this is a
3889 no-op. Otherwise, something later will presumably resume
3890 the thread and this will cause it to cancel any pending
3891 operation, due to last_resume_kind == resume_stop. If
3892 the thread already has a pending status to report, we
3893 will still report it the next time we wait - see
3894 status_pending_p_callback. */
3895
3896 /* If we already have a pending signal to report, then
3897 there's no need to queue a SIGSTOP, as this means we're
3898 midway through moving the LWP out of the jumppad, and we
3899 will report the pending signal as soon as that is
3900 finished. */
3901 if (lwp->pending_signals_to_report == NULL)
3902 send_sigstop (lwp);
3903 }
3904
3905 /* For stop requests, we're done. */
3906 lwp->resume = NULL;
3907 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3908 return 0;
3909 }
3910
3911   /* If this thread, which is about to be resumed, has a pending status,
3912      then don't resume any threads - we can just report the pending
3913      status.  Make sure to queue any signals that would otherwise be
3914      sent.  In all-stop mode, we make this decision based on whether
3915      *any* thread has a pending status.  If there's a thread that needs
3916      the step-over-breakpoint dance, then don't resume any other thread
3917      but that particular one. */
3918 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3919
3920 if (!leave_pending)
3921 {
3922 if (debug_threads)
3923 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3924
3925 step = (lwp->resume->kind == resume_step);
3926 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3927 }
3928 else
3929 {
3930 if (debug_threads)
3931 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3932
3933 /* If we have a new signal, enqueue the signal. */
3934 if (lwp->resume->sig != 0)
3935 {
3936 struct pending_signals *p_sig;
3937 p_sig = xmalloc (sizeof (*p_sig));
3938 p_sig->prev = lwp->pending_signals;
3939 p_sig->signal = lwp->resume->sig;
3940 memset (&p_sig->info, 0, sizeof (siginfo_t));
3941
3942 /* If this is the same signal we were previously stopped by,
3943 make sure to queue its siginfo. We can ignore the return
3944 value of ptrace; if it fails, we'll skip
3945 PTRACE_SETSIGINFO. */
3946 if (WIFSTOPPED (lwp->last_status)
3947 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3948 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3949 &p_sig->info);
3950
3951 lwp->pending_signals = p_sig;
3952 }
3953 }
3954
3955 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3956 lwp->resume = NULL;
3957 return 0;
3958 }
3959
3960 static void
3961 linux_resume (struct thread_resume *resume_info, size_t n)
3962 {
3963 struct thread_resume_array array = { resume_info, n };
3964 struct thread_info *need_step_over = NULL;
3965 int any_pending;
3966 int leave_all_stopped;
3967
3968 if (debug_threads)
3969 {
3970 debug_enter ();
3971 debug_printf ("linux_resume:\n");
3972 }
3973
3974 find_inferior (&all_threads, linux_set_resume_request, &array);
3975
3976 /* If there is a thread which would otherwise be resumed, which has
3977 a pending status, then don't resume any threads - we can just
3978 report the pending status. Make sure to queue any signals that
3979 would otherwise be sent. In non-stop mode, we'll apply this
3980 logic to each thread individually. We consume all pending events
3981      before considering starting a step-over (in all-stop). */
3982 any_pending = 0;
3983 if (!non_stop)
3984 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
3985
3986 /* If there is a thread which would otherwise be resumed, which is
3987 stopped at a breakpoint that needs stepping over, then don't
3988 resume any threads - have it step over the breakpoint with all
3989 other threads stopped, then resume all threads again. Make sure
3990 to queue any signals that would otherwise be delivered or
3991 queued. */
3992 if (!any_pending && supports_breakpoints ())
3993 need_step_over
3994 = (struct thread_info *) find_inferior (&all_threads,
3995 need_step_over_p, NULL);
3996
3997 leave_all_stopped = (need_step_over != NULL || any_pending);
3998
3999 if (debug_threads)
4000 {
4001 if (need_step_over != NULL)
4002 debug_printf ("Not resuming all, need step over\n");
4003 else if (any_pending)
4004 debug_printf ("Not resuming, all-stop and found "
4005 "an LWP with pending status\n");
4006 else
4007 debug_printf ("Resuming, no pending status or step over needed\n");
4008 }
4009
4010 /* Even if we're leaving threads stopped, queue all signals we'd
4011 otherwise deliver. */
4012 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4013
4014 if (need_step_over)
4015 start_step_over (get_thread_lwp (need_step_over));
4016
4017 if (debug_threads)
4018 {
4019 debug_printf ("linux_resume done\n");
4020 debug_exit ();
4021 }
4022 }
4023
4024 /* This function is called once per thread. We check the thread's
4025 last resume request, which will tell us whether to resume, step, or
4026 leave the thread stopped. Any signal the client requested to be
4027 delivered has already been enqueued at this point.
4028
4029 If any thread that GDB wants running is stopped at an internal
4030 breakpoint that needs stepping over, we start a step-over operation
4031 on that particular thread, and leave all others stopped. */
4032
4033 static int
4034 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4035 {
4036 struct thread_info *thread = (struct thread_info *) entry;
4037 struct lwp_info *lwp = get_thread_lwp (thread);
4038 int step;
4039
4040 if (lwp == except)
4041 return 0;
4042
4043 if (debug_threads)
4044 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4045
4046 if (!lwp->stopped)
4047 {
4048 if (debug_threads)
4049 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4050 return 0;
4051 }
4052
4053 if (thread->last_resume_kind == resume_stop
4054 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4055 {
4056 if (debug_threads)
4057 	debug_printf ("   client wants LWP %ld to remain stopped\n",
4058 lwpid_of (thread));
4059 return 0;
4060 }
4061
4062 if (lwp->status_pending_p)
4063 {
4064 if (debug_threads)
4065 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4066 lwpid_of (thread));
4067 return 0;
4068 }
4069
4070 gdb_assert (lwp->suspended >= 0);
4071
4072 if (lwp->suspended)
4073 {
4074 if (debug_threads)
4075 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4076 return 0;
4077 }
4078
4079 if (thread->last_resume_kind == resume_stop
4080 && lwp->pending_signals_to_report == NULL
4081 && lwp->collecting_fast_tracepoint == 0)
4082 {
4083       /* We haven't reported this LWP as stopped yet (otherwise, the
4084 	 last_status.kind check above would catch it, and we wouldn't
4085 	 reach here).  This LWP may have been momentarily paused by a
4086 	 stop_all_lwps call while handling, for example, another LWP's
4087 step-over. In that case, the pending expected SIGSTOP signal
4088 that was queued at vCont;t handling time will have already
4089 been consumed by wait_for_sigstop, and so we need to requeue
4090 another one here. Note that if the LWP already has a SIGSTOP
4091 pending, this is a no-op. */
4092
4093 if (debug_threads)
4094 debug_printf ("Client wants LWP %ld to stop. "
4095 "Making sure it has a SIGSTOP pending\n",
4096 lwpid_of (thread));
4097
4098 send_sigstop (lwp);
4099 }
4100
4101 step = thread->last_resume_kind == resume_step;
4102 linux_resume_one_lwp (lwp, step, 0, NULL);
4103 return 0;
4104 }
4105
4106 static int
4107 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4108 {
4109 struct thread_info *thread = (struct thread_info *) entry;
4110 struct lwp_info *lwp = get_thread_lwp (thread);
4111
4112 if (lwp == except)
4113 return 0;
4114
4115 lwp->suspended--;
4116 gdb_assert (lwp->suspended >= 0);
4117
4118 return proceed_one_lwp (entry, except);
4119 }
4120
4121 /* When we finish a step-over, set threads running again. If there's
4122 another thread that may need a step-over, now's the time to start
4123 it. Eventually, we'll move all threads past their breakpoints. */
4124
4125 static void
4126 proceed_all_lwps (void)
4127 {
4128 struct thread_info *need_step_over;
4129
4130 /* If there is a thread which would otherwise be resumed, which is
4131 stopped at a breakpoint that needs stepping over, then don't
4132 resume any threads - have it step over the breakpoint with all
4133 other threads stopped, then resume all threads again. */
4134
4135 if (supports_breakpoints ())
4136 {
4137 need_step_over
4138 = (struct thread_info *) find_inferior (&all_threads,
4139 need_step_over_p, NULL);
4140
4141 if (need_step_over != NULL)
4142 {
4143 if (debug_threads)
4144 debug_printf ("proceed_all_lwps: found "
4145 "thread %ld needing a step-over\n",
4146 lwpid_of (need_step_over));
4147
4148 start_step_over (get_thread_lwp (need_step_over));
4149 return;
4150 }
4151 }
4152
4153 if (debug_threads)
4154 debug_printf ("Proceeding, no step-over needed\n");
4155
4156 find_inferior (&all_threads, proceed_one_lwp, NULL);
4157 }
4158
4159 /* Stopped LWPs that the client wanted to be running, that don't have
4160 pending statuses, are set to run again, except for EXCEPT, if not
4161 NULL. This undoes a stop_all_lwps call. */
4162
4163 static void
4164 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4165 {
4166 if (debug_threads)
4167 {
4168 debug_enter ();
4169 if (except)
4170 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4171 lwpid_of (get_lwp_thread (except)));
4172 else
4173 debug_printf ("unstopping all lwps\n");
4174 }
4175
4176 if (unsuspend)
4177 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4178 else
4179 find_inferior (&all_threads, proceed_one_lwp, except);
4180
4181 if (debug_threads)
4182 {
4183 debug_printf ("unstop_all_lwps done\n");
4184 debug_exit ();
4185 }
4186 }
4187
4188
4189 #ifdef HAVE_LINUX_REGSETS
4190
4191 #define use_linux_regsets 1
4192
4193 /* Returns true if REGSET has been disabled. */
4194
4195 static int
4196 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4197 {
4198 return (info->disabled_regsets != NULL
4199 && info->disabled_regsets[regset - info->regsets]);
4200 }
4201
4202 /* Disable REGSET. */
4203
4204 static void
4205 disable_regset (struct regsets_info *info, struct regset_info *regset)
4206 {
4207 int dr_offset;
4208
4209 dr_offset = regset - info->regsets;
4210 if (info->disabled_regsets == NULL)
4211 info->disabled_regsets = xcalloc (1, info->num_regsets);
4212 info->disabled_regsets[dr_offset] = 1;
4213 }
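
/* DISABLED_REGSETS is a parallel array of one byte per regset in
   INFO->regsets, indexed by the pointer difference computed above.
   For example, if INFO describes three regsets and the second one
   fails with EIO, dr_offset is 1 and the array becomes { 0, 1, 0 }.  */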
4214
4215 static int
4216 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4217 struct regcache *regcache)
4218 {
4219 struct regset_info *regset;
4220 int saw_general_regs = 0;
4221 int pid;
4222 struct iovec iov;
4223
4224 pid = lwpid_of (current_thread);
4225 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4226 {
4227 void *buf, *data;
4228 int nt_type, res;
4229
4230 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4231 continue;
4232
4233 buf = xmalloc (regset->size);
4234
4235 nt_type = regset->nt_type;
4236 if (nt_type)
4237 {
4238 iov.iov_base = buf;
4239 iov.iov_len = regset->size;
4240 data = (void *) &iov;
4241 }
4242 else
4243 data = buf;
4244
4245 #ifndef __sparc__
4246 res = ptrace (regset->get_request, pid,
4247 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4248 #else
4249 res = ptrace (regset->get_request, pid, data, nt_type);
4250 #endif
4251       if (res < 0)
4252 	{
4253 	  if (errno == EIO)
4254 	    {
4255 	      /* If we get EIO on a regset, do not try it again for
4256 		 this process mode. */
4257 	      disable_regset (regsets_info, regset);
4260 	    }
4261 	  else
4262 	    {
4263 	      char s[256];
4264 	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4265 		       pid);
4266 	      perror (s);
4267 	    }
4268 	}
4269       else
	{
	  if (regset->type == GENERAL_REGS)
4270 	    saw_general_regs = 1;
	  /* Only store the fetched contents in the regcache if the
	     ptrace call actually succeeded.  */
4271 	  regset->store_function (regcache, buf);
	}
4272       free (buf);
4273     }
4274 if (saw_general_regs)
4275 return 0;
4276 else
4277 return 1;
4278 }
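
/* The fetch loop above handles both regset flavors.  Entries with a
   non-zero NT_TYPE use the iovec-based PTRACE_GETREGSET request,
   equivalent to:

     struct iovec iov = { buf, regset->size };
     ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);

   (NT_PRSTATUS here is just an example note type; the kernel updates
   iov.iov_len to the amount actually written.)  Entries with a zero
   NT_TYPE use legacy requests such as PTRACE_GETREGS, which take the
   buffer pointer directly.  On SPARC the kernel expects the address
   and data arguments in the opposite order, hence the __sparc__
   special case above.  */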
4279
4280 static int
4281 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4282 struct regcache *regcache)
4283 {
4284 struct regset_info *regset;
4285 int saw_general_regs = 0;
4286 int pid;
4287 struct iovec iov;
4288
4289 pid = lwpid_of (current_thread);
4290 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4291 {
4292 void *buf, *data;
4293 int nt_type, res;
4294
4295 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4296 continue;
4297
4298 buf = xmalloc (regset->size);
4299
4300 /* First fill the buffer with the current register set contents,
4301 in case there are any items in the kernel's regset that are
4302 not in gdbserver's regcache. */
4303
4304 nt_type = regset->nt_type;
4305 if (nt_type)
4306 {
4307 iov.iov_base = buf;
4308 iov.iov_len = regset->size;
4309 data = (void *) &iov;
4310 }
4311 else
4312 data = buf;
4313
4314 #ifndef __sparc__
4315 res = ptrace (regset->get_request, pid,
4316 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4317 #else
4318 res = ptrace (regset->get_request, pid, data, nt_type);
4319 #endif
4320
4321 if (res == 0)
4322 {
4323 /* Then overlay our cached registers on that. */
4324 regset->fill_function (regcache, buf);
4325
4326 /* Only now do we write the register set. */
4327 #ifndef __sparc__
4328 res = ptrace (regset->set_request, pid,
4329 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4330 #else
4331 res = ptrace (regset->set_request, pid, data, nt_type);
4332 #endif
4333 }
4334
4335 if (res < 0)
4336 {
4337 if (errno == EIO)
4338 {
4339 /* If we get EIO on a regset, do not try it again for
4340 this process mode. */
4341 disable_regset (regsets_info, regset);
4342 }
4343 else if (errno == ESRCH)
4344 {
4345 /* At this point, ESRCH should mean the process is
4346 already gone, in which case we simply ignore attempts
4347 to change its registers. See also the related
4348 comment in linux_resume_one_lwp. */
4349 free (buf);
4350 return 0;
4351 }
4352 else
4353 {
4354 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4355 }
4356 }
4357 else if (regset->type == GENERAL_REGS)
4358 saw_general_regs = 1;
4359 free (buf);
4360 }
4361 if (saw_general_regs)
4362 return 0;
4363 else
4364 return 1;
4365 }
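
/* Note the read-modify-write cycle in the store loop above: the
   initial fetch ensures that any members of the kernel-side regset
   that gdbserver's regcache does not track keep their current values,
   instead of being overwritten with whatever the freshly allocated
   buffer happened to contain.  */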
4366
4367 #else /* !HAVE_LINUX_REGSETS */
4368
4369 #define use_linux_regsets 0
4370 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4371 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4372
4373 #endif
4374
4375 /* Return 1 if register REGNO is supported by one of the regset ptrace
4376 calls or 0 if it has to be transferred individually. */
4377
4378 static int
4379 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4380 {
4381 unsigned char mask = 1 << (regno % 8);
4382 size_t index = regno / 8;
4383
4384 return (use_linux_regsets
4385 && (regs_info->regset_bitmap == NULL
4386 || (regs_info->regset_bitmap[index] & mask) != 0));
4387 }
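
/* For example, for regno == 10 the test above inspects bit
   10 % 8 == 2 of byte 10 / 8 == 1, i.e. (regset_bitmap[1] & 0x04)
   != 0.  A NULL bitmap means every register is available through
   some regset.  */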
4388
4389 #ifdef HAVE_LINUX_USRREGS
4390
4391 int
4392 register_addr (const struct usrregs_info *usrregs, int regnum)
4393 {
4394 int addr;
4395
4396 if (regnum < 0 || regnum >= usrregs->num_regs)
4397 error ("Invalid register number %d.", regnum);
4398
4399 addr = usrregs->regmap[regnum];
4400
4401 return addr;
4402 }
4403
4404 /* Fetch one register. */
4405 static void
4406 fetch_register (const struct usrregs_info *usrregs,
4407 struct regcache *regcache, int regno)
4408 {
4409 CORE_ADDR regaddr;
4410 int i, size;
4411 char *buf;
4412 int pid;
4413
4414 if (regno >= usrregs->num_regs)
4415 return;
4416 if ((*the_low_target.cannot_fetch_register) (regno))
4417 return;
4418
4419 regaddr = register_addr (usrregs, regno);
4420 if (regaddr == -1)
4421 return;
4422
4423 size = ((register_size (regcache->tdesc, regno)
4424 + sizeof (PTRACE_XFER_TYPE) - 1)
4425 & -sizeof (PTRACE_XFER_TYPE));
4426 buf = alloca (size);
4427
4428 pid = lwpid_of (current_thread);
4429 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4430 {
4431 errno = 0;
4432 *(PTRACE_XFER_TYPE *) (buf + i) =
4433 ptrace (PTRACE_PEEKUSER, pid,
4434 /* Coerce to a uintptr_t first to avoid potential gcc warning
4435 of coercing an 8 byte integer to a 4 byte pointer. */
4436 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4437 regaddr += sizeof (PTRACE_XFER_TYPE);
4438 if (errno != 0)
4439 error ("reading register %d: %s", regno, strerror (errno));
4440 }
4441
4442 if (the_low_target.supply_ptrace_register)
4443 the_low_target.supply_ptrace_register (regcache, regno, buf);
4444 else
4445 supply_register (regcache, regno, buf);
4446 }
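
/* The size computation above rounds the register size up to a whole
   number of ptrace transfer words.  For instance, with an 8-byte
   PTRACE_XFER_TYPE a 10-byte register gives (10 + 7) & -8 == 16, so
   the loop performs two PTRACE_PEEKUSER transfers.  */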
4447
4448 /* Store one register. */
4449 static void
4450 store_register (const struct usrregs_info *usrregs,
4451 struct regcache *regcache, int regno)
4452 {
4453 CORE_ADDR regaddr;
4454 int i, size;
4455 char *buf;
4456 int pid;
4457
4458 if (regno >= usrregs->num_regs)
4459 return;
4460 if ((*the_low_target.cannot_store_register) (regno))
4461 return;
4462
4463 regaddr = register_addr (usrregs, regno);
4464 if (regaddr == -1)
4465 return;
4466
4467 size = ((register_size (regcache->tdesc, regno)
4468 + sizeof (PTRACE_XFER_TYPE) - 1)
4469 & -sizeof (PTRACE_XFER_TYPE));
4470 buf = alloca (size);
4471 memset (buf, 0, size);
4472
4473 if (the_low_target.collect_ptrace_register)
4474 the_low_target.collect_ptrace_register (regcache, regno, buf);
4475 else
4476 collect_register (regcache, regno, buf);
4477
4478 pid = lwpid_of (current_thread);
4479 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4480 {
4481 errno = 0;
4482 ptrace (PTRACE_POKEUSER, pid,
4483 /* Coerce to a uintptr_t first to avoid potential gcc warning
4484 about coercing an 8 byte integer to a 4 byte pointer. */
4485 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4486 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4487 if (errno != 0)
4488 {
4489 /* At this point, ESRCH should mean the process is
4490 already gone, in which case we simply ignore attempts
4491 to change its registers. See also the related
4492 comment in linux_resume_one_lwp. */
4493 if (errno == ESRCH)
4494 return;
4495
4496 if ((*the_low_target.cannot_store_register) (regno) == 0)
4497 error ("writing register %d: %s", regno, strerror (errno));
4498 }
4499 regaddr += sizeof (PTRACE_XFER_TYPE);
4500 }
4501 }
4502
4503 /* Fetch all registers, or just one, from the child process.
4504 If REGNO is -1, do this for all registers, skipping any that are
4505 assumed to have been retrieved by regsets_fetch_inferior_registers,
4506 unless ALL is non-zero.
4507 Otherwise, REGNO specifies which register (so we can save time). */
4508 static void
4509 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4510 struct regcache *regcache, int regno, int all)
4511 {
4512 struct usrregs_info *usr = regs_info->usrregs;
4513
4514 if (regno == -1)
4515 {
4516 for (regno = 0; regno < usr->num_regs; regno++)
4517 if (all || !linux_register_in_regsets (regs_info, regno))
4518 fetch_register (usr, regcache, regno);
4519 }
4520 else
4521 fetch_register (usr, regcache, regno);
4522 }
4523
4524 /* Store our register values back into the inferior.
4525 If REGNO is -1, do this for all registers, skipping any that are
4526 assumed to have been saved by regsets_store_inferior_registers,
4527 unless ALL is non-zero.
4528 Otherwise, REGNO specifies which register (so we can save time). */
4529 static void
4530 usr_store_inferior_registers (const struct regs_info *regs_info,
4531 struct regcache *regcache, int regno, int all)
4532 {
4533 struct usrregs_info *usr = regs_info->usrregs;
4534
4535 if (regno == -1)
4536 {
4537 for (regno = 0; regno < usr->num_regs; regno++)
4538 if (all || !linux_register_in_regsets (regs_info, regno))
4539 store_register (usr, regcache, regno);
4540 }
4541 else
4542 store_register (usr, regcache, regno);
4543 }
4544
4545 #else /* !HAVE_LINUX_USRREGS */
4546
4547 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4548 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4549
4550 #endif
4551
4552
4553 void
4554 linux_fetch_registers (struct regcache *regcache, int regno)
4555 {
4556 int use_regsets;
4557 int all = 0;
4558 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4559
4560 if (regno == -1)
4561 {
4562 if (the_low_target.fetch_register != NULL
4563 && regs_info->usrregs != NULL)
4564 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4565 (*the_low_target.fetch_register) (regcache, regno);
4566
4567 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4568 if (regs_info->usrregs != NULL)
4569 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4570 }
4571 else
4572 {
4573 if (the_low_target.fetch_register != NULL
4574 && (*the_low_target.fetch_register) (regcache, regno))
4575 return;
4576
4577 use_regsets = linux_register_in_regsets (regs_info, regno);
4578 if (use_regsets)
4579 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4580 regcache);
4581 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4582 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4583 }
4584 }
4585
4586 void
4587 linux_store_registers (struct regcache *regcache, int regno)
4588 {
4589 int use_regsets;
4590 int all = 0;
4591 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4592
4593 if (regno == -1)
4594 {
4595 all = regsets_store_inferior_registers (regs_info->regsets_info,
4596 regcache);
4597 if (regs_info->usrregs != NULL)
4598 usr_store_inferior_registers (regs_info, regcache, regno, all);
4599 }
4600 else
4601 {
4602 use_regsets = linux_register_in_regsets (regs_info, regno);
4603 if (use_regsets)
4604 all = regsets_store_inferior_registers (regs_info->regsets_info,
4605 regcache);
4606 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4607 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4608 }
4609 }
4610
4611
4612 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4613 to debugger memory starting at MYADDR. */
4614
4615 static int
4616 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4617 {
4618 int pid = lwpid_of (current_thread);
4619 register PTRACE_XFER_TYPE *buffer;
4620 register CORE_ADDR addr;
4621 register int count;
4622 char filename[64];
4623 register int i;
4624 int ret;
4625 int fd;
4626
4627 /* Try using /proc. Don't bother for one word. */
4628 if (len >= 3 * sizeof (long))
4629 {
4630 int bytes;
4631
4632 /* We could keep this file open and cache it - possibly one per
4633 thread. That requires some juggling, but is even faster. */
4634 sprintf (filename, "/proc/%d/mem", pid);
4635 fd = open (filename, O_RDONLY | O_LARGEFILE);
4636 if (fd == -1)
4637 goto no_proc;
4638
4639 /* If pread64 is available, use it. It's faster if the kernel
4640 supports it (only one syscall), and it's 64-bit safe even on
4641 32-bit platforms (for instance, SPARC debugging a SPARC64
4642 application). */
4643 #ifdef HAVE_PREAD64
4644 bytes = pread64 (fd, myaddr, len, memaddr);
4645 #else
4646 bytes = -1;
4647 if (lseek (fd, memaddr, SEEK_SET) != -1)
4648 bytes = read (fd, myaddr, len);
4649 #endif
4650
4651 close (fd);
4652 if (bytes == len)
4653 return 0;
4654
4655 /* Some data was read, we'll try to get the rest with ptrace. */
4656 if (bytes > 0)
4657 {
4658 memaddr += bytes;
4659 myaddr += bytes;
4660 len -= bytes;
4661 }
4662 }
4663
4664 no_proc:
4665 /* Round starting address down to longword boundary. */
4666 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4667 /* Round ending address up; get number of longwords that makes. */
4668 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4669 / sizeof (PTRACE_XFER_TYPE));
4670 /* Allocate buffer of that many longwords. */
4671 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4672
4673 /* Read all the longwords */
4674 errno = 0;
4675 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4676 {
4677 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4678 about coercing an 8 byte integer to a 4 byte pointer. */
4679 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4680 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4681 (PTRACE_TYPE_ARG4) 0);
4682 if (errno)
4683 break;
4684 }
4685 ret = errno;
4686
4687 /* Copy appropriate bytes out of the buffer. */
4688 if (i > 0)
4689 {
4690 i *= sizeof (PTRACE_XFER_TYPE);
4691 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4692 memcpy (myaddr,
4693 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4694 i < len ? i : len);
4695 }
4696
4697 return ret;
4698 }
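
/* As a worked example of the alignment fixup above: a 5-byte read at
   MEMADDR 0x1003 with 8-byte transfer words computes addr == 0x1000
   and count == 1, peeks a single word, and then copies the 5 bytes
   starting at offset 0x1003 & 7 == 3 out of the buffer.  */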
4699
4700 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4701 memory at MEMADDR. On failure (cannot write to the inferior)
4702 returns the value of errno. Always succeeds if LEN is zero. */
4703
4704 static int
4705 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4706 {
4707 register int i;
4708 /* Round starting address down to longword boundary. */
4709 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4710 /* Round ending address up; get number of longwords that makes. */
4711 register int count
4712 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4713 / sizeof (PTRACE_XFER_TYPE);
4714
4715 /* Allocate buffer of that many longwords. */
4716 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4717 alloca (count * sizeof (PTRACE_XFER_TYPE));
4718
4719 int pid = lwpid_of (current_thread);
4720
4721 if (len == 0)
4722 {
4723 /* Zero length write always succeeds. */
4724 return 0;
4725 }
4726
4727 if (debug_threads)
4728 {
4729 /* Dump up to four bytes. */
4730 unsigned int val = * (unsigned int *) myaddr;
4731 if (len == 1)
4732 val = val & 0xff;
4733 else if (len == 2)
4734 val = val & 0xffff;
4735 else if (len == 3)
4736 val = val & 0xffffff;
4737 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4738 val, (long)memaddr);
4739 }
4740
4741 /* Fill start and end extra bytes of buffer with existing memory data. */
4742
4743 errno = 0;
4744 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4745 about coercing an 8 byte integer to a 4 byte pointer. */
4746 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4747 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4748 (PTRACE_TYPE_ARG4) 0);
4749 if (errno)
4750 return errno;
4751
4752 if (count > 1)
4753 {
4754 errno = 0;
4755 buffer[count - 1]
4756 = ptrace (PTRACE_PEEKTEXT, pid,
4757 /* Coerce to a uintptr_t first to avoid potential gcc warning
4758 about coercing an 8 byte integer to a 4 byte pointer. */
4759 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4760 * sizeof (PTRACE_XFER_TYPE)),
4761 (PTRACE_TYPE_ARG4) 0);
4762 if (errno)
4763 return errno;
4764 }
4765
4766 /* Copy data to be written over corresponding part of buffer. */
4767
4768 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4769 myaddr, len);
4770
4771 /* Write the entire buffer. */
4772
4773 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4774 {
4775 errno = 0;
4776 ptrace (PTRACE_POKETEXT, pid,
4777 /* Coerce to a uintptr_t first to avoid potential gcc warning
4778 about coercing an 8 byte integer to a 4 byte pointer. */
4779 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4780 (PTRACE_TYPE_ARG4) buffer[i]);
4781 if (errno)
4782 return errno;
4783 }
4784
4785 return 0;
4786 }
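
/* The peeks of buffer[0] and buffer[count - 1] above implement a
   read-modify-write: ptrace can only poke whole words, so the bytes
   of the first and last words that fall outside [MEMADDR,
   MEMADDR + LEN) must be preserved from the inferior's existing
   memory.  */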
4787
4788 static void
4789 linux_look_up_symbols (void)
4790 {
4791 #ifdef USE_THREAD_DB
4792 struct process_info *proc = current_process ();
4793
4794 if (proc->private->thread_db != NULL)
4795 return;
4796
4797 /* If the kernel supports tracing clones, then we don't need to
4798 use the magic thread event breakpoint to learn about
4799 threads. */
4800 thread_db_init (!linux_supports_traceclone ());
4801 #endif
4802 }
4803
4804 static void
4805 linux_request_interrupt (void)
4806 {
4807 extern unsigned long signal_pid;
4808
4809 /* Send a SIGINT to the process group. This acts just like the user
4810 typed a ^C on the controlling terminal. */
4811 kill (-signal_pid, SIGINT);
4812 }
4813
4814 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4815 to debugger memory starting at MYADDR. */
4816
4817 static int
4818 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4819 {
4820 char filename[PATH_MAX];
4821 int fd, n;
4822 int pid = lwpid_of (current_thread);
4823
4824 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4825
4826 fd = open (filename, O_RDONLY);
4827 if (fd < 0)
4828 return -1;
4829
4830 if (offset != (CORE_ADDR) 0
4831 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4832 n = -1;
4833 else
4834 n = read (fd, myaddr, len);
4835
4836 close (fd);
4837
4838 return n;
4839 }
4840
4841 /* These breakpoint- and watchpoint-related wrapper functions simply
4842 pass on the function call if the target has registered a
4843 corresponding function. */
4844
4845 static int
4846 linux_supports_z_point_type (char z_type)
4847 {
4848 return (the_low_target.supports_z_point_type != NULL
4849 && the_low_target.supports_z_point_type (z_type));
4850 }
4851
4852 static int
4853 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4854 int size, struct raw_breakpoint *bp)
4855 {
4856 if (the_low_target.insert_point != NULL)
4857 return the_low_target.insert_point (type, addr, size, bp);
4858 else
4859 /* Unsupported (see target.h). */
4860 return 1;
4861 }
4862
4863 static int
4864 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4865 int size, struct raw_breakpoint *bp)
4866 {
4867 if (the_low_target.remove_point != NULL)
4868 return the_low_target.remove_point (type, addr, size, bp);
4869 else
4870 /* Unsupported (see target.h). */
4871 return 1;
4872 }
4873
4874 static int
4875 linux_stopped_by_watchpoint (void)
4876 {
4877 struct lwp_info *lwp = get_thread_lwp (current_thread);
4878
4879 return lwp->stopped_by_watchpoint;
4880 }
4881
4882 static CORE_ADDR
4883 linux_stopped_data_address (void)
4884 {
4885 struct lwp_info *lwp = get_thread_lwp (current_thread);
4886
4887 return lwp->stopped_data_address;
4888 }
4889
4890 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4891 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4892 && defined(PT_TEXT_END_ADDR)
4893
4894 /* This is only used for targets that define PT_TEXT_ADDR,
4895 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4896 the target has different ways of acquiring this information, like
4897 loadmaps. */
4898
4899 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4900 to tell gdb about. */
4901
4902 static int
4903 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4904 {
4905 unsigned long text, text_end, data;
4906 int pid = lwpid_of (get_thread_lwp (current_thread));
4907
4908 errno = 0;
4909
4910 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4911 (PTRACE_TYPE_ARG4) 0);
4912 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4913 (PTRACE_TYPE_ARG4) 0);
4914 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4915 (PTRACE_TYPE_ARG4) 0);
4916
4917 if (errno == 0)
4918 {
4919 /* Both text and data offsets produced at compile-time (and so
4920 used by gdb) are relative to the beginning of the program,
4921 with the data segment immediately following the text segment.
4922 However, the actual runtime layout in memory may put the data
4923 somewhere else, so when we send gdb a data base-address, we
4924 use the real data base address and subtract the compile-time
4925 data base-address from it (which is just the length of the
4926 text segment). BSS immediately follows data in both
4927 cases. */
4928 *text_p = text;
4929 *data_p = data - (text_end - text);
4930
4931 return 1;
4932 }
4933 return 0;
4934 }
4935 #endif
4936
4937 static int
4938 linux_qxfer_osdata (const char *annex,
4939 unsigned char *readbuf, unsigned const char *writebuf,
4940 CORE_ADDR offset, int len)
4941 {
4942 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4943 }
4944
4945 /* Convert a native/host siginfo object, into/from the siginfo in the
4946 layout of the inferiors' architecture. */
4947
4948 static void
4949 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4950 {
4951 int done = 0;
4952
4953 if (the_low_target.siginfo_fixup != NULL)
4954 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4955
4956 /* If there was no callback, or the callback didn't do anything,
4957 then just do a straight memcpy. */
4958 if (!done)
4959 {
4960 if (direction == 1)
4961 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4962 else
4963 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4964 }
4965 }
4966
4967 static int
4968 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4969 unsigned const char *writebuf, CORE_ADDR offset, int len)
4970 {
4971 int pid;
4972 siginfo_t siginfo;
4973 char inf_siginfo[sizeof (siginfo_t)];
4974
4975 if (current_thread == NULL)
4976 return -1;
4977
4978 pid = lwpid_of (current_thread);
4979
4980 if (debug_threads)
4981 debug_printf ("%s siginfo for lwp %d.\n",
4982 readbuf != NULL ? "Reading" : "Writing",
4983 pid);
4984
4985 if (offset >= sizeof (siginfo))
4986 return -1;
4987
4988 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4989 return -1;
4990
4991 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4992 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4993 inferior with a 64-bit GDBSERVER should look the same as debugging it
4994 with a 32-bit GDBSERVER, we need to convert it. */
4995 siginfo_fixup (&siginfo, inf_siginfo, 0);
4996
4997 if (offset + len > sizeof (siginfo))
4998 len = sizeof (siginfo) - offset;
4999
5000 if (readbuf != NULL)
5001 memcpy (readbuf, inf_siginfo + offset, len);
5002 else
5003 {
5004 memcpy (inf_siginfo + offset, writebuf, len);
5005
5006 /* Convert back to ptrace layout before flushing it out. */
5007 siginfo_fixup (&siginfo, inf_siginfo, 1);
5008
5009 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5010 return -1;
5011 }
5012
5013 return len;
5014 }
5015
5016 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5017    it lets us notice when children change state; and it acts as the
5018    handler for the sigsuspend in my_waitpid. */
5019
5020 static void
5021 sigchld_handler (int signo)
5022 {
5023 int old_errno = errno;
5024
5025 if (debug_threads)
5026 {
5027 do
5028 {
5029 /* fprintf is not async-signal-safe, so call write
5030 directly. */
5031 if (write (2, "sigchld_handler\n",
5032 sizeof ("sigchld_handler\n") - 1) < 0)
5033 break; /* just ignore */
5034 } while (0);
5035 }
5036
5037 if (target_is_async_p ())
5038 async_file_mark (); /* trigger a linux_wait */
5039
5040 errno = old_errno;
5041 }
5042
5043 static int
5044 linux_supports_non_stop (void)
5045 {
5046 return 1;
5047 }
5048
5049 static int
5050 linux_async (int enable)
5051 {
5052 int previous = target_is_async_p ();
5053
5054 if (debug_threads)
5055 debug_printf ("linux_async (%d), previous=%d\n",
5056 enable, previous);
5057
5058 if (previous != enable)
5059 {
5060 sigset_t mask;
5061 sigemptyset (&mask);
5062 sigaddset (&mask, SIGCHLD);
5063
5064 sigprocmask (SIG_BLOCK, &mask, NULL);
5065
5066 if (enable)
5067 {
5068 if (pipe (linux_event_pipe) == -1)
5069 {
5070 linux_event_pipe[0] = -1;
5071 linux_event_pipe[1] = -1;
5072 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5073
5074 warning ("creating event pipe failed.");
5075 return previous;
5076 }
5077
5078 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5079 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5080
5081 /* Register the event loop handler. */
5082 add_file_handler (linux_event_pipe[0],
5083 handle_target_event, NULL);
5084
5085 /* Always trigger a linux_wait. */
5086 async_file_mark ();
5087 }
5088 else
5089 {
5090 delete_file_handler (linux_event_pipe[0]);
5091
5092 close (linux_event_pipe[0]);
5093 close (linux_event_pipe[1]);
5094 linux_event_pipe[0] = -1;
5095 linux_event_pipe[1] = -1;
5096 }
5097
5098 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5099 }
5100
5101 return previous;
5102 }
5103
5104 static int
5105 linux_start_non_stop (int nonstop)
5106 {
5107 /* Register or unregister from event-loop accordingly. */
5108 linux_async (nonstop);
5109
5110 if (target_is_async_p () != (nonstop != 0))
5111 return -1;
5112
5113 return 0;
5114 }
5115
5116 static int
5117 linux_supports_multi_process (void)
5118 {
5119 return 1;
5120 }
5121
5122 static int
5123 linux_supports_disable_randomization (void)
5124 {
5125 #ifdef HAVE_PERSONALITY
5126 return 1;
5127 #else
5128 return 0;
5129 #endif
5130 }
5131
5132 static int
5133 linux_supports_agent (void)
5134 {
5135 return 1;
5136 }
5137
5138 static int
5139 linux_supports_range_stepping (void)
5140 {
5141 if (*the_low_target.supports_range_stepping == NULL)
5142 return 0;
5143
5144 return (*the_low_target.supports_range_stepping) ();
5145 }
5146
5147 /* Enumerate spufs IDs for process PID. */
5148 static int
5149 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5150 {
5151 int pos = 0;
5152 int written = 0;
5153 char path[128];
5154 DIR *dir;
5155 struct dirent *entry;
5156
5157 sprintf (path, "/proc/%ld/fd", pid);
5158 dir = opendir (path);
5159 if (!dir)
5160 return -1;
5161
5162 rewinddir (dir);
5163 while ((entry = readdir (dir)) != NULL)
5164 {
5165 struct stat st;
5166 struct statfs stfs;
5167 int fd;
5168
5169 fd = atoi (entry->d_name);
5170 if (!fd)
5171 continue;
5172
5173 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5174 if (stat (path, &st) != 0)
5175 continue;
5176 if (!S_ISDIR (st.st_mode))
5177 continue;
5178
5179 if (statfs (path, &stfs) != 0)
5180 continue;
5181 if (stfs.f_type != SPUFS_MAGIC)
5182 continue;
5183
5184 if (pos >= offset && pos + 4 <= offset + len)
5185 {
5186 *(unsigned int *)(buf + pos - offset) = fd;
5187 written += 4;
5188 }
5189 pos += 4;
5190 }
5191
5192 closedir (dir);
5193 return written;
5194 }
5195
5196 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5197 object type, using the /proc file system. */
5198 static int
5199 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5200 unsigned const char *writebuf,
5201 CORE_ADDR offset, int len)
5202 {
5203 long pid = lwpid_of (current_thread);
5204 char buf[128];
5205 int fd = 0;
5206 int ret = 0;
5207
5208 if (!writebuf && !readbuf)
5209 return -1;
5210
5211 if (!*annex)
5212 {
5213 if (!readbuf)
5214 return -1;
5215 else
5216 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5217 }
5218
5219 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5220 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5221 if (fd <= 0)
5222 return -1;
5223
5224 if (offset != 0
5225 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5226 {
5227 close (fd);
5228 return 0;
5229 }
5230
5231 if (writebuf)
5232 ret = write (fd, writebuf, (size_t) len);
5233 else
5234 ret = read (fd, readbuf, (size_t) len);
5235
5236 close (fd);
5237 return ret;
5238 }
5239
5240 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5241 struct target_loadseg
5242 {
5243 /* Core address to which the segment is mapped. */
5244 Elf32_Addr addr;
5245 /* VMA recorded in the program header. */
5246 Elf32_Addr p_vaddr;
5247 /* Size of this segment in memory. */
5248 Elf32_Word p_memsz;
5249 };
5250
5251 # if defined PT_GETDSBT
5252 struct target_loadmap
5253 {
5254 /* Protocol version number, must be zero. */
5255 Elf32_Word version;
5256 /* Pointer to the DSBT table, its size, and the DSBT index. */
5257 unsigned *dsbt_table;
5258 unsigned dsbt_size, dsbt_index;
5259 /* Number of segments in this map. */
5260 Elf32_Word nsegs;
5261 /* The actual memory map. */
5262 struct target_loadseg segs[/*nsegs*/];
5263 };
5264 # define LINUX_LOADMAP PT_GETDSBT
5265 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5266 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5267 # else
5268 struct target_loadmap
5269 {
5270 /* Protocol version number, must be zero. */
5271 Elf32_Half version;
5272 /* Number of segments in this map. */
5273 Elf32_Half nsegs;
5274 /* The actual memory map. */
5275 struct target_loadseg segs[/*nsegs*/];
5276 };
5277 # define LINUX_LOADMAP PTRACE_GETFDPIC
5278 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5279 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5280 # endif
5281
5282 static int
5283 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5284 unsigned char *myaddr, unsigned int len)
5285 {
5286 int pid = lwpid_of (current_thread);
5287 int addr = -1;
5288 struct target_loadmap *data = NULL;
5289 unsigned int actual_length, copy_length;
5290
5291 if (strcmp (annex, "exec") == 0)
5292 addr = (int) LINUX_LOADMAP_EXEC;
5293 else if (strcmp (annex, "interp") == 0)
5294 addr = (int) LINUX_LOADMAP_INTERP;
5295 else
5296 return -1;
5297
5298 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5299 return -1;
5300
5301 if (data == NULL)
5302 return -1;
5303
5304 actual_length = sizeof (struct target_loadmap)
5305 + sizeof (struct target_loadseg) * data->nsegs;
5306
5307 if (offset < 0 || offset > actual_length)
5308 return -1;
5309
5310 copy_length = actual_length - offset < len ? actual_length - offset : len;
5311 memcpy (myaddr, (char *) data + offset, copy_length);
5312 return copy_length;
5313 }
5314 #else
5315 # define linux_read_loadmap NULL
5316 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5317
5318 static void
5319 linux_process_qsupported (const char *query)
5320 {
5321 if (the_low_target.process_qsupported != NULL)
5322 the_low_target.process_qsupported (query);
5323 }
5324
5325 static int
5326 linux_supports_tracepoints (void)
5327 {
5328 if (*the_low_target.supports_tracepoints == NULL)
5329 return 0;
5330
5331 return (*the_low_target.supports_tracepoints) ();
5332 }
5333
5334 static CORE_ADDR
5335 linux_read_pc (struct regcache *regcache)
5336 {
5337 if (the_low_target.get_pc == NULL)
5338 return 0;
5339
5340 return (*the_low_target.get_pc) (regcache);
5341 }
5342
5343 static void
5344 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5345 {
5346 gdb_assert (the_low_target.set_pc != NULL);
5347
5348 (*the_low_target.set_pc) (regcache, pc);
5349 }
5350
5351 static int
5352 linux_thread_stopped (struct thread_info *thread)
5353 {
5354 return get_thread_lwp (thread)->stopped;
5355 }
5356
5357 /* This exposes stop-all-threads functionality to other modules. */
5358
5359 static void
5360 linux_pause_all (int freeze)
5361 {
5362 stop_all_lwps (freeze, NULL);
5363 }
5364
5365 /* This exposes unstop-all-threads functionality to other gdbserver
5366 modules. */
5367
5368 static void
5369 linux_unpause_all (int unfreeze)
5370 {
5371 unstop_all_lwps (unfreeze, NULL);
5372 }
5373
5374 static int
5375 linux_prepare_to_access_memory (void)
5376 {
5377 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5378 running LWP. */
5379 if (non_stop)
5380 linux_pause_all (1);
5381 return 0;
5382 }
5383
5384 static void
5385 linux_done_accessing_memory (void)
5386 {
5387 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5388 running LWP. */
5389 if (non_stop)
5390 linux_unpause_all (1);
5391 }
5392
5393 static int
5394 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5395 CORE_ADDR collector,
5396 CORE_ADDR lockaddr,
5397 ULONGEST orig_size,
5398 CORE_ADDR *jump_entry,
5399 CORE_ADDR *trampoline,
5400 ULONGEST *trampoline_size,
5401 unsigned char *jjump_pad_insn,
5402 ULONGEST *jjump_pad_insn_size,
5403 CORE_ADDR *adjusted_insn_addr,
5404 CORE_ADDR *adjusted_insn_addr_end,
5405 char *err)
5406 {
5407 return (*the_low_target.install_fast_tracepoint_jump_pad)
5408 (tpoint, tpaddr, collector, lockaddr, orig_size,
5409 jump_entry, trampoline, trampoline_size,
5410 jjump_pad_insn, jjump_pad_insn_size,
5411 adjusted_insn_addr, adjusted_insn_addr_end,
5412 err);
5413 }
5414
5415 static struct emit_ops *
5416 linux_emit_ops (void)
5417 {
5418 if (the_low_target.emit_ops != NULL)
5419 return (*the_low_target.emit_ops) ();
5420 else
5421 return NULL;
5422 }
5423
5424 static int
5425 linux_get_min_fast_tracepoint_insn_len (void)
5426 {
5427 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5428 }
5429
5430 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5431
5432 static int
5433 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5434 CORE_ADDR *phdr_memaddr, int *num_phdr)
5435 {
5436 char filename[PATH_MAX];
5437 int fd;
5438 const int auxv_size = is_elf64
5439 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5440 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5441
5442 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5443
5444 fd = open (filename, O_RDONLY);
5445 if (fd < 0)
5446 return 1;
5447
5448 *phdr_memaddr = 0;
5449 *num_phdr = 0;
5450 while (read (fd, buf, auxv_size) == auxv_size
5451 && (*phdr_memaddr == 0 || *num_phdr == 0))
5452 {
5453 if (is_elf64)
5454 {
5455 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5456
5457 switch (aux->a_type)
5458 {
5459 case AT_PHDR:
5460 *phdr_memaddr = aux->a_un.a_val;
5461 break;
5462 case AT_PHNUM:
5463 *num_phdr = aux->a_un.a_val;
5464 break;
5465 }
5466 }
5467 else
5468 {
5469 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5470
5471 switch (aux->a_type)
5472 {
5473 case AT_PHDR:
5474 *phdr_memaddr = aux->a_un.a_val;
5475 break;
5476 case AT_PHNUM:
5477 *num_phdr = aux->a_un.a_val;
5478 break;
5479 }
5480 }
5481 }
5482
5483 close (fd);
5484
5485 if (*phdr_memaddr == 0 || *num_phdr == 0)
5486 {
5487 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5488 "phdr_memaddr = %ld, phdr_num = %d",
5489 (long) *phdr_memaddr, *num_phdr);
5490 return 2;
5491 }
5492
5493 return 0;
5494 }
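
/* /proc/PID/auxv is a flat array of (a_type, a_un.a_val) pairs ending
   with an AT_NULL entry; for a 64-bit inferior the loop above reads
   it 16 bytes at a time until it has seen both

     { AT_PHDR,  address of the program headers }
     { AT_PHNUM, number of program headers }

   or runs out of entries.  */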
5495
5496 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5497
5498 static CORE_ADDR
5499 get_dynamic (const int pid, const int is_elf64)
5500 {
5501 CORE_ADDR phdr_memaddr, relocation;
5502 int num_phdr, i;
5503 unsigned char *phdr_buf;
5504 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5505
5506 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5507 return 0;
5508
5509 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5510 phdr_buf = alloca (num_phdr * phdr_size);
5511
5512 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5513 return 0;
5514
5515 /* Compute relocation: it is expected to be 0 for "regular" executables,
5516 non-zero for PIE ones. */
5517 relocation = -1;
5518 for (i = 0; relocation == -1 && i < num_phdr; i++)
5519 if (is_elf64)
5520 {
5521 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5522
5523 if (p->p_type == PT_PHDR)
5524 relocation = phdr_memaddr - p->p_vaddr;
5525 }
5526 else
5527 {
5528 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5529
5530 if (p->p_type == PT_PHDR)
5531 relocation = phdr_memaddr - p->p_vaddr;
5532 }
5533
5534 if (relocation == -1)
5535 {
5536       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
5537 	 all real-world executables, including PIE executables, always have
5538 	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
5539 	 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
5540 	 provides DT_DEBUG anyway (fpc binaries are statically linked).
5541
5542 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5543
5544 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5545
5546 return 0;
5547 }
5548
5549 for (i = 0; i < num_phdr; i++)
5550 {
5551 if (is_elf64)
5552 {
5553 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5554
5555 if (p->p_type == PT_DYNAMIC)
5556 return p->p_vaddr + relocation;
5557 }
5558 else
5559 {
5560 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5561
5562 if (p->p_type == PT_DYNAMIC)
5563 return p->p_vaddr + relocation;
5564 }
5565 }
5566
5567 return 0;
5568 }
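
/* As an example of the relocation computed above: for a PIE loaded at
   base B whose PT_PHDR records, say, p_vaddr 0x40, AT_PHDR yields
   B + 0x40, so relocation works out to B: the load bias that must be
   added to every p_vaddr, including PT_DYNAMIC's.  */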
5569
5570 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5571 can be 0 if the inferior does not yet have the library list initialized.
5572 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5573 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5574
5575 static CORE_ADDR
5576 get_r_debug (const int pid, const int is_elf64)
5577 {
5578 CORE_ADDR dynamic_memaddr;
5579 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5580 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5581 CORE_ADDR map = -1;
5582
5583 dynamic_memaddr = get_dynamic (pid, is_elf64);
5584 if (dynamic_memaddr == 0)
5585 return map;
5586
5587 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5588 {
5589 if (is_elf64)
5590 {
5591 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5592 #ifdef DT_MIPS_RLD_MAP
5593 union
5594 {
5595 Elf64_Xword map;
5596 unsigned char buf[sizeof (Elf64_Xword)];
5597 }
5598 rld_map;
5599
5600 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5601 {
5602 if (linux_read_memory (dyn->d_un.d_val,
5603 rld_map.buf, sizeof (rld_map.buf)) == 0)
5604 return rld_map.map;
5605 else
5606 break;
5607 }
5608 #endif /* DT_MIPS_RLD_MAP */
5609
5610 if (dyn->d_tag == DT_DEBUG && map == -1)
5611 map = dyn->d_un.d_val;
5612
5613 if (dyn->d_tag == DT_NULL)
5614 break;
5615 }
5616 else
5617 {
5618 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5619 #ifdef DT_MIPS_RLD_MAP
5620 union
5621 {
5622 Elf32_Word map;
5623 unsigned char buf[sizeof (Elf32_Word)];
5624 }
5625 rld_map;
5626
5627 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5628 {
5629 if (linux_read_memory (dyn->d_un.d_val,
5630 rld_map.buf, sizeof (rld_map.buf)) == 0)
5631 return rld_map.map;
5632 else
5633 break;
5634 }
5635 #endif /* DT_MIPS_RLD_MAP */
5636
5637 if (dyn->d_tag == DT_DEBUG && map == -1)
5638 map = dyn->d_un.d_val;
5639
5640 if (dyn->d_tag == DT_NULL)
5641 break;
5642 }
5643
5644 dynamic_memaddr += dyn_size;
5645 }
5646
5647 return map;
5648 }
5649
5650 /* Read one pointer from MEMADDR in the inferior. */
5651
5652 static int
5653 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5654 {
5655 int ret;
5656
5657 /* Go through a union so this works on either big or little endian
5658 hosts, when the inferior's pointer size is smaller than the size
5659      of CORE_ADDR.  It is assumed the inferior's endianness is the
5660      same as the superior's. */
5661 union
5662 {
5663 CORE_ADDR core_addr;
5664 unsigned int ui;
5665 unsigned char uc;
5666 } addr;
5667
5668 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5669 if (ret == 0)
5670 {
5671 if (ptr_size == sizeof (CORE_ADDR))
5672 *ptr = addr.core_addr;
5673 else if (ptr_size == sizeof (unsigned int))
5674 *ptr = addr.ui;
5675 else
5676 gdb_assert_not_reached ("unhandled pointer size");
5677 }
5678 return ret;
5679 }
5680
5681 struct link_map_offsets
5682 {
5683 /* Offset and size of r_debug.r_version. */
5684 int r_version_offset;
5685
5686 /* Offset and size of r_debug.r_map. */
5687 int r_map_offset;
5688
5689 /* Offset to l_addr field in struct link_map. */
5690 int l_addr_offset;
5691
5692 /* Offset to l_name field in struct link_map. */
5693 int l_name_offset;
5694
5695 /* Offset to l_ld field in struct link_map. */
5696 int l_ld_offset;
5697
5698 /* Offset to l_next field in struct link_map. */
5699 int l_next_offset;
5700
5701 /* Offset to l_prev field in struct link_map. */
5702 int l_prev_offset;
5703 };
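/* These offsets mirror the SVR4 layouts of struct r_debug and struct
   link_map (see glibc's <link.h>): r_map is the second field of
   r_debug, and l_name, l_ld, l_next and l_prev follow l_addr as
   consecutive pointer-sized fields of link_map.  The tables below
   encode those distances for 4- and 8-byte pointers.  */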
5704
5705 /* Construct qXfer:libraries-svr4:read reply. */
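/* The ANNEX may carry "start=HEXADDR;prev=HEXADDR;" pairs to resume a
   traversal from a given link_map entry.  OFFSET and LEN select a
   window of the generated XML, which is copied into READBUF.  Returns
   the number of bytes copied, -1 on error, or -2 if a write was
   requested (writing this object is not supported).  */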
5706
5707 static int
5708 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5709 unsigned const char *writebuf,
5710 CORE_ADDR offset, int len)
5711 {
5712 char *document;
5713 unsigned document_len;
5714 struct process_info_private *const priv = current_process ()->private;
5715 char filename[PATH_MAX];
5716 int pid, is_elf64;
5717
5718 static const struct link_map_offsets lmo_32bit_offsets =
5719 {
5720 0, /* r_version offset. */
5721 4, /* r_debug.r_map offset. */
5722 0, /* l_addr offset in link_map. */
5723 4, /* l_name offset in link_map. */
5724 8, /* l_ld offset in link_map. */
5725 12, /* l_next offset in link_map. */
5726 16 /* l_prev offset in link_map. */
5727 };
5728
5729 static const struct link_map_offsets lmo_64bit_offsets =
5730 {
5731 0, /* r_version offset. */
5732 8, /* r_debug.r_map offset. */
5733 0, /* l_addr offset in link_map. */
5734 8, /* l_name offset in link_map. */
5735 16, /* l_ld offset in link_map. */
5736 24, /* l_next offset in link_map. */
5737 32 /* l_prev offset in link_map. */
5738 };
5739 const struct link_map_offsets *lmo;
5740 unsigned int machine;
5741 int ptr_size;
5742 CORE_ADDR lm_addr = 0, lm_prev = 0;
5743 int allocated = 1024;
5744 char *p;
5745 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5746 int header_done = 0;
5747
5748 if (writebuf != NULL)
5749 return -2;
5750 if (readbuf == NULL)
5751 return -1;
5752
5753 pid = lwpid_of (current_thread);
5754 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5755 is_elf64 = elf_64_file_p (filename, &machine);
5756 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5757 ptr_size = is_elf64 ? 8 : 4;
5758
5759 while (annex[0] != '\0')
5760 {
5761 const char *sep;
5762 CORE_ADDR *addrp;
5763 int len;
5764
5765 sep = strchr (annex, '=');
5766 if (sep == NULL)
5767 break;
5768
5769 len = sep - annex;
5770 if (len == 5 && strncmp (annex, "start", 5) == 0)
5771 addrp = &lm_addr;
5772 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5773 addrp = &lm_prev;
5774 else
5775 {
5776 annex = strchr (sep, ';');
5777 if (annex == NULL)
5778 break;
5779 annex++;
5780 continue;
5781 }
5782
5783 annex = decode_address_to_semicolon (addrp, sep + 1);
5784 }
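/* At this point LM_ADDR and LM_PREV hold whatever the annex supplied;
   e.g. an annex of "start=<hex>;prev=<hex>;" resumes the walk at the
   given link_map entry instead of starting from r_debug.r_map.  */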
5785
5786 if (lm_addr == 0)
5787 {
5788 int r_version = 0;
5789
5790 if (priv->r_debug == 0)
5791 priv->r_debug = get_r_debug (pid, is_elf64);
5792
5793 /* We failed to find DT_DEBUG. This situation will not change
5794 for this inferior, so do not retry. Report it to GDB as
5795 E01; see solib-svr4.c on the GDB side for the reasons. */
5796 if (priv->r_debug == (CORE_ADDR) -1)
5797 return -1;
5798
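/* The offsets in LMO are only known to match r_version 1; any other
   version is reported with a warning and the library list is left
   empty rather than misparsed.  */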
5799 if (priv->r_debug != 0)
5800 {
5801 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5802 (unsigned char *) &r_version,
5803 sizeof (r_version)) != 0
5804 || r_version != 1)
5805 {
5806 warning ("unexpected r_debug version %d", r_version);
5807 }
5808 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5809 &lm_addr, ptr_size) != 0)
5810 {
5811 warning ("unable to read r_map from 0x%lx",
5812 (long) priv->r_debug + lmo->r_map_offset);
5813 }
5814 }
5815 }
5816
5817 document = xmalloc (allocated);
5818 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5819 p = document + strlen (document);
5820
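/* Walk the link_map chain.  Each iteration fetches the five fields we
   need from the current entry; a failed read simply ends the walk with
   whatever has been generated so far.  */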
5821 while (lm_addr
5822 && read_one_ptr (lm_addr + lmo->l_name_offset,
5823 &l_name, ptr_size) == 0
5824 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5825 &l_addr, ptr_size) == 0
5826 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5827 &l_ld, ptr_size) == 0
5828 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5829 &l_prev, ptr_size) == 0
5830 && read_one_ptr (lm_addr + lmo->l_next_offset,
5831 &l_next, ptr_size) == 0)
5832 {
5833 unsigned char libname[PATH_MAX];
5834
5835 if (lm_prev != l_prev)
5836 {
5837 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5838 (long) lm_prev, (long) l_prev);
5839 break;
5840 }
5841
5842 /* Ignore the first entry even if it has a valid name, as the first
5843 entry corresponds to the main executable. The first entry should not
5844 be skipped if the dynamic loader was loaded late by a static executable
5845 (see solib-svr4.c parameter ignore_first). But in such a case the main
5846 executable does not have PT_DYNAMIC present, and this function has
5847 already returned above due to a failed get_r_debug. */
5848 if (lm_prev == 0)
5849 {
5850 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5851 p = p + strlen (p);
5852 }
5853 else
5854 {
5855 /* Not checking for error because reading may stop before
5856 we've got PATH_MAX worth of characters. */
5857 libname[0] = '\0';
5858 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5859 libname[sizeof (libname) - 1] = '\0';
5860 if (libname[0] != '\0')
5861 {
5862 /* 6x the size for xml_escape_text below. */
5863 size_t len = 6 * strlen ((char *) libname);
5864 char *name;
5865
5866 if (!header_done)
5867 {
5868 /* Terminate `<library-list-svr4'. */
5869 *p++ = '>';
5870 header_done = 1;
5871 }
5872
5873 while (allocated < p - document + len + 200)
5874 {
5875 /* Expand to guarantee sufficient storage. */
5876 uintptr_t document_len = p - document;
5877
5878 document = xrealloc (document, 2 * allocated);
5879 allocated *= 2;
5880 p = document + document_len;
5881 }
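/* 200 bytes is a conservative allowance for the fixed parts of one
   <library> element; the name itself is covered by LEN, which already
   assumes the worst-case 6x expansion of xml_escape_text.  */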
5882
5883 name = xml_escape_text ((char *) libname);
5884 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5885 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5886 name, (unsigned long) lm_addr,
5887 (unsigned long) l_addr, (unsigned long) l_ld);
5888 free (name);
5889 }
5890 }
5891
5892 lm_prev = lm_addr;
5893 lm_addr = l_next;
5894 }
5895
5896 if (!header_done)
5897 {
5898 /* Empty list; terminate `<library-list-svr4'. */
5899 strcpy (p, "/>");
5900 }
5901 else
5902 strcpy (p, "</library-list-svr4>");
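/* A finished document for a process with one shared library might look
   like this (all values hypothetical):
     <library-list-svr4 version="1.0" main-lm="0x400000">
     <library name="/lib/libfoo.so" lm="0x7f0001" l_addr="0x7f0000"
     l_ld="0x7f0100"/></library-list-svr4>
   An empty list is just <library-list-svr4 version="1.0"/>.  */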
5903
5904 document_len = strlen (document);
5905 if (offset < document_len)
5906 document_len -= offset;
5907 else
5908 document_len = 0;
5909 if (len > document_len)
5910 len = document_len;
5911
5912 memcpy (readbuf, document + offset, len);
5913 xfree (document);
5914
5915 return len;
5916 }
5917
5918 #ifdef HAVE_LINUX_BTRACE
5919
5920 /* See to_enable_btrace target method. */
5921
5922 static struct btrace_target_info *
5923 linux_low_enable_btrace (ptid_t ptid)
5924 {
5925 struct btrace_target_info *tinfo;
5926
5927 tinfo = linux_enable_btrace (ptid);
5928
5929 if (tinfo != NULL)
5930 {
5931 struct thread_info *thread = find_thread_ptid (ptid);
5932 struct regcache *regcache = get_thread_regcache (thread, 0);
5933
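/* Derive the inferior's pointer width from the size of register 0 in
   its target description, e.g. an 8-byte register yields 64 bits.  */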
5934 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5935 }
5936
5937 return tinfo;
5938 }
5939
5940 /* See to_disable_btrace target method. */
5941
5942 static int
5943 linux_low_disable_btrace (struct btrace_target_info *tinfo)
5944 {
5945 enum btrace_error err;
5946
5947 err = linux_disable_btrace (tinfo);
5948 return (err == BTRACE_ERR_NONE ? 0 : -1);
5949 }
5950
5951 /* See to_read_btrace target method. */
5952
5953 static int
5954 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5955 int type)
5956 {
5957 VEC (btrace_block_s) *btrace;
5958 struct btrace_block *block;
5959 enum btrace_error err;
5960 int i;
5961
5962 btrace = NULL;
5963 err = linux_read_btrace (&btrace, tinfo, type);
5964 if (err != BTRACE_ERR_NONE)
5965 {
5966 if (err == BTRACE_ERR_OVERFLOW)
5967 buffer_grow_str0 (buffer, "E.Overflow.");
5968 else
5969 buffer_grow_str0 (buffer, "E.Generic Error.");
5970
5971 return -1;
5972 }
5973
5974 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5975 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
5976
5977 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
5978 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5979 paddress (block->begin), paddress (block->end));
5980
5981 buffer_grow_str0 (buffer, "</btrace>\n");
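/* The finished document has the shape (addresses hypothetical):
     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400500" end="0x40051c"/>
     </btrace>  */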
5982
5983 VEC_free (btrace_block_s, btrace);
5984
5985 return 0;
5986 }
5987 #endif /* HAVE_LINUX_BTRACE */
5988
5989 static struct target_ops linux_target_ops = {
5990 linux_create_inferior,
5991 linux_attach,
5992 linux_kill,
5993 linux_detach,
5994 linux_mourn,
5995 linux_join,
5996 linux_thread_alive,
5997 linux_resume,
5998 linux_wait,
5999 linux_fetch_registers,
6000 linux_store_registers,
6001 linux_prepare_to_access_memory,
6002 linux_done_accessing_memory,
6003 linux_read_memory,
6004 linux_write_memory,
6005 linux_look_up_symbols,
6006 linux_request_interrupt,
6007 linux_read_auxv,
6008 linux_supports_z_point_type,
6009 linux_insert_point,
6010 linux_remove_point,
6011 linux_stopped_by_watchpoint,
6012 linux_stopped_data_address,
6013 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6014 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6015 && defined(PT_TEXT_END_ADDR)
6016 linux_read_offsets,
6017 #else
6018 NULL,
6019 #endif
6020 #ifdef USE_THREAD_DB
6021 thread_db_get_tls_address,
6022 #else
6023 NULL,
6024 #endif
6025 linux_qxfer_spu,
6026 hostio_last_error_from_errno,
6027 linux_qxfer_osdata,
6028 linux_xfer_siginfo,
6029 linux_supports_non_stop,
6030 linux_async,
6031 linux_start_non_stop,
6032 linux_supports_multi_process,
6033 #ifdef USE_THREAD_DB
6034 thread_db_handle_monitor_command,
6035 #else
6036 NULL,
6037 #endif
6038 linux_common_core_of_thread,
6039 linux_read_loadmap,
6040 linux_process_qsupported,
6041 linux_supports_tracepoints,
6042 linux_read_pc,
6043 linux_write_pc,
6044 linux_thread_stopped,
6045 NULL,
6046 linux_pause_all,
6047 linux_unpause_all,
6048 linux_cancel_breakpoints,
6049 linux_stabilize_threads,
6050 linux_install_fast_tracepoint_jump_pad,
6051 linux_emit_ops,
6052 linux_supports_disable_randomization,
6053 linux_get_min_fast_tracepoint_insn_len,
6054 linux_qxfer_libraries_svr4,
6055 linux_supports_agent,
6056 #ifdef HAVE_LINUX_BTRACE
6057 linux_supports_btrace,
6058 linux_low_enable_btrace,
6059 linux_low_disable_btrace,
6060 linux_low_read_btrace,
6061 #else
6062 NULL,
6063 NULL,
6064 NULL,
6065 NULL,
6066 #endif
6067 linux_supports_range_stepping,
6068 };
6069
6070 static void
6071 linux_init_signals (void)
6072 {
6073 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6074 to find what the cancel signal actually is. */
6075 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6076 signal (__SIGRTMIN+1, SIG_IGN);
6077 #endif
6078 }
6079
6080 #ifdef HAVE_LINUX_REGSETS
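/* Count the regsets in INFO; the regsets array is expected to end with
   a sentinel entry whose size field is negative.  */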
6081 void
6082 initialize_regsets_info (struct regsets_info *info)
6083 {
6084 for (info->num_regsets = 0;
6085 info->regsets[info->num_regsets].size >= 0;
6086 info->num_regsets++)
6087 ;
6088 }
6089 #endif
6090
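/* One-time initialization of the Linux low target: install the target
   vector and breakpoint data, adjust signal handling, register the
   SIGCHLD handler, and let the architecture-specific code initialize
   itself.  */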
6091 void
6092 initialize_low (void)
6093 {
6094 struct sigaction sigchld_action;
6095 memset (&sigchld_action, 0, sizeof (sigchld_action));
6096 set_target_ops (&linux_target_ops);
6097 set_breakpoint_data (the_low_target.breakpoint,
6098 the_low_target.breakpoint_len);
6099 linux_init_signals ();
6100 linux_ptrace_init_warnings ();
6101
6102 sigchld_action.sa_handler = sigchld_handler;
6103 sigemptyset (&sigchld_action.sa_mask);
6104 sigchld_action.sa_flags = SA_RESTART;
6105 sigaction (SIGCHLD, &sigchld_action, NULL);
6106
6107 initialize_low_arch ();
6108 }