gdb/gdbserver/linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40
41 #ifndef PTRACE_GETSIGINFO
42 # define PTRACE_GETSIGINFO 0x4202
43 # define PTRACE_SETSIGINFO 0x4203
44 #endif
45
46 #ifndef O_LARGEFILE
47 #define O_LARGEFILE 0
48 #endif
49
50 /* If the system headers did not provide the constants, hard-code the normal
51 values. */
52 #ifndef PTRACE_EVENT_FORK
53
54 #define PTRACE_SETOPTIONS 0x4200
55 #define PTRACE_GETEVENTMSG 0x4201
56
57 /* options set using PTRACE_SETOPTIONS */
58 #define PTRACE_O_TRACESYSGOOD 0x00000001
59 #define PTRACE_O_TRACEFORK 0x00000002
60 #define PTRACE_O_TRACEVFORK 0x00000004
61 #define PTRACE_O_TRACECLONE 0x00000008
62 #define PTRACE_O_TRACEEXEC 0x00000010
63 #define PTRACE_O_TRACEVFORKDONE 0x00000020
64 #define PTRACE_O_TRACEEXIT 0x00000040
65
66 /* Wait extended result codes for the above trace options. */
67 #define PTRACE_EVENT_FORK 1
68 #define PTRACE_EVENT_VFORK 2
69 #define PTRACE_EVENT_CLONE 3
70 #define PTRACE_EVENT_EXEC 4
71 #define PTRACE_EVENT_VFORK_DONE 5
72 #define PTRACE_EVENT_EXIT 6
73
74 #endif /* PTRACE_EVENT_FORK */
75
76 /* We can't always assume that this flag is available, but all systems
77 with the ptrace event handlers also have __WALL, so it's safe to use
78 in some contexts. */
79 #ifndef __WALL
80 #define __WALL 0x40000000 /* Wait for any child. */
81 #endif
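#if 0
/* Illustrative sketch, not part of the build: once PTRACE_O_TRACECLONE
   has been set on TRACEE (as is done below for every process we
   control), a clone () in the inferior is reported as a SIGTRAP stop
   whose waitpid status carries the event number in bits 16 and up, and
   the new LWP id is fetched out of band with PTRACE_GETEVENTMSG.  This
   is exactly what handle_extended_wait below relies on; the names
   `tracee' and `show_clone_event' are hypothetical.  */
static void
show_clone_event (int tracee)
{
  int status;
  unsigned long new_lwp;

  if (waitpid (tracee, &status, __WALL) == tracee
      && WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_CLONE)
    {
      ptrace (PTRACE_GETEVENTMSG, tracee, 0, &new_lwp);
      fprintf (stderr, "clone event: new LWP %lu\n", new_lwp);
    }
}
#endif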
82
83 #ifdef __UCLIBC__
84 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
85 #define HAS_NOMMU
86 #endif
87 #endif
88
89 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
90 representation of the thread ID.
91
92 ``all_processes'' is keyed by the process ID - which on Linux is (presently)
93 the same as the LWP ID. */
94
95 struct inferior_list all_processes;
96
97 /* A list of all unknown processes which receive stop signals. Some other
98 process will presumably claim each of these as forked children
99 momentarily. */
100
101 struct inferior_list stopped_pids;
102
103 /* FIXME this is a bit of a hack, and could be removed. */
104 int stopping_threads;
105
106 /* FIXME make into a target method? */
107 int using_threads = 1;
108 static int thread_db_active;
109
110 static int must_set_ptrace_flags;
111
112 /* This flag is true iff we've just created or attached to a new inferior
113 but it has not stopped yet. As soon as it does, we need to call the
114 low target's arch_setup callback. */
115 static int new_inferior;
116
117 static void linux_resume_one_process (struct inferior_list_entry *entry,
118 int step, int signal, siginfo_t *info);
119 static void linux_resume (struct thread_resume *resume_info);
120 static void stop_all_processes (void);
121 static int linux_wait_for_event (struct thread_info *child);
122 static int check_removed_breakpoint (struct process_info *event_child);
123 static void *add_process (unsigned long pid);
124 static int my_waitpid (int pid, int *status, int flags);
125
126 struct pending_signals
127 {
128 int signal;
129 siginfo_t info;
130 struct pending_signals *prev;
131 };
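#if 0
/* Illustrative sketch, not part of the build: pending_signals is a
   singly linked stack - new entries are pushed at the head, with `prev'
   pointing at older ones - but signals are delivered oldest first by
   walking down to the tail, which is what linux_resume_one_process
   does below.  `take_oldest_pending' is a hypothetical name; the sketch
   assumes a non-empty queue.  */
static int
take_oldest_pending (struct pending_signals **queue)
{
  struct pending_signals **p = queue;
  int sig;

  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif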
132
133 #define PTRACE_ARG3_TYPE long
134 #define PTRACE_XFER_TYPE long
135
136 #ifdef HAVE_LINUX_REGSETS
137 static char *disabled_regsets;
138 static int num_regsets;
139 #endif
140
141 #define pid_of(proc) ((proc)->head.id)
142
143 /* FIXME: Delete eventually. */
144 #define inferior_pid (pid_of (get_thread_process (current_inferior)))
145
146 static void
147 handle_extended_wait (struct process_info *event_child, int wstat)
148 {
149 int event = wstat >> 16;
150 struct process_info *new_process;
151
152 if (event == PTRACE_EVENT_CLONE)
153 {
154 unsigned long new_pid;
155 int ret, status = W_STOPCODE (SIGSTOP);
156
157 ptrace (PTRACE_GETEVENTMSG, inferior_pid, 0, &new_pid);
158
159 /* If we haven't already seen the new PID stop, wait for it now. */
160 if (! pull_pid_from_list (&stopped_pids, new_pid))
161 {
162 /* The new child has a pending SIGSTOP. We can't affect it until it
163 hits the SIGSTOP, but we're already attached. */
164
165 ret = my_waitpid (new_pid, &status, __WALL);
166
167 if (ret == -1)
168 perror_with_name ("waiting for new child");
169 else if (ret != new_pid)
170 warning ("wait returned unexpected PID %d", ret);
171 else if (!WIFSTOPPED (status))
172 warning ("wait returned unexpected status 0x%x", status);
173 }
174
175 ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);
176
177 new_process = (struct process_info *) add_process (new_pid);
178 add_thread (new_pid, new_process, new_pid);
179 new_thread_notify (thread_id_to_gdb_id (new_process->lwpid));
180
181 /* Normally we will get the pending SIGSTOP. But in some cases
182 we might get another signal delivered to the group first.
183 If we do, be sure not to lose it. */
184 if (WSTOPSIG (status) == SIGSTOP)
185 {
186 if (stopping_threads)
187 new_process->stopped = 1;
188 else
189 ptrace (PTRACE_CONT, new_pid, 0, 0);
190 }
191 else
192 {
193 new_process->stop_expected = 1;
194 if (stopping_threads)
195 {
196 new_process->stopped = 1;
197 new_process->status_pending_p = 1;
198 new_process->status_pending = status;
199 }
200 else
201 /* Pass the signal on. This is what GDB does - except
202 shouldn't we really report it instead? */
203 ptrace (PTRACE_CONT, new_pid, 0, WSTOPSIG (status));
204 }
205
206 /* Always resume the current thread. If we are stopping
207 threads, it will have a pending SIGSTOP; we may as well
208 collect it now. */
209 linux_resume_one_process (&event_child->head,
210 event_child->stepping, 0, NULL);
211 }
212 }
213
214 /* This function should only be called if the process got a SIGTRAP.
215 The SIGTRAP could mean several things.
216
217 On i386, where decr_pc_after_break is non-zero:
218 If we were single-stepping this process using PTRACE_SINGLESTEP,
219 we will get only the one SIGTRAP (even if the instruction we
220 stepped over was a breakpoint). The value of $eip will be the
221 next instruction.
222 If we continue the process using PTRACE_CONT, we will get a
223 SIGTRAP when we hit a breakpoint. The value of $eip will be
224 the instruction after the breakpoint (i.e. needs to be
225 decremented). If we report the SIGTRAP to GDB, we must also
226 report the undecremented PC. If we cancel the SIGTRAP, we
227 must resume at the decremented PC.
228
229 (Presumably, not yet tested) On a non-decr_pc_after_break machine
230 with hardware or kernel single-step:
231 If we single-step over a breakpoint instruction, our PC will
232 point at the following instruction. If we continue and hit a
233 breakpoint instruction, our PC will point at the breakpoint
234 instruction. */
235
236 static CORE_ADDR
237 get_stop_pc (void)
238 {
239 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
240
241 if (get_thread_process (current_inferior)->stepping)
242 return stop_pc;
243 else
244 return stop_pc - the_low_target.decr_pc_after_break;
245 }
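/* Worked example (illustrative; i386 assumed, where decr_pc_after_break
   is 1): if an int3 breakpoint sits at 0x08048500 and the inferior runs
   into it under PTRACE_CONT, the kernel reports SIGTRAP with
   $eip == 0x08048501.  get_stop_pc () above then yields 0x08048500,
   which is the address to test with breakpoint_at () and the place to
   resume from if the SIGTRAP is cancelled; the unadjusted 0x08048501 is
   what gets reported to GDB.  Under PTRACE_SINGLESTEP no adjustment is
   made, per the stepping check above.  */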
246
247 static void *
248 add_process (unsigned long pid)
249 {
250 struct process_info *process;
251
252 process = (struct process_info *) xmalloc (sizeof (*process));
253 memset (process, 0, sizeof (*process));
254
255 process->head.id = pid;
256 process->lwpid = pid;
257
258 add_inferior_to_list (&all_processes, &process->head);
259
260 return process;
261 }
262
 263 /* Start an inferior process and return its PID.
264 ALLARGS is a vector of program-name and args. */
265
266 static int
267 linux_create_inferior (char *program, char **allargs)
268 {
269 void *new_process;
270 int pid;
271
272 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
273 pid = vfork ();
274 #else
275 pid = fork ();
276 #endif
277 if (pid < 0)
278 perror_with_name ("fork");
279
280 if (pid == 0)
281 {
282 ptrace (PTRACE_TRACEME, 0, 0, 0);
283
284 signal (__SIGRTMIN + 1, SIG_DFL);
285
286 setpgid (0, 0);
287
288 execv (program, allargs);
289 if (errno == ENOENT)
290 execvp (program, allargs);
291
292 fprintf (stderr, "Cannot exec %s: %s.\n", program,
293 strerror (errno));
294 fflush (stderr);
295 _exit (0177);
296 }
297
298 new_process = add_process (pid);
299 add_thread (pid, new_process, pid);
300 must_set_ptrace_flags = 1;
301 new_inferior = 1;
302
303 return pid;
304 }
305
306 /* Attach to an inferior process. */
307
308 void
309 linux_attach_lwp (unsigned long pid)
310 {
311 struct process_info *new_process;
312
313 if (ptrace (PTRACE_ATTACH, pid, 0, 0) != 0)
314 {
315 if (all_threads.head != NULL)
316 {
317 /* If we fail to attach to an LWP, just warn. */
318 fprintf (stderr, "Cannot attach to process %ld: %s (%d)\n", pid,
319 strerror (errno), errno);
320 fflush (stderr);
321 return;
322 }
323 else
324 /* If we fail to attach to a process, report an error. */
325 error ("Cannot attach to process %ld: %s (%d)\n", pid,
326 strerror (errno), errno);
327 }
328
329 ptrace (PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACECLONE);
330
331 new_process = (struct process_info *) add_process (pid);
332 add_thread (pid, new_process, pid);
333 new_thread_notify (thread_id_to_gdb_id (new_process->lwpid));
334
335 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
336 brings it to a halt. We should ignore that SIGSTOP and resume the process
337 (unless this is the first process, in which case the flag will be cleared
338 in linux_attach).
339
340 On the other hand, if we are currently trying to stop all threads, we
341 should treat the new thread as if we had sent it a SIGSTOP. This works
342 because we are guaranteed that add_process added us to the end of the
343 list, and so the new thread has not yet reached wait_for_sigstop (but
344 will). */
345 if (! stopping_threads)
346 new_process->stop_expected = 1;
347 }
348
349 int
350 linux_attach (unsigned long pid)
351 {
352 struct process_info *process;
353
354 linux_attach_lwp (pid);
355
356 /* Don't ignore the initial SIGSTOP if we just attached to this process.
357 It will be collected by wait shortly. */
358 process = (struct process_info *) find_inferior_id (&all_processes, pid);
359 process->stop_expected = 0;
360
361 new_inferior = 1;
362
363 return 0;
364 }
365
366 /* Kill the inferior process. Make us have no inferior. */
367
368 static void
369 linux_kill_one_process (struct inferior_list_entry *entry)
370 {
371 struct thread_info *thread = (struct thread_info *) entry;
372 struct process_info *process = get_thread_process (thread);
373 int wstat;
374
375 /* We avoid killing the first thread here, because of a Linux kernel (at
376 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
377 the children get a chance to be reaped, it will remain a zombie
378 forever. */
379 if (entry == all_threads.head)
380 return;
381
382 do
383 {
384 ptrace (PTRACE_KILL, pid_of (process), 0, 0);
385
386 /* Make sure it died. The loop is most likely unnecessary. */
387 wstat = linux_wait_for_event (thread);
388 } while (WIFSTOPPED (wstat));
389 }
390
391 static void
392 linux_kill (void)
393 {
394 struct thread_info *thread = (struct thread_info *) all_threads.head;
395 struct process_info *process;
396 int wstat;
397
398 if (thread == NULL)
399 return;
400
401 for_each_inferior (&all_threads, linux_kill_one_process);
402
403 /* See the comment in linux_kill_one_process. We did not kill the first
404 thread in the list, so do so now. */
405 process = get_thread_process (thread);
406 do
407 {
408 ptrace (PTRACE_KILL, pid_of (process), 0, 0);
409
410 /* Make sure it died. The loop is most likely unnecessary. */
411 wstat = linux_wait_for_event (thread);
412 } while (WIFSTOPPED (wstat));
413
414 clear_inferiors ();
415 free (all_processes.head);
416 all_processes.head = all_processes.tail = NULL;
417 }
418
419 static void
420 linux_detach_one_process (struct inferior_list_entry *entry)
421 {
422 struct thread_info *thread = (struct thread_info *) entry;
423 struct process_info *process = get_thread_process (thread);
424
425 /* Make sure the process isn't stopped at a breakpoint that's
426 no longer there. */
427 check_removed_breakpoint (process);
428
429 /* If this process is stopped but is expecting a SIGSTOP, then make
430 sure we take care of that now. This isn't absolutely guaranteed
431 to collect the SIGSTOP, but is fairly likely to. */
432 if (process->stop_expected)
433 {
434 /* Clear stop_expected, so that the SIGSTOP will be reported. */
435 process->stop_expected = 0;
436 if (process->stopped)
437 linux_resume_one_process (&process->head, 0, 0, NULL);
438 linux_wait_for_event (thread);
439 }
440
441 /* Flush any pending changes to the process's registers. */
442 regcache_invalidate_one ((struct inferior_list_entry *)
443 get_process_thread (process));
444
445 /* Finally, let it resume. */
446 ptrace (PTRACE_DETACH, pid_of (process), 0, 0);
447 }
448
449 static int
450 linux_detach (void)
451 {
452 delete_all_breakpoints ();
453 for_each_inferior (&all_threads, linux_detach_one_process);
454 clear_inferiors ();
455 free (all_processes.head);
456 all_processes.head = all_processes.tail = NULL;
457 return 0;
458 }
459
460 static void
461 linux_join (void)
462 {
463 extern unsigned long signal_pid;
464 int status, ret;
465
466 do {
467 ret = waitpid (signal_pid, &status, 0);
468 if (WIFEXITED (status) || WIFSIGNALED (status))
469 break;
470 } while (ret != -1 || errno != ECHILD);
471 }
472
473 /* Return nonzero if the given thread is still alive. */
474 static int
475 linux_thread_alive (unsigned long lwpid)
476 {
477 if (find_inferior_id (&all_threads, lwpid) != NULL)
478 return 1;
479 else
480 return 0;
481 }
482
483 /* Return nonzero if this process stopped at a breakpoint which
484 no longer appears to be inserted. Also adjust the PC
485 appropriately to resume where the breakpoint used to be. */
486 static int
487 check_removed_breakpoint (struct process_info *event_child)
488 {
489 CORE_ADDR stop_pc;
490 struct thread_info *saved_inferior;
491
492 if (event_child->pending_is_breakpoint == 0)
493 return 0;
494
495 if (debug_threads)
496 fprintf (stderr, "Checking for breakpoint in process %ld.\n",
497 event_child->lwpid);
498
499 saved_inferior = current_inferior;
500 current_inferior = get_process_thread (event_child);
501
502 stop_pc = get_stop_pc ();
503
504 /* If the PC has changed since we stopped, then we shouldn't do
505 anything. This happens if, for instance, GDB handled the
506 decr_pc_after_break subtraction itself. */
507 if (stop_pc != event_child->pending_stop_pc)
508 {
509 if (debug_threads)
510 fprintf (stderr, "Ignoring, PC was changed. Old PC was 0x%08llx\n",
511 event_child->pending_stop_pc);
512
513 event_child->pending_is_breakpoint = 0;
514 current_inferior = saved_inferior;
515 return 0;
516 }
517
518 /* If the breakpoint is still there, we will report hitting it. */
519 if ((*the_low_target.breakpoint_at) (stop_pc))
520 {
521 if (debug_threads)
522 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
523 current_inferior = saved_inferior;
524 return 0;
525 }
526
527 if (debug_threads)
528 fprintf (stderr, "Removed breakpoint.\n");
529
530 /* For decr_pc_after_break targets, here is where we perform the
531 decrement. We go immediately from this function to resuming,
532 and can not safely call get_stop_pc () again. */
533 if (the_low_target.set_pc != NULL)
534 (*the_low_target.set_pc) (stop_pc);
535
536 /* We consumed the pending SIGTRAP. */
537 event_child->pending_is_breakpoint = 0;
538 event_child->status_pending_p = 0;
539 event_child->status_pending = 0;
540
541 current_inferior = saved_inferior;
542 return 1;
543 }
544
545 /* Return 1 if this process has an interesting status pending. This function
546 may silently resume an inferior process. */
547 static int
548 status_pending_p (struct inferior_list_entry *entry, void *dummy)
549 {
550 struct process_info *process = (struct process_info *) entry;
551
552 if (process->status_pending_p)
553 if (check_removed_breakpoint (process))
554 {
555 /* This thread was stopped at a breakpoint, and the breakpoint
556 is now gone. We were told to continue (or step...) all threads,
557 so GDB isn't trying to single-step past this breakpoint.
558 So instead of reporting the old SIGTRAP, pretend we got to
559 the breakpoint just after it was removed instead of just
560 before; resume the process. */
561 linux_resume_one_process (&process->head, 0, 0, NULL);
562 return 0;
563 }
564
565 return process->status_pending_p;
566 }
567
568 static void
569 linux_wait_for_process (struct process_info **childp, int *wstatp)
570 {
571 int ret;
572 int to_wait_for = -1;
573
574 if (*childp != NULL)
575 to_wait_for = (*childp)->lwpid;
576
577 retry:
578 while (1)
579 {
580 ret = waitpid (to_wait_for, wstatp, WNOHANG);
581
582 if (ret == -1)
583 {
584 if (errno != ECHILD)
585 perror_with_name ("waitpid");
586 }
587 else if (ret > 0)
588 break;
589
590 ret = waitpid (to_wait_for, wstatp, WNOHANG | __WCLONE);
591
592 if (ret == -1)
593 {
594 if (errno != ECHILD)
595 perror_with_name ("waitpid (WCLONE)");
596 }
597 else if (ret > 0)
598 break;
599
600 usleep (1000);
601 }
602
603 if (debug_threads
604 && (!WIFSTOPPED (*wstatp)
605 || (WSTOPSIG (*wstatp) != 32
606 && WSTOPSIG (*wstatp) != 33)))
607 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
608
609 if (to_wait_for == -1)
610 *childp = (struct process_info *) find_inferior_id (&all_processes, ret);
611
612 /* If we didn't find a process, one of two things presumably happened:
613 - A process we started and then detached from has exited. Ignore it.
614 - A process we are controlling has forked and the new child's stop
615 was reported to us by the kernel. Save its PID. */
616 if (*childp == NULL && WIFSTOPPED (*wstatp))
617 {
618 add_pid_to_list (&stopped_pids, ret);
619 goto retry;
620 }
621 else if (*childp == NULL)
622 goto retry;
623
624 (*childp)->stopped = 1;
625 (*childp)->pending_is_breakpoint = 0;
626
627 (*childp)->last_status = *wstatp;
628
629 /* Architecture-specific setup after inferior is running.
630 This needs to happen after we have attached to the inferior
631 and it is stopped for the first time, but before we access
632 any inferior registers. */
633 if (new_inferior)
634 {
635 the_low_target.arch_setup ();
636 #ifdef HAVE_LINUX_REGSETS
637 memset (disabled_regsets, 0, num_regsets);
638 #endif
639 new_inferior = 0;
640 }
641
642 if (debug_threads
643 && WIFSTOPPED (*wstatp))
644 {
645 struct thread_info *saved_inferior = current_inferior;
646 current_inferior = (struct thread_info *)
647 find_inferior_id (&all_threads, (*childp)->lwpid);
648 /* For testing only; i386_stop_pc prints out a diagnostic. */
649 if (the_low_target.get_pc != NULL)
650 get_stop_pc ();
651 current_inferior = saved_inferior;
652 }
653 }
654
655 static int
656 linux_wait_for_event (struct thread_info *child)
657 {
658 CORE_ADDR stop_pc;
659 struct process_info *event_child;
660 int wstat;
661 int bp_status;
662
663 /* Check for a process with a pending status. */
664 /* It is possible that the user changed the pending task's registers since
665 it stopped. We correctly handle the change of PC if we hit a breakpoint
666 (in check_removed_breakpoint); signals should be reported anyway. */
667 if (child == NULL)
668 {
669 event_child = (struct process_info *)
670 find_inferior (&all_processes, status_pending_p, NULL);
671 if (debug_threads && event_child)
672 fprintf (stderr, "Got a pending child %ld\n", event_child->lwpid);
673 }
674 else
675 {
676 event_child = get_thread_process (child);
677 if (event_child->status_pending_p
678 && check_removed_breakpoint (event_child))
679 event_child = NULL;
680 }
681
682 if (event_child != NULL)
683 {
684 if (event_child->status_pending_p)
685 {
686 if (debug_threads)
687 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
688 event_child->lwpid, event_child->status_pending);
689 wstat = event_child->status_pending;
690 event_child->status_pending_p = 0;
691 event_child->status_pending = 0;
692 current_inferior = get_process_thread (event_child);
693 return wstat;
694 }
695 }
696
697 /* We only enter this loop if no process has a pending wait status. Thus
698 any action taken in response to a wait status inside this loop is
699 responding as soon as we detect the status, not after any pending
700 events. */
701 while (1)
702 {
703 if (child == NULL)
704 event_child = NULL;
705 else
706 event_child = get_thread_process (child);
707
708 linux_wait_for_process (&event_child, &wstat);
709
710 if (event_child == NULL)
711 error ("event from unknown child");
712
713 current_inferior = (struct thread_info *)
714 find_inferior_id (&all_threads, event_child->lwpid);
715
716 /* Check for thread exit. */
717 if (! WIFSTOPPED (wstat))
718 {
719 if (debug_threads)
720 fprintf (stderr, "LWP %ld exiting\n", event_child->head.id);
721
722 /* If the last thread is exiting, just return. */
723 if (all_threads.head == all_threads.tail)
724 return wstat;
725
726 dead_thread_notify (thread_id_to_gdb_id (event_child->lwpid));
727
728 remove_inferior (&all_processes, &event_child->head);
729 free (event_child);
730 remove_thread (current_inferior);
731 current_inferior = (struct thread_info *) all_threads.head;
732
733 /* If we were waiting for this particular child to do something...
734 well, it did something. */
735 if (child != NULL)
736 return wstat;
737
738 /* Wait for a more interesting event. */
739 continue;
740 }
741
742 if (WIFSTOPPED (wstat)
743 && WSTOPSIG (wstat) == SIGSTOP
744 && event_child->stop_expected)
745 {
746 if (debug_threads)
747 fprintf (stderr, "Expected stop.\n");
748 event_child->stop_expected = 0;
749 linux_resume_one_process (&event_child->head,
750 event_child->stepping, 0, NULL);
751 continue;
752 }
753
754 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
755 && wstat >> 16 != 0)
756 {
757 handle_extended_wait (event_child, wstat);
758 continue;
759 }
760
761 /* If GDB is not interested in this signal, don't stop other
762 threads, and don't report it to GDB. Just resume the
763 inferior right away. We do this for threading-related
764 signals as well as any that GDB specifically requested we
765 ignore. But never ignore SIGSTOP if we sent it ourselves,
766 and do not ignore signals when stepping - they may require
767 special handling to skip the signal handler. */
768 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
769 thread library? */
770 if (WIFSTOPPED (wstat)
771 && !event_child->stepping
772 && (
773 #ifdef USE_THREAD_DB
774 (thread_db_active && (WSTOPSIG (wstat) == __SIGRTMIN
775 || WSTOPSIG (wstat) == __SIGRTMIN + 1))
776 ||
777 #endif
778 (pass_signals[target_signal_from_host (WSTOPSIG (wstat))]
779 && (WSTOPSIG (wstat) != SIGSTOP || !stopping_threads))))
780 {
781 siginfo_t info, *info_p;
782
783 if (debug_threads)
784 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
785 WSTOPSIG (wstat), event_child->head.id);
786
787 if (ptrace (PTRACE_GETSIGINFO, event_child->lwpid, 0, &info) == 0)
788 info_p = &info;
789 else
790 info_p = NULL;
791 linux_resume_one_process (&event_child->head,
792 event_child->stepping,
793 WSTOPSIG (wstat), info_p);
794 continue;
795 }
796
797 /* If this event was not handled above, and is not a SIGTRAP, report
798 it. */
799 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGTRAP)
800 return wstat;
801
802 /* If this target does not support breakpoints, we simply report the
803 SIGTRAP; it's of no concern to us. */
804 if (the_low_target.get_pc == NULL)
805 return wstat;
806
807 stop_pc = get_stop_pc ();
808
809 /* bp_reinsert will only be set if we were single-stepping.
810 Notice that we will resume the process after hitting
811 a gdbserver breakpoint; single-stepping to/over one
812 is not supported (yet). */
813 if (event_child->bp_reinsert != 0)
814 {
815 if (debug_threads)
816 fprintf (stderr, "Reinserted breakpoint.\n");
817 reinsert_breakpoint (event_child->bp_reinsert);
818 event_child->bp_reinsert = 0;
819
820 /* Clear the single-stepping flag and SIGTRAP as we resume. */
821 linux_resume_one_process (&event_child->head, 0, 0, NULL);
822 continue;
823 }
824
825 bp_status = check_breakpoints (stop_pc);
826
827 if (bp_status != 0)
828 {
829 if (debug_threads)
830 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
831
832 /* We hit one of our own breakpoints. We mark it as a pending
833 breakpoint, so that check_removed_breakpoint () will do the PC
834 adjustment for us at the appropriate time. */
835 event_child->pending_is_breakpoint = 1;
836 event_child->pending_stop_pc = stop_pc;
837
838 /* We may need to put the breakpoint back. We continue in the event
839 loop instead of simply replacing the breakpoint right away,
840 in order to not lose signals sent to the thread that hit the
841 breakpoint. Unfortunately this increases the window where another
842 thread could sneak past the removed breakpoint. For the current
843 use of server-side breakpoints (thread creation) this is
844 acceptable; but it needs to be considered before this breakpoint
845 mechanism can be used in more general ways. For some breakpoints
846 it may be necessary to stop all other threads, but that should
847 be avoided where possible.
848
849 If breakpoint_reinsert_addr is NULL, that means that we can
850 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
851 mark it for reinsertion, and single-step.
852
853 Otherwise, call the target function to figure out where we need
854 our temporary breakpoint, create it, and continue executing this
855 process. */
856 if (bp_status == 2)
857 /* No need to reinsert. */
858 linux_resume_one_process (&event_child->head, 0, 0, NULL);
859 else if (the_low_target.breakpoint_reinsert_addr == NULL)
860 {
861 event_child->bp_reinsert = stop_pc;
862 uninsert_breakpoint (stop_pc);
863 linux_resume_one_process (&event_child->head, 1, 0, NULL);
864 }
865 else
866 {
867 reinsert_breakpoint_by_bp
868 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
869 linux_resume_one_process (&event_child->head, 0, 0, NULL);
870 }
871
872 continue;
873 }
874
875 if (debug_threads)
876 fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
877
878 /* If we were single-stepping, we definitely want to report the
879 SIGTRAP. The single-step operation has completed, so also
880 clear the stepping flag; in general this does not matter,
881 because the SIGTRAP will be reported to the client, which
882 will give us a new action for this thread, but clear it for
883 consistency anyway. It's safe to clear the stepping flag
884 because the only consumer of get_stop_pc () after this point
885 is check_removed_breakpoint, and pending_is_breakpoint is not
886 set. It might be wiser to use a step_completed flag instead. */
887 if (event_child->stepping)
888 {
889 event_child->stepping = 0;
890 return wstat;
891 }
892
893 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
894 Check if it is a breakpoint, and if so mark the process information
895 accordingly. This will handle both the necessary fiddling with the
896 PC on decr_pc_after_break targets and suppressing extra threads
897 hitting a breakpoint if two hit it at once and then GDB removes it
898 after the first is reported. Arguably it would be better to report
899 multiple threads hitting breakpoints simultaneously, but the current
900 remote protocol does not allow this. */
901 if ((*the_low_target.breakpoint_at) (stop_pc))
902 {
903 event_child->pending_is_breakpoint = 1;
904 event_child->pending_stop_pc = stop_pc;
905 }
906
907 return wstat;
908 }
909
910 /* NOTREACHED */
911 return 0;
912 }
913
 914 /* Wait for the inferior process; return its status. */
915
916 static unsigned char
917 linux_wait (char *status)
918 {
919 int w;
920 struct thread_info *child = NULL;
921
922 retry:
923 /* If we were only supposed to resume one thread, only wait for
924 that thread - if it's still alive. If it died, however - which
925 can happen if we're coming from the thread death case below -
926 then we need to make sure we restart the other threads. We could
927 pick a thread at random or restart all; restarting all is less
928 arbitrary. */
929 if (cont_thread != 0 && cont_thread != -1)
930 {
931 child = (struct thread_info *) find_inferior_id (&all_threads,
932 cont_thread);
933
934 /* No stepping, no signal - unless one is pending already, of course. */
935 if (child == NULL)
936 {
937 struct thread_resume resume_info;
938 resume_info.thread = -1;
939 resume_info.step = resume_info.sig = resume_info.leave_stopped = 0;
940 linux_resume (&resume_info);
941 }
942 }
943
944 w = linux_wait_for_event (child);
945 stop_all_processes ();
946
947 if (must_set_ptrace_flags)
948 {
949 ptrace (PTRACE_SETOPTIONS, inferior_pid, 0, PTRACE_O_TRACECLONE);
950 must_set_ptrace_flags = 0;
951 }
952
953 /* If we are waiting for a particular child, and it exited,
954 linux_wait_for_event will return its exit status. Similarly if
955 the last child exited. If this is not the last child, however,
956 do not report it as exited until there is a 'thread exited' response
957 available in the remote protocol. Instead, just wait for another event.
958 This should be safe, because if the thread crashed we will already
959 have reported the termination signal to GDB; that should stop any
960 in-progress stepping operations, etc.
961
962 Report the exit status of the last thread to exit. This matches
963 LinuxThreads' behavior. */
964
965 if (all_threads.head == all_threads.tail)
966 {
967 if (WIFEXITED (w))
968 {
969 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
970 *status = 'W';
971 clear_inferiors ();
972 free (all_processes.head);
973 all_processes.head = all_processes.tail = NULL;
974 return WEXITSTATUS (w);
975 }
976 else if (!WIFSTOPPED (w))
977 {
978 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
979 *status = 'X';
980 clear_inferiors ();
981 free (all_processes.head);
982 all_processes.head = all_processes.tail = NULL;
983 return target_signal_from_host (WTERMSIG (w));
984 }
985 }
986 else
987 {
988 if (!WIFSTOPPED (w))
989 goto retry;
990 }
991
992 *status = 'T';
993 return target_signal_from_host (WSTOPSIG (w));
994 }
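/* Illustrative note (an assumption about the caller, not something this
   file enforces): the status character and value returned above are
   turned into remote protocol stop replies by the server layer - e.g. a
   clean exit with code 1 comes back as 'W' / 1, a fatal SIGSEGV as
   'X' / TARGET_SIGNAL_SEGV, and an ordinary breakpoint stop as
   'T' / TARGET_SIGNAL_TRAP.  */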
995
996 /* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
997 thread groups are in use, we need to use tkill. */
998
999 static int
1000 kill_lwp (unsigned long lwpid, int signo)
1001 {
1002 static int tkill_failed;
1003
1004 errno = 0;
1005
1006 #ifdef SYS_tkill
1007 if (!tkill_failed)
1008 {
1009 int ret = syscall (SYS_tkill, lwpid, signo);
1010 if (errno != ENOSYS)
1011 return ret;
1012 errno = 0;
1013 tkill_failed = 1;
1014 }
1015 #endif
1016
1017 return kill (lwpid, signo);
1018 }
1019
1020 static void
1021 send_sigstop (struct inferior_list_entry *entry)
1022 {
1023 struct process_info *process = (struct process_info *) entry;
1024
1025 if (process->stopped)
1026 return;
1027
1028 /* If we already have a pending stop signal for this process, don't
1029 send another. */
1030 if (process->stop_expected)
1031 {
1032 if (debug_threads)
1033 fprintf (stderr, "Have pending sigstop for process %ld\n",
1034 process->lwpid);
1035
1036 /* We clear the stop_expected flag so that wait_for_sigstop
1037 will receive the SIGSTOP event (instead of silently resuming and
1038 waiting again). It'll be reset below. */
1039 process->stop_expected = 0;
1040 return;
1041 }
1042
1043 if (debug_threads)
1044 fprintf (stderr, "Sending sigstop to process %ld\n", process->head.id);
1045
1046 kill_lwp (process->head.id, SIGSTOP);
1047 }
1048
1049 static void
1050 wait_for_sigstop (struct inferior_list_entry *entry)
1051 {
1052 struct process_info *process = (struct process_info *) entry;
1053 struct thread_info *saved_inferior, *thread;
1054 int wstat;
1055 unsigned long saved_tid;
1056
1057 if (process->stopped)
1058 return;
1059
1060 saved_inferior = current_inferior;
1061 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
1062 thread = (struct thread_info *) find_inferior_id (&all_threads,
1063 process->lwpid);
1064 wstat = linux_wait_for_event (thread);
1065
1066 /* If we stopped with a non-SIGSTOP signal, save it for later
1067 and record the pending SIGSTOP. If the process exited, just
1068 return. */
1069 if (WIFSTOPPED (wstat)
1070 && WSTOPSIG (wstat) != SIGSTOP)
1071 {
1072 if (debug_threads)
1073 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
1074 process->lwpid, wstat);
1075 process->status_pending_p = 1;
1076 process->status_pending = wstat;
1077 process->stop_expected = 1;
1078 }
1079
1080 if (linux_thread_alive (saved_tid))
1081 current_inferior = saved_inferior;
1082 else
1083 {
1084 if (debug_threads)
1085 fprintf (stderr, "Previously current thread died.\n");
1086
1087 /* Set a valid thread as current. */
1088 set_desired_inferior (0);
1089 }
1090 }
1091
1092 static void
1093 stop_all_processes (void)
1094 {
1095 stopping_threads = 1;
1096 for_each_inferior (&all_processes, send_sigstop);
1097 for_each_inferior (&all_processes, wait_for_sigstop);
1098 stopping_threads = 0;
1099 }
1100
1101 /* Resume execution of the inferior process.
1102 If STEP is nonzero, single-step it.
1103 If SIGNAL is nonzero, give it that signal. */
1104
1105 static void
1106 linux_resume_one_process (struct inferior_list_entry *entry,
1107 int step, int signal, siginfo_t *info)
1108 {
1109 struct process_info *process = (struct process_info *) entry;
1110 struct thread_info *saved_inferior;
1111
1112 if (process->stopped == 0)
1113 return;
1114
1115 /* If we have pending signals or status, and a new signal, enqueue the
1116 signal. Also enqueue the signal if we are waiting to reinsert a
1117 breakpoint; it will be picked up again below. */
1118 if (signal != 0
1119 && (process->status_pending_p || process->pending_signals != NULL
1120 || process->bp_reinsert != 0))
1121 {
1122 struct pending_signals *p_sig;
1123 p_sig = xmalloc (sizeof (*p_sig));
1124 p_sig->prev = process->pending_signals;
1125 p_sig->signal = signal;
1126 if (info == NULL)
1127 memset (&p_sig->info, 0, sizeof (siginfo_t));
1128 else
1129 memcpy (&p_sig->info, info, sizeof (siginfo_t));
1130 process->pending_signals = p_sig;
1131 }
1132
1133 if (process->status_pending_p && !check_removed_breakpoint (process))
1134 return;
1135
1136 saved_inferior = current_inferior;
1137 current_inferior = get_process_thread (process);
1138
1139 if (debug_threads)
1140 fprintf (stderr, "Resuming process %ld (%s, signal %d, stop %s)\n", inferior_pid,
1141 step ? "step" : "continue", signal,
1142 process->stop_expected ? "expected" : "not expected");
1143
1144 /* This bit needs some thinking about. If we get a signal that
1145 we must report while a single-step reinsert is still pending,
1146 we often end up resuming the thread. It might be better to
1147 (ew) allow a stack of pending events; then we could be sure that
1148 the reinsert happened right away and not lose any signals.
1149
1150 Making this stack would also shrink the window in which breakpoints are
1151 uninserted (see comment in linux_wait_for_process) but not enough for
1152 complete correctness, so it won't solve that problem. It may be
1153 worthwhile just to solve this one, however. */
1154 if (process->bp_reinsert != 0)
1155 {
1156 if (debug_threads)
1157 fprintf (stderr, " pending reinsert at %08lx", (long)process->bp_reinsert);
1158 if (step == 0)
1159 fprintf (stderr, "BAD - reinserting but not stepping.\n");
1160 step = 1;
1161
1162 /* Postpone any pending signal. It was enqueued above. */
1163 signal = 0;
1164 }
1165
1166 check_removed_breakpoint (process);
1167
1168 if (debug_threads && the_low_target.get_pc != NULL)
1169 {
1170 fprintf (stderr, " ");
1171 (*the_low_target.get_pc) ();
1172 }
1173
1174 /* If we have pending signals, consume one unless we are trying to reinsert
1175 a breakpoint. */
1176 if (process->pending_signals != NULL && process->bp_reinsert == 0)
1177 {
1178 struct pending_signals **p_sig;
1179
1180 p_sig = &process->pending_signals;
1181 while ((*p_sig)->prev != NULL)
1182 p_sig = &(*p_sig)->prev;
1183
1184 signal = (*p_sig)->signal;
1185 if ((*p_sig)->info.si_signo != 0)
1186 ptrace (PTRACE_SETSIGINFO, process->lwpid, 0, &(*p_sig)->info);
1187
1188 free (*p_sig);
1189 *p_sig = NULL;
1190 }
1191
1192 regcache_invalidate_one ((struct inferior_list_entry *)
1193 get_process_thread (process));
1194 errno = 0;
1195 process->stopped = 0;
1196 process->stepping = step;
1197 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, process->lwpid, 0, signal);
1198
1199 current_inferior = saved_inferior;
1200 if (errno)
1201 {
1202 /* ESRCH from ptrace either means that the thread was already
1203 running (an error) or that it is gone (a race condition). If
1204 it's gone, we will get a notification the next time we wait,
1205 so we can ignore the error. We could differentiate these
1206 two, but it's tricky without waiting; the thread still exists
1207 as a zombie, so sending it signal 0 would succeed. So just
1208 ignore ESRCH. */
1209 if (errno == ESRCH)
1210 return;
1211
1212 perror_with_name ("ptrace");
1213 }
1214 }
1215
1216 static struct thread_resume *resume_ptr;
1217
1218 /* This function is called once per thread. We look up the thread
1219 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1220 resume request.
1221
1222 This algorithm is O(threads * resume elements), but resume elements
1223 is small (and will remain small at least until GDB supports thread
1224 suspension). */
1225 static void
1226 linux_set_resume_request (struct inferior_list_entry *entry)
1227 {
1228 struct process_info *process;
1229 struct thread_info *thread;
1230 int ndx;
1231
1232 thread = (struct thread_info *) entry;
1233 process = get_thread_process (thread);
1234
1235 ndx = 0;
1236 while (resume_ptr[ndx].thread != -1 && resume_ptr[ndx].thread != entry->id)
1237 ndx++;
1238
1239 process->resume = &resume_ptr[ndx];
1240 }
1241
1242 /* This function is called once per thread. We check the thread's resume
1243 request, which will tell us whether to resume, step, or leave the thread
1244 stopped; and what signal, if any, it should be sent. For threads which
1245 we aren't explicitly told otherwise, we preserve the stepping flag; this
1246 is used for stepping over gdbserver-placed breakpoints. */
1247
1248 static void
1249 linux_continue_one_thread (struct inferior_list_entry *entry)
1250 {
1251 struct process_info *process;
1252 struct thread_info *thread;
1253 int step;
1254
1255 thread = (struct thread_info *) entry;
1256 process = get_thread_process (thread);
1257
1258 if (process->resume->leave_stopped)
1259 return;
1260
1261 if (process->resume->thread == -1)
1262 step = process->stepping || process->resume->step;
1263 else
1264 step = process->resume->step;
1265
1266 linux_resume_one_process (&process->head, step, process->resume->sig, NULL);
1267
1268 process->resume = NULL;
1269 }
1270
1271 /* This function is called once per thread. We check the thread's resume
1272 request, which will tell us whether to resume, step, or leave the thread
1273 stopped; and what signal, if any, it should be sent. We queue any needed
1274 signals, since we won't actually resume. We already have a pending event
1275 to report, so we don't need to preserve any step requests; they should
1276 be re-issued if necessary. */
1277
1278 static void
1279 linux_queue_one_thread (struct inferior_list_entry *entry)
1280 {
1281 struct process_info *process;
1282 struct thread_info *thread;
1283
1284 thread = (struct thread_info *) entry;
1285 process = get_thread_process (thread);
1286
1287 if (process->resume->leave_stopped)
1288 return;
1289
1290 /* If we have a new signal, enqueue the signal. */
1291 if (process->resume->sig != 0)
1292 {
1293 struct pending_signals *p_sig;
1294 p_sig = xmalloc (sizeof (*p_sig));
1295 p_sig->prev = process->pending_signals;
1296 p_sig->signal = process->resume->sig;
1297 memset (&p_sig->info, 0, sizeof (siginfo_t));
1298
1299 /* If this is the same signal we were previously stopped by,
1300 make sure to queue its siginfo. We can ignore the return
1301 value of ptrace; if it fails, we'll skip
1302 PTRACE_SETSIGINFO. */
1303 if (WIFSTOPPED (process->last_status)
1304 && WSTOPSIG (process->last_status) == process->resume->sig)
1305 ptrace (PTRACE_GETSIGINFO, process->lwpid, 0, &p_sig->info);
1306
1307 process->pending_signals = p_sig;
1308 }
1309
1310 process->resume = NULL;
1311 }
1312
1313 /* Set *FLAG_P if this process has an interesting status pending. */
1314 static int
1315 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1316 {
1317 struct process_info *process = (struct process_info *) entry;
1318
1319 /* Processes which will not be resumed are not interesting, because
1320 we might not wait for them next time through linux_wait. */
1321 if (process->resume->leave_stopped)
1322 return 0;
1323
1324 /* If this thread has a removed breakpoint, we won't have any
1325 events to report later, so check now. check_removed_breakpoint
1326 may clear status_pending_p. We avoid calling check_removed_breakpoint
1327 for any thread that we are not otherwise going to resume - this
1328 lets us preserve stopped status when two threads hit a breakpoint.
1329 GDB removes the breakpoint to single-step a particular thread
1330 past it, then re-inserts it and resumes all threads. We want
1331 to report the second thread without resuming it in the interim. */
1332 if (process->status_pending_p)
1333 check_removed_breakpoint (process);
1334
1335 if (process->status_pending_p)
1336 * (int *) flag_p = 1;
1337
1338 return 0;
1339 }
1340
1341 static void
1342 linux_resume (struct thread_resume *resume_info)
1343 {
1344 int pending_flag;
1345
1346 /* Yes, the use of a global here is rather ugly. */
1347 resume_ptr = resume_info;
1348
1349 for_each_inferior (&all_threads, linux_set_resume_request);
1350
1351 /* If there is a thread which would otherwise be resumed, which
1352 has a pending status, then don't resume any threads - we can just
1353 report the pending status. Make sure to queue any signals
1354 that would otherwise be sent. */
1355 pending_flag = 0;
1356 find_inferior (&all_processes, resume_status_pending_p, &pending_flag);
1357
1358 if (debug_threads)
1359 {
1360 if (pending_flag)
1361 fprintf (stderr, "Not resuming, pending status\n");
1362 else
1363 fprintf (stderr, "Resuming, no pending status\n");
1364 }
1365
1366 if (pending_flag)
1367 for_each_inferior (&all_threads, linux_queue_one_thread);
1368 else
1369 for_each_inferior (&all_threads, linux_continue_one_thread);
1370 }
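#if 0
/* Illustrative sketch, not part of the build: the RESUME_INFO array
   passed to linux_resume is terminated by a default entry whose thread
   field is -1 (that is what linux_set_resume_request searches for).
   "Single-step LWP 1234 with no signal, and leave every other thread
   stopped" would arrive roughly as below; designated initializers are
   used only for clarity, and `example_step_one_lwp' is a hypothetical
   name.  */
static void
example_step_one_lwp (void)
{
  struct thread_resume resume[2] = {
    { .thread = 1234, .step = 1, .sig = 0, .leave_stopped = 0 },
    { .thread = -1,   .step = 0, .sig = 0, .leave_stopped = 1 },
  };

  linux_resume (resume);
}
#endif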
1371
1372 #ifdef HAVE_LINUX_USRREGS
1373
1374 int
1375 register_addr (int regnum)
1376 {
1377 int addr;
1378
1379 if (regnum < 0 || regnum >= the_low_target.num_regs)
1380 error ("Invalid register number %d.", regnum);
1381
1382 addr = the_low_target.regmap[regnum];
1383
1384 return addr;
1385 }
1386
1387 /* Fetch one register. */
1388 static void
1389 fetch_register (int regno)
1390 {
1391 CORE_ADDR regaddr;
1392 int i, size;
1393 char *buf;
1394
1395 if (regno >= the_low_target.num_regs)
1396 return;
1397 if ((*the_low_target.cannot_fetch_register) (regno))
1398 return;
1399
1400 regaddr = register_addr (regno);
1401 if (regaddr == -1)
1402 return;
1403 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1404 & - sizeof (PTRACE_XFER_TYPE);
1405 buf = alloca (size);
1406 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1407 {
1408 errno = 0;
1409 *(PTRACE_XFER_TYPE *) (buf + i) =
1410 ptrace (PTRACE_PEEKUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr, 0);
1411 regaddr += sizeof (PTRACE_XFER_TYPE);
1412 if (errno != 0)
1413 {
1414 /* Warning, not error, in case we are attached; sometimes the
1415 kernel doesn't let us at the registers. */
1416 char *err = strerror (errno);
1417 char *msg = alloca (strlen (err) + 128);
1418 sprintf (msg, "reading register %d: %s", regno, err);
1419 error (msg);
1420 goto error_exit;
1421 }
1422 }
1423
1424 if (the_low_target.supply_ptrace_register)
1425 the_low_target.supply_ptrace_register (regno, buf);
1426 else
1427 supply_register (regno, buf);
1428
1429 error_exit:;
1430 }
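/* Worked example (illustrative): with an 8-byte PTRACE_XFER_TYPE and a
   4-byte register, the rounding above gives (4 + 8 - 1) & -8 == 8, so a
   single PTRACE_PEEKUSER transfer covers the register; a 10-byte
   register would round up to 16 and need two transfers.  */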
1431
1432 /* Fetch all registers, or just one, from the child process. */
1433 static void
1434 usr_fetch_inferior_registers (int regno)
1435 {
1436 if (regno == -1 || regno == 0)
1437 for (regno = 0; regno < the_low_target.num_regs; regno++)
1438 fetch_register (regno);
1439 else
1440 fetch_register (regno);
1441 }
1442
1443 /* Store our register values back into the inferior.
1444 If REGNO is -1, do this for all registers.
1445 Otherwise, REGNO specifies which register (so we can save time). */
1446 static void
1447 usr_store_inferior_registers (int regno)
1448 {
1449 CORE_ADDR regaddr;
1450 int i, size;
1451 char *buf;
1452
1453 if (regno >= 0)
1454 {
1455 if (regno >= the_low_target.num_regs)
1456 return;
1457
1458 if ((*the_low_target.cannot_store_register) (regno) == 1)
1459 return;
1460
1461 regaddr = register_addr (regno);
1462 if (regaddr == -1)
1463 return;
1464 errno = 0;
1465 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1466 & - sizeof (PTRACE_XFER_TYPE);
1467 buf = alloca (size);
1468 memset (buf, 0, size);
1469
1470 if (the_low_target.collect_ptrace_register)
1471 the_low_target.collect_ptrace_register (regno, buf);
1472 else
1473 collect_register (regno, buf);
1474
1475 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1476 {
1477 errno = 0;
1478 ptrace (PTRACE_POKEUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr,
1479 *(PTRACE_XFER_TYPE *) (buf + i));
1480 if (errno != 0)
1481 {
1482 /* At this point, ESRCH should mean the process is already gone,
1483 in which case we simply ignore attempts to change its registers.
1484 See also the related comment in linux_resume_one_process. */
1485 if (errno == ESRCH)
1486 return;
1487
1488 if ((*the_low_target.cannot_store_register) (regno) == 0)
1489 {
1490 char *err = strerror (errno);
1491 char *msg = alloca (strlen (err) + 128);
1492 sprintf (msg, "writing register %d: %s",
1493 regno, err);
1494 error (msg);
1495 return;
1496 }
1497 }
1498 regaddr += sizeof (PTRACE_XFER_TYPE);
1499 }
1500 }
1501 else
1502 for (regno = 0; regno < the_low_target.num_regs; regno++)
1503 usr_store_inferior_registers (regno);
1504 }
1505 #endif /* HAVE_LINUX_USRREGS */
1506
1507
1508
1509 #ifdef HAVE_LINUX_REGSETS
1510
1511 static int
1512 regsets_fetch_inferior_registers ()
1513 {
1514 struct regset_info *regset;
1515 int saw_general_regs = 0;
1516
1517 regset = target_regsets;
1518
1519 while (regset->size >= 0)
1520 {
1521 void *buf;
1522 int res;
1523
1524 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
1525 {
1526 regset ++;
1527 continue;
1528 }
1529
1530 buf = xmalloc (regset->size);
1531 #ifndef __sparc__
1532 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1533 #else
1534 res = ptrace (regset->get_request, inferior_pid, buf, 0);
1535 #endif
1536 if (res < 0)
1537 {
1538 if (errno == EIO)
1539 {
1540 /* If we get EIO on a regset, do not try it again for
1541 this process. */
 1542 disabled_regsets[regset - target_regsets] = 1;
      free (buf);
 1543 continue;
1544 }
1545 else
1546 {
1547 char s[256];
1548 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%ld",
1549 inferior_pid);
1550 perror (s);
1551 }
1552 }
1553 else if (regset->type == GENERAL_REGS)
1554 saw_general_regs = 1;
 1555 regset->store_function (buf);
      free (buf);
1556 regset ++;
1557 }
1558 if (saw_general_regs)
1559 return 0;
1560 else
1561 return 1;
1562 }
1563
1564 static int
1565 regsets_store_inferior_registers ()
1566 {
1567 struct regset_info *regset;
1568 int saw_general_regs = 0;
1569
1570 regset = target_regsets;
1571
1572 while (regset->size >= 0)
1573 {
1574 void *buf;
1575 int res;
1576
1577 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
1578 {
1579 regset ++;
1580 continue;
1581 }
1582
1583 buf = xmalloc (regset->size);
1584
1585 /* First fill the buffer with the current register set contents,
1586 in case there are any items in the kernel's regset that are
1587 not in gdbserver's regcache. */
1588 #ifndef __sparc__
1589 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1590 #else
1591 res = ptrace (regset->get_request, inferior_pid, buf, 0);
1592 #endif
1593
1594 if (res == 0)
1595 {
1596 /* Then overlay our cached registers on that. */
1597 regset->fill_function (buf);
1598
1599 /* Only now do we write the register set. */
1600 #ifndef __sparc__
1601 res = ptrace (regset->set_request, inferior_pid, 0, buf);
1602 #else
1603 res = ptrace (regset->set_request, inferior_pid, buf, 0);
1604 #endif
1605 }
1606
1607 if (res < 0)
1608 {
1609 if (errno == EIO)
1610 {
1611 /* If we get EIO on a regset, do not try it again for
1612 this process. */
1613 disabled_regsets[regset - target_regsets] = 1;
1614 continue;
1615 }
1616 else if (errno == ESRCH)
1617 {
1618 /* At this point, ESRCH should mean the process is already gone,
1619 in which case we simply ignore attempts to change its registers.
1620 See also the related comment in linux_resume_one_process. */
1621 return 0;
1622 }
1623 else
1624 {
1625 perror ("Warning: ptrace(regsets_store_inferior_registers)");
1626 }
1627 }
1628 else if (regset->type == GENERAL_REGS)
1629 saw_general_regs = 1;
1630 regset ++;
1631 free (buf);
1632 }
1633 if (saw_general_regs)
1634 return 0;
1635 else
1636 return 1;
1638 }
1639
1640 #endif /* HAVE_LINUX_REGSETS */
1641
1642
1643 void
1644 linux_fetch_registers (int regno)
1645 {
1646 #ifdef HAVE_LINUX_REGSETS
1647 if (regsets_fetch_inferior_registers () == 0)
1648 return;
1649 #endif
1650 #ifdef HAVE_LINUX_USRREGS
1651 usr_fetch_inferior_registers (regno);
1652 #endif
1653 }
1654
1655 void
1656 linux_store_registers (int regno)
1657 {
1658 #ifdef HAVE_LINUX_REGSETS
1659 if (regsets_store_inferior_registers () == 0)
1660 return;
1661 #endif
1662 #ifdef HAVE_LINUX_USRREGS
1663 usr_store_inferior_registers (regno);
1664 #endif
1665 }
1666
1667
1668 /* Copy LEN bytes from inferior's memory starting at MEMADDR
1669 to debugger memory starting at MYADDR. */
1670
1671 static int
1672 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
1673 {
1674 register int i;
1675 /* Round starting address down to longword boundary. */
1676 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1677 /* Round ending address up; get number of longwords that makes. */
1678 register int count
1679 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
1680 / sizeof (PTRACE_XFER_TYPE);
1681 /* Allocate buffer of that many longwords. */
1682 register PTRACE_XFER_TYPE *buffer
1683 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1684 int fd;
1685 char filename[64];
1686
1687 /* Try using /proc. Don't bother for one word. */
1688 if (len >= 3 * sizeof (long))
1689 {
1690 /* We could keep this file open and cache it - possibly one per
1691 thread. That requires some juggling, but is even faster. */
1692 sprintf (filename, "/proc/%ld/mem", inferior_pid);
1693 fd = open (filename, O_RDONLY | O_LARGEFILE);
1694 if (fd == -1)
1695 goto no_proc;
1696
1697 /* If pread64 is available, use it. It's faster if the kernel
1698 supports it (only one syscall), and it's 64-bit safe even on
1699 32-bit platforms (for instance, SPARC debugging a SPARC64
1700 application). */
1701 #ifdef HAVE_PREAD64
1702 if (pread64 (fd, myaddr, len, memaddr) != len)
1703 #else
 1704 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
1705 #endif
1706 {
1707 close (fd);
1708 goto no_proc;
1709 }
1710
1711 close (fd);
1712 return 0;
1713 }
1714
1715 no_proc:
1716 /* Read all the longwords */
1717 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1718 {
1719 errno = 0;
1720 buffer[i] = ptrace (PTRACE_PEEKTEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, 0);
1721 if (errno)
1722 return errno;
1723 }
1724
1725 /* Copy appropriate bytes out of the buffer. */
1726 memcpy (myaddr, (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), len);
1727
1728 return 0;
1729 }
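/* Worked example (illustrative): with 4-byte longwords, a request for
   MEMADDR == 0x1003, LEN == 6 rounds the start down to ADDR == 0x1000
   and computes COUNT == ((0x1009 - 0x1000) + 3) / 4 == 3 peeks; the six
   requested bytes are then copied out of the 12-byte buffer starting at
   offset MEMADDR & 3 == 3.  The same rounding is used by
   linux_write_memory below, which additionally pre-reads the first and
   last words so that the bytes outside [MEMADDR, MEMADDR + LEN) are
   written back unchanged.  */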
1730
1731 /* Copy LEN bytes of data from debugger memory at MYADDR
1732 to inferior's memory at MEMADDR.
1733 On failure (cannot write the inferior)
1734 returns the value of errno. */
1735
1736 static int
1737 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
1738 {
1739 register int i;
1740 /* Round starting address down to longword boundary. */
1741 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1742 /* Round ending address up; get number of longwords that makes. */
1743 register int count
1744 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
1745 /* Allocate buffer of that many longwords. */
1746 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1747
1748 if (debug_threads)
1749 {
1750 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
1751 }
1752
1753 /* Fill start and end extra bytes of buffer with existing memory data. */
1754
1755 buffer[0] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1756 (PTRACE_ARG3_TYPE) addr, 0);
1757
1758 if (count > 1)
1759 {
1760 buffer[count - 1]
1761 = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1762 (PTRACE_ARG3_TYPE) (addr + (count - 1)
1763 * sizeof (PTRACE_XFER_TYPE)),
1764 0);
1765 }
1766
1767 /* Copy data to be written over corresponding part of buffer */
1768
1769 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
1770
1771 /* Write the entire buffer. */
1772
1773 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1774 {
1775 errno = 0;
1776 ptrace (PTRACE_POKETEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
1777 if (errno)
1778 return errno;
1779 }
1780
1781 return 0;
1782 }
1783
1784 static int linux_supports_tracefork_flag;
1785
1786 /* Helper functions for linux_test_for_tracefork, called via clone (). */
1787
1788 static int
1789 linux_tracefork_grandchild (void *arg)
1790 {
1791 _exit (0);
1792 }
1793
1794 #define STACK_SIZE 4096
1795
1796 static int
1797 linux_tracefork_child (void *arg)
1798 {
1799 ptrace (PTRACE_TRACEME, 0, 0, 0);
1800 kill (getpid (), SIGSTOP);
1801 #ifdef __ia64__
1802 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
1803 CLONE_VM | SIGCHLD, NULL);
1804 #else
1805 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
1806 CLONE_VM | SIGCHLD, NULL);
1807 #endif
1808 _exit (0);
1809 }
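/* Illustrative layout note (assumption: downward-growing stacks, the
   usual case on the non-ia64 clone path): linux_test_for_tracefork
   below allocates STACK_SIZE * 4 bytes.  The child is given
   STACK + STACK_SIZE as its stack top and STACK + STACK_SIZE * 2 as
   ARG, so the grandchild created above gets ARG + STACK_SIZE ==
   STACK + STACK_SIZE * 3 as its stack top; the two stacks therefore
   occupy disjoint regions of the same allocation, which matters because
   CLONE_VM shares the address space.  */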
1810
1811 /* Wrapper function for waitpid which handles EINTR. */
1812
1813 static int
1814 my_waitpid (int pid, int *status, int flags)
1815 {
1816 int ret;
1817 do
1818 {
1819 ret = waitpid (pid, status, flags);
1820 }
1821 while (ret == -1 && errno == EINTR);
1822
1823 return ret;
1824 }
1825
1826 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
1827 sure that we can enable the option, and that it had the desired
1828 effect. */
1829
1830 static void
1831 linux_test_for_tracefork (void)
1832 {
1833 int child_pid, ret, status;
1834 long second_pid;
1835 char *stack = xmalloc (STACK_SIZE * 4);
1836
1837 linux_supports_tracefork_flag = 0;
1838
1839 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
1840 #ifdef __ia64__
1841 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
1842 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
1843 #else
1844 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
1845 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
1846 #endif
1847 if (child_pid == -1)
1848 perror_with_name ("clone");
1849
1850 ret = my_waitpid (child_pid, &status, 0);
1851 if (ret == -1)
1852 perror_with_name ("waitpid");
1853 else if (ret != child_pid)
1854 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
1855 if (! WIFSTOPPED (status))
1856 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
1857
1858 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
1859 if (ret != 0)
1860 {
1861 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
1862 if (ret != 0)
1863 {
1864 warning ("linux_test_for_tracefork: failed to kill child");
1865 return;
1866 }
1867
1868 ret = my_waitpid (child_pid, &status, 0);
1869 if (ret != child_pid)
1870 warning ("linux_test_for_tracefork: failed to wait for killed child");
1871 else if (!WIFSIGNALED (status))
1872 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
1873 "killed child", status);
1874
1875 return;
1876 }
1877
1878 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
1879 if (ret != 0)
1880 warning ("linux_test_for_tracefork: failed to resume child");
1881
1882 ret = my_waitpid (child_pid, &status, 0);
1883
1884 if (ret == child_pid && WIFSTOPPED (status)
1885 && status >> 16 == PTRACE_EVENT_FORK)
1886 {
1887 second_pid = 0;
1888 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
1889 if (ret == 0 && second_pid != 0)
1890 {
1891 int second_status;
1892
1893 linux_supports_tracefork_flag = 1;
1894 my_waitpid (second_pid, &second_status, 0);
1895 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
1896 if (ret != 0)
1897 warning ("linux_test_for_tracefork: failed to kill second child");
1898 my_waitpid (second_pid, &status, 0);
1899 }
1900 }
1901 else
1902 warning ("linux_test_for_tracefork: unexpected result from waitpid "
1903 "(%d, status 0x%x)", ret, status);
1904
1905 do
1906 {
1907 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
1908 if (ret != 0)
1909 warning ("linux_test_for_tracefork: failed to kill child");
1910 my_waitpid (child_pid, &status, 0);
1911 }
1912 while (WIFSTOPPED (status));
1913
1914 free (stack);
1915 }
1916
1917
1918 static void
1919 linux_look_up_symbols (void)
1920 {
1921 #ifdef USE_THREAD_DB
1922 if (thread_db_active)
1923 return;
1924
1925 thread_db_active = thread_db_init (!linux_supports_tracefork_flag);
1926 #endif
1927 }
1928
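/* Interrupt the inferior on behalf of an interrupt request from GDB:
   if the client has selected a specific thread to continue
   (cont_thread is neither 0 nor -1), send SIGINT to that LWP,
   otherwise to the initial process, signal_pid.  */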
1929 static void
1930 linux_request_interrupt (void)
1931 {
1932 extern unsigned long signal_pid;
1933
1934 if (cont_thread != 0 && cont_thread != -1)
1935 {
1936 struct process_info *process;
1937
1938 process = get_thread_process (current_inferior);
1939 kill_lwp (process->lwpid, SIGINT);
1940 }
1941 else
1942 kill_lwp (signal_pid, SIGINT);
1943 }
1944
1945 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
1946 to debugger memory starting at MYADDR. */
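/* The data comes from /proc/PID/auxv, which holds the auxiliary vector
   exactly as the kernel passed it to the process: an array of
   (type, value) pairs terminated by an AT_NULL entry.  GDB parses it
   on its side, so we only copy raw bytes here.  */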
1947
1948 static int
1949 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
1950 {
1951 char filename[PATH_MAX];
1952 int fd, n;
1953
1954 snprintf (filename, sizeof filename, "/proc/%ld/auxv", inferior_pid);
1955
1956 fd = open (filename, O_RDONLY);
1957 if (fd < 0)
1958 return -1;
1959
1960 if (offset != (CORE_ADDR) 0
1961 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
1962 n = -1;
1963 else
1964 n = read (fd, myaddr, len);
1965
1966 close (fd);
1967
1968 return n;
1969 }
1970
1971 /* These watchpoint-related wrappers simply pass the call on to the low
1972 target if it has registered a corresponding function.  */
1973
1974 static int
1975 linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
1976 {
1977 if (the_low_target.insert_watchpoint != NULL)
1978 return the_low_target.insert_watchpoint (type, addr, len);
1979 else
1980 /* Unsupported (see target.h). */
1981 return 1;
1982 }
1983
1984 static int
1985 linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
1986 {
1987 if (the_low_target.remove_watchpoint != NULL)
1988 return the_low_target.remove_watchpoint (type, addr, len);
1989 else
1990 /* Unsupported (see target.h). */
1991 return 1;
1992 }
1993
1994 static int
1995 linux_stopped_by_watchpoint (void)
1996 {
1997 if (the_low_target.stopped_by_watchpoint != NULL)
1998 return the_low_target.stopped_by_watchpoint ();
1999 else
2000 return 0;
2001 }
2002
2003 static CORE_ADDR
2004 linux_stopped_data_address (void)
2005 {
2006 if (the_low_target.stopped_data_address != NULL)
2007 return the_low_target.stopped_data_address ();
2008 else
2009 return 0;
2010 }
2011
2012 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2013 #if defined(__mcoldfire__)
2014 /* These should really be defined in the kernel's ptrace.h header. */
2015 #define PT_TEXT_ADDR 49*4
2016 #define PT_DATA_ADDR 50*4
2017 #define PT_TEXT_END_ADDR 51*4
2018 #endif
2019
2020 /* Under uClinux, programs are loaded at non-zero offsets, which we need
2021 to tell gdb about. */
2022
2023 static int
2024 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
2025 {
2026 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
2027 unsigned long text, text_end, data;
2028 int pid = get_thread_process (current_inferior)->head.id;
2029
2030 errno = 0;
2031
2032 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
2033 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
2034 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
2035
2036 if (errno == 0)
2037 {
2038 /* Both text and data offsets produced at compile-time (and so
2039 used by gdb) are relative to the beginning of the program,
2040 with the data segment immediately following the text segment.
2041 However, the actual runtime layout in memory may put the data
2042 somewhere else, so when we send gdb a data base-address, we
2043 use the real data base address and subtract the compile-time
2044 data base-address from it (which is just the length of the
2045 text segment). BSS immediately follows data in both
2046 cases. */
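/* For example, if the text segment is 0x10000 bytes long and was
   loaded at 0x80000000 while the data segment ended up at 0x90000000,
   we report *TEXT_P = 0x80000000 and *DATA_P = 0x90000000 - 0x10000,
   so that a compile-time data address (which starts at the text
   length) plus *DATA_P gives the real runtime address.  */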
2047 *text_p = text;
2048 *data_p = data - (text_end - text);
2049
2050 return 1;
2051 }
2052 #endif
2053 return 0;
2054 }
2055 #endif
2056
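/* Handle a qXfer:osdata:read request for the "processes" annex: scan
   /proc for numeric entries and build an XML document of the form

     <osdata type="processes">
       <item>
         <column name="pid">PID</column>
         <column name="user">USER</column>
         <column name="command">COMMAND</column>
       </item>
       ...
     </osdata>

   The document is built once, when OFFSET is zero, and then served
   from a static buffer in chunks of at most LEN bytes; the return
   value is the number of bytes copied into READBUF, or 0 once
   everything has been sent (or if the request is not one we handle).  */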
2057 static int
2058 linux_qxfer_osdata (const char *annex,
2059 unsigned char *readbuf, unsigned const char *writebuf,
2060 CORE_ADDR offset, int len)
2061 {
2062 /* We take a snapshot of the process list when the object starts to be
2063 read.  */
2064 static const char *buf;
2065 static long len_avail = -1;
2066 static struct buffer buffer;
2067
2068 DIR *dirp;
2069
2070 if (strcmp (annex, "processes") != 0)
2071 return 0;
2072
2073 if (!readbuf || writebuf)
2074 return 0;
2075
2076 if (offset == 0)
2077 {
2078 if (len_avail != -1 && len_avail != 0)
2079 buffer_free (&buffer);
2080 len_avail = 0;
2081 buf = NULL;
2082 buffer_init (&buffer);
2083 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
2084
2085 dirp = opendir ("/proc");
2086 if (dirp)
2087 {
2088 struct dirent *dp;
2089 while ((dp = readdir (dirp)) != NULL)
2090 {
2091 struct stat statbuf;
2092 char procentry[sizeof ("/proc/4294967295")];
2093
2094 if (!isdigit (dp->d_name[0])
2095 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
2096 continue;
2097
2098 sprintf (procentry, "/proc/%s", dp->d_name);
2099 if (stat (procentry, &statbuf) == 0
2100 && S_ISDIR (statbuf.st_mode))
2101 {
2102 char pathname[128];
2103 FILE *f;
2104 char cmd[MAXPATHLEN + 1];
2105 struct passwd *entry;
2106
2107 sprintf (pathname, "/proc/%s/cmdline", dp->d_name);
2108 entry = getpwuid (statbuf.st_uid);
2109
2110 if ((f = fopen (pathname, "r")) != NULL)
2111 {
2112 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
2113 if (len > 0)
2114 {
2115 int i;
2116 for (i = 0; i < len; i++)
2117 if (cmd[i] == '\0')
2118 cmd[i] = ' ';
2119 cmd[len] = '\0';
2120
2121 buffer_xml_printf (
2122 &buffer,
2123 "<item>"
2124 "<column name=\"pid\">%s</column>"
2125 "<column name=\"user\">%s</column>"
2126 "<column name=\"command\">%s</column>"
2127 "</item>",
2128 dp->d_name,
2129 entry ? entry->pw_name : "?",
2130 cmd);
2131 }
2132 fclose (f);
2133 }
2134 }
2135 }
2136
2137 closedir (dirp);
2138 }
2139 buffer_grow_str0 (&buffer, "</osdata>\n");
2140 buf = buffer_finish (&buffer);
2141 len_avail = strlen (buf);
2142 }
2143
2144 if (offset >= len_avail)
2145 {
2146 /* Done. Get rid of the data. */
2147 buffer_free (&buffer);
2148 buf = NULL;
2149 len_avail = 0;
2150 return 0;
2151 }
2152
2153 if (len > len_avail - offset)
2154 len = len_avail - offset;
2155 memcpy (readbuf, buf + offset, len);
2156
2157 return len;
2158 }
2159
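/* The target_ops vector exported to the rest of gdbserver.  The
   initializers are positional, so their order must match struct
   target_ops in target.h; optional operations this build does not
   provide are left NULL.  */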
2160 static struct target_ops linux_target_ops = {
2161 linux_create_inferior,
2162 linux_attach,
2163 linux_kill,
2164 linux_detach,
2165 linux_join,
2166 linux_thread_alive,
2167 linux_resume,
2168 linux_wait,
2169 linux_fetch_registers,
2170 linux_store_registers,
2171 linux_read_memory,
2172 linux_write_memory,
2173 linux_look_up_symbols,
2174 linux_request_interrupt,
2175 linux_read_auxv,
2176 linux_insert_watchpoint,
2177 linux_remove_watchpoint,
2178 linux_stopped_by_watchpoint,
2179 linux_stopped_data_address,
2180 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2181 linux_read_offsets,
2182 #else
2183 NULL,
2184 #endif
2185 #ifdef USE_THREAD_DB
2186 thread_db_get_tls_address,
2187 #else
2188 NULL,
2189 #endif
2190 NULL,
2191 hostio_last_error_from_errno,
2192 linux_qxfer_osdata,
2193 };
2194
2195 static void
2196 linux_init_signals (void)
2197 {
2198 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
2199 to find what the cancel signal actually is. */
2200 signal (__SIGRTMIN+1, SIG_IGN);
2201 }
2202
2203 void
2204 initialize_low (void)
2205 {
2206 thread_db_active = 0;
2207 set_target_ops (&linux_target_ops);
2208 set_breakpoint_data (the_low_target.breakpoint,
2209 the_low_target.breakpoint_len);
2210 linux_init_signals ();
2211 linux_test_for_tracefork ();
2212 #ifdef HAVE_LINUX_REGSETS
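/* Count the regsets the target provides (target_regsets is terminated
   by an entry with a negative size) and allocate one flag byte per
   regset; disabled_regsets is used to remember regsets the running
   kernel turns out not to support.  */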
2213 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
2214 ;
2215 disabled_regsets = xmalloc (num_regsets);
2216 #endif
2217 }