* linux-low.c (linux_kill): Handle being called with no threads.
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23 #include "server.h"
24 #include "linux-low.h"
25
26 #include <sys/wait.h>
27 #include <stdio.h>
28 #include <sys/param.h>
29 #include <sys/dir.h>
30 #include <sys/ptrace.h>
31 #include <sys/user.h>
32 #include <signal.h>
33 #include <sys/ioctl.h>
34 #include <fcntl.h>
35 #include <string.h>
36 #include <stdlib.h>
37 #include <unistd.h>
38 #include <errno.h>
39 #include <sys/syscall.h>
40
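/* Fallback definitions for kernels or C libraries whose ptrace.h does not
   yet declare these requests; 0x4202 and 0x4203 are the request numbers
   used by the Linux kernel for PTRACE_GETSIGINFO and PTRACE_SETSIGINFO.  */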
41 #ifndef PTRACE_GETSIGINFO
42 # define PTRACE_GETSIGINFO 0x4202
43 # define PTRACE_SETSIGINFO 0x4203
44 #endif
45
46 /* ``all_threads'' is keyed by the LWP ID, but it should be keyed by the
47 thread ID instead.  Making that change requires updating the ID in place
48 as soon as we go from !using_threads to using_threads.
49
50 ``all_processes'' is keyed by the process ID - which on Linux is (presently)
51 the same as the LWP ID. */
52
53 struct inferior_list all_processes;
54
55 /* FIXME this is a bit of a hack, and could be removed. */
56 int stopping_threads;
57
58 /* FIXME make into a target method? */
59 int using_threads;
60
61 static void linux_resume_one_process (struct inferior_list_entry *entry,
62 int step, int signal, siginfo_t *info);
63 static void linux_resume (struct thread_resume *resume_info);
64 static void stop_all_processes (void);
65 static int linux_wait_for_event (struct thread_info *child);
66
67 struct pending_signals
68 {
69 int signal;
70 siginfo_t info;
71 struct pending_signals *prev;
72 };
73
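/* ptrace on Linux transfers memory and USER-area data one `long' at a
   time; these types are used below for the address argument and for the
   transferred words.  */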
74 #define PTRACE_ARG3_TYPE long
75 #define PTRACE_XFER_TYPE long
76
77 #ifdef HAVE_LINUX_REGSETS
78 static int use_regsets_p = 1;
79 #endif
80
81 int debug_threads = 0;
82
83 #define pid_of(proc) ((proc)->head.id)
84
85 /* FIXME: Delete eventually. */
86 #define inferior_pid (pid_of (get_thread_process (current_inferior)))
87
88 /* This function should only be called if the process got a SIGTRAP.
89 The SIGTRAP could mean several things.
90
91 On i386, where decr_pc_after_break is non-zero:
92 If we were single-stepping this process using PTRACE_SINGLESTEP,
93 we will get only the one SIGTRAP (even if the instruction we
94 stepped over was a breakpoint). The value of $eip will be the
95 next instruction.
96 If we continue the process using PTRACE_CONT, we will get a
97 SIGTRAP when we hit a breakpoint. The value of $eip will be
98 the instruction after the breakpoint (i.e. needs to be
99 decremented). If we report the SIGTRAP to GDB, we must also
100 report the undecremented PC. If we cancel the SIGTRAP, we
101 must resume at the decremented PC.
102
103 (Presumably, not yet tested) On a non-decr_pc_after_break machine
104 with hardware or kernel single-step:
105 If we single-step over a breakpoint instruction, our PC will
106 point at the following instruction. If we continue and hit a
107 breakpoint instruction, our PC will point at the breakpoint
108 instruction. */
109
110 static CORE_ADDR
111 get_stop_pc (void)
112 {
113 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
114
115 if (get_thread_process (current_inferior)->stepping)
116 return stop_pc;
117 else
118 return stop_pc - the_low_target.decr_pc_after_break;
119 }
120
121 static void *
122 add_process (unsigned long pid)
123 {
124 struct process_info *process;
125
126 process = (struct process_info *) malloc (sizeof (*process));
127 memset (process, 0, sizeof (*process));
128
129 process->head.id = pid;
130
131 /* Default to tid == lwpid == pid. */
132 process->tid = pid;
133 process->lwpid = pid;
134
135 add_inferior_to_list (&all_processes, &process->head);
136
137 return process;
138 }
139
140 /* Start an inferior process and return its pid.
141 ALLARGS is a vector of program-name and args. */
142
143 static int
144 linux_create_inferior (char *program, char **allargs)
145 {
146 void *new_process;
147 int pid;
148
149 #if defined(__UCLIBC__) && !defined(__UCLIBC_HAS_MMU__)
150 pid = vfork ();
151 #else
152 pid = fork ();
153 #endif
154 if (pid < 0)
155 perror_with_name ("fork");
156
157 if (pid == 0)
158 {
159 ptrace (PTRACE_TRACEME, 0, 0, 0);
160
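/* gdbserver ignores __SIGRTMIN + 1 (see linux_init_signals), and an
   ignored disposition survives exec; restore the default here so the
   inferior's thread library sees the signal normally.  */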
161 signal (__SIGRTMIN + 1, SIG_DFL);
162
163 setpgid (0, 0);
164
165 execv (program, allargs);
166
167 fprintf (stderr, "Cannot exec %s: %s.\n", program,
168 strerror (errno));
169 fflush (stderr);
170 _exit (0177);
171 }
172
173 new_process = add_process (pid);
174 add_thread (pid, new_process, pid);
175
176 return pid;
177 }
178
179 /* Attach to an inferior process. */
180
181 void
182 linux_attach_lwp (unsigned long pid, unsigned long tid)
183 {
184 struct process_info *new_process;
185
186 if (ptrace (PTRACE_ATTACH, pid, 0, 0) != 0)
187 {
188 fprintf (stderr, "Cannot attach to process %ld: %s (%d)\n", pid,
189 strerror (errno), errno);
190 fflush (stderr);
191
192 /* If we fail to attach to an LWP, just return. */
193 if (!using_threads)
194 _exit (0177);
195 return;
196 }
197
198 new_process = (struct process_info *) add_process (pid);
199 add_thread (tid, new_process, pid);
200
201 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
202 brings it to a halt. We should ignore that SIGSTOP and resume the process
203 (unless this is the first process, in which case the flag will be cleared
204 in linux_attach).
205
206 On the other hand, if we are currently trying to stop all threads, we
207 should treat the new thread as if we had sent it a SIGSTOP. This works
208 because we are guaranteed that add_process added us to the end of the
209 list, and so the new thread has not yet reached wait_for_sigstop (but
210 will). */
211 if (! stopping_threads)
212 new_process->stop_expected = 1;
213 }
214
215 int
216 linux_attach (unsigned long pid)
217 {
218 struct process_info *process;
219
220 linux_attach_lwp (pid, pid);
221
222 /* Don't ignore the initial SIGSTOP if we just attached to this process. */
223 process = (struct process_info *) find_inferior_id (&all_processes, pid);
224 process->stop_expected = 0;
225
226 return 0;
227 }
228
229 /* Kill the inferior process. Make us have no inferior. */
230
231 static void
232 linux_kill_one_process (struct inferior_list_entry *entry)
233 {
234 struct thread_info *thread = (struct thread_info *) entry;
235 struct process_info *process = get_thread_process (thread);
236 int wstat;
237
238 /* We avoid killing the first thread here, because of a Linux kernel (at
239 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
240 the children get a chance to be reaped, it will remain a zombie
241 forever. */
242 if (entry == all_threads.head)
243 return;
244
245 do
246 {
247 ptrace (PTRACE_KILL, pid_of (process), 0, 0);
248
249 /* Make sure it died. The loop is most likely unnecessary. */
250 wstat = linux_wait_for_event (thread);
251 } while (WIFSTOPPED (wstat));
252 }
253
254 static void
255 linux_kill (void)
256 {
257 struct thread_info *thread = (struct thread_info *) all_threads.head;
258 struct process_info *process;
259 int wstat;
260
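/* If there are no threads at all (for instance, the inferior has already
   gone away), there is nothing to kill.  */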
261 if (thread == NULL)
262 return;
263
264 for_each_inferior (&all_threads, linux_kill_one_process);
265
266 /* See the comment in linux_kill_one_process. We did not kill the first
267 thread in the list, so do so now. */
268 process = get_thread_process (thread);
269 do
270 {
271 ptrace (PTRACE_KILL, pid_of (process), 0, 0);
272
273 /* Make sure it died. The loop is most likely unnecessary. */
274 wstat = linux_wait_for_event (thread);
275 } while (WIFSTOPPED (wstat));
276 }
277
278 static void
279 linux_detach_one_process (struct inferior_list_entry *entry)
280 {
281 struct thread_info *thread = (struct thread_info *) entry;
282 struct process_info *process = get_thread_process (thread);
283
284 ptrace (PTRACE_DETACH, pid_of (process), 0, 0);
285 }
286
287 static void
288 linux_detach (void)
289 {
290 for_each_inferior (&all_threads, linux_detach_one_process);
291 }
292
293 /* Return nonzero if the given thread is still alive. */
294 static int
295 linux_thread_alive (unsigned long tid)
296 {
297 if (find_inferior_id (&all_threads, tid) != NULL)
298 return 1;
299 else
300 return 0;
301 }
302
303 /* Return nonzero if this process stopped at a breakpoint which
304 no longer appears to be inserted. Also adjust the PC
305 appropriately to resume where the breakpoint used to be. */
306 static int
307 check_removed_breakpoint (struct process_info *event_child)
308 {
309 CORE_ADDR stop_pc;
310 struct thread_info *saved_inferior;
311
312 if (event_child->pending_is_breakpoint == 0)
313 return 0;
314
315 if (debug_threads)
316 fprintf (stderr, "Checking for breakpoint.\n");
317
318 saved_inferior = current_inferior;
319 current_inferior = get_process_thread (event_child);
320
321 stop_pc = get_stop_pc ();
322
323 /* If the PC has changed since we stopped, then we shouldn't do
324 anything. This happens if, for instance, GDB handled the
325 decr_pc_after_break subtraction itself. */
326 if (stop_pc != event_child->pending_stop_pc)
327 {
328 if (debug_threads)
329 fprintf (stderr, "Ignoring, PC was changed.\n");
330
331 event_child->pending_is_breakpoint = 0;
332 current_inferior = saved_inferior;
333 return 0;
334 }
335
336 /* If the breakpoint is still there, we will report hitting it. */
337 if ((*the_low_target.breakpoint_at) (stop_pc))
338 {
339 if (debug_threads)
340 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
341 current_inferior = saved_inferior;
342 return 0;
343 }
344
345 if (debug_threads)
346 fprintf (stderr, "Removed breakpoint.\n");
347
348 /* For decr_pc_after_break targets, here is where we perform the
349 decrement. We go immediately from this function to resuming,
350 and can not safely call get_stop_pc () again. */
351 if (the_low_target.set_pc != NULL)
352 (*the_low_target.set_pc) (stop_pc);
353
354 /* We consumed the pending SIGTRAP. */
355 event_child->pending_is_breakpoint = 0;
356 event_child->status_pending_p = 0;
357 event_child->status_pending = 0;
358
359 current_inferior = saved_inferior;
360 return 1;
361 }
362
363 /* Return 1 if this process has an interesting status pending. This function
364 may silently resume an inferior process. */
365 static int
366 status_pending_p (struct inferior_list_entry *entry, void *dummy)
367 {
368 struct process_info *process = (struct process_info *) entry;
369
370 if (process->status_pending_p)
371 if (check_removed_breakpoint (process))
372 {
373 /* This thread was stopped at a breakpoint, and the breakpoint
374 is now gone. We were told to continue (or step...) all threads,
375 so GDB isn't trying to single-step past this breakpoint.
376 So instead of reporting the old SIGTRAP, pretend we got to
377 the breakpoint just after it was removed instead of just
378 before; resume the process. */
379 linux_resume_one_process (&process->head, 0, 0, NULL);
380 return 0;
381 }
382
383 return process->status_pending_p;
384 }
385
386 static void
387 linux_wait_for_process (struct process_info **childp, int *wstatp)
388 {
389 int ret;
390 int to_wait_for = -1;
391
392 if (*childp != NULL)
393 to_wait_for = (*childp)->lwpid;
394
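/* Poll with WNOHANG, alternating between ordinary children and __WCLONE
   children: a single waitpid call only reports one of the two classes, so
   both are checked each round, with a short sleep in between to avoid
   spinning.  */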
395 while (1)
396 {
397 ret = waitpid (to_wait_for, wstatp, WNOHANG);
398
399 if (ret == -1)
400 {
401 if (errno != ECHILD)
402 perror_with_name ("waitpid");
403 }
404 else if (ret > 0)
405 break;
406
407 ret = waitpid (to_wait_for, wstatp, WNOHANG | __WCLONE);
408
409 if (ret == -1)
410 {
411 if (errno != ECHILD)
412 perror_with_name ("waitpid (WCLONE)");
413 }
414 else if (ret > 0)
415 break;
416
417 usleep (1000);
418 }
419
420 if (debug_threads
421 && (!WIFSTOPPED (*wstatp)
422 || (WSTOPSIG (*wstatp) != 32
423 && WSTOPSIG (*wstatp) != 33)))
424 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
425
426 if (to_wait_for == -1)
427 *childp = (struct process_info *) find_inferior_id (&all_processes, ret);
428
429 (*childp)->stopped = 1;
430 (*childp)->pending_is_breakpoint = 0;
431
432 (*childp)->last_status = *wstatp;
433
434 if (debug_threads
435 && WIFSTOPPED (*wstatp))
436 {
437 current_inferior = (struct thread_info *)
438 find_inferior_id (&all_threads, (*childp)->tid);
439 /* For testing only; the low target's get_pc hook prints a diagnostic. */
440 if (the_low_target.get_pc != NULL)
441 get_stop_pc ();
442 }
443 }
444
445 static int
446 linux_wait_for_event (struct thread_info *child)
447 {
448 CORE_ADDR stop_pc;
449 struct process_info *event_child;
450 int wstat;
451
452 /* Check for a process with a pending status. */
453 /* It is possible that the user changed the pending task's registers since
454 it stopped. We correctly handle the change of PC if we hit a breakpoint
455 (in check_removed_breakpoint); signals should be reported anyway. */
456 if (child == NULL)
457 {
458 event_child = (struct process_info *)
459 find_inferior (&all_processes, status_pending_p, NULL);
460 if (debug_threads && event_child)
461 fprintf (stderr, "Got a pending child %ld\n", event_child->lwpid);
462 }
463 else
464 {
465 event_child = get_thread_process (child);
466 if (event_child->status_pending_p
467 && check_removed_breakpoint (event_child))
468 event_child = NULL;
469 }
470
471 if (event_child != NULL)
472 {
473 if (event_child->status_pending_p)
474 {
475 if (debug_threads)
476 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
477 event_child->lwpid, event_child->status_pending);
478 wstat = event_child->status_pending;
479 event_child->status_pending_p = 0;
480 event_child->status_pending = 0;
481 current_inferior = get_process_thread (event_child);
482 return wstat;
483 }
484 }
485
486 /* We only enter this loop if no process has a pending wait status. Thus
487 any action taken in response to a wait status inside this loop is
488 responding as soon as we detect the status, not after any pending
489 events. */
490 while (1)
491 {
492 if (child == NULL)
493 event_child = NULL;
494 else
495 event_child = get_thread_process (child);
496
497 linux_wait_for_process (&event_child, &wstat);
498
499 if (event_child == NULL)
500 error ("event from unknown child");
501
502 current_inferior = (struct thread_info *)
503 find_inferior_id (&all_threads, event_child->tid);
504
505 /* Check for thread exit. */
506 if (using_threads && ! WIFSTOPPED (wstat))
507 {
508 if (debug_threads)
509 fprintf (stderr, "Thread %ld (LWP %ld) exiting\n",
510 event_child->tid, event_child->head.id);
511
512 /* If the last thread is exiting, just return. */
513 if (all_threads.head == all_threads.tail)
514 return wstat;
515
516 dead_thread_notify (event_child->tid);
517
518 remove_inferior (&all_processes, &event_child->head);
519 free (event_child);
520 remove_thread (current_inferior);
521 current_inferior = (struct thread_info *) all_threads.head;
522
523 /* If we were waiting for this particular child to do something...
524 well, it did something. */
525 if (child != NULL)
526 return wstat;
527
528 /* Wait for a more interesting event. */
529 continue;
530 }
531
532 if (using_threads
533 && WIFSTOPPED (wstat)
534 && WSTOPSIG (wstat) == SIGSTOP
535 && event_child->stop_expected)
536 {
537 if (debug_threads)
538 fprintf (stderr, "Expected stop.\n");
539 event_child->stop_expected = 0;
540 linux_resume_one_process (&event_child->head,
541 event_child->stepping, 0, NULL);
542 continue;
543 }
544
545 /* If GDB is not interested in this signal, don't stop other
546 threads, and don't report it to GDB. Just resume the
547 inferior right away. We do this for threading-related
548 signals as well as any that GDB specifically requested
549 we ignore. But never ignore SIGSTOP if we sent it
550 ourselves. */
551 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
552 thread library? */
553 if (WIFSTOPPED (wstat)
554 && ((using_threads && (WSTOPSIG (wstat) == __SIGRTMIN
555 || WSTOPSIG (wstat) == __SIGRTMIN + 1))
556 || (pass_signals[target_signal_from_host (WSTOPSIG (wstat))]
557 && (WSTOPSIG (wstat) != SIGSTOP
558 || !event_child->sigstop_sent))))
559 {
560 siginfo_t info, *info_p;
561
562 if (debug_threads)
563 fprintf (stderr, "Ignored signal %d for %ld (LWP %ld).\n",
564 WSTOPSIG (wstat), event_child->tid,
565 event_child->head.id);
566
567 if (ptrace (PTRACE_GETSIGINFO, event_child->lwpid, 0, &info) == 0)
568 info_p = &info;
569 else
570 info_p = NULL;
571 linux_resume_one_process (&event_child->head,
572 event_child->stepping,
573 WSTOPSIG (wstat), info_p);
574 continue;
575 }
576
577 /* If this event was not handled above, and is not a SIGTRAP, report
578 it. */
579 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGTRAP)
580 return wstat;
581
582 /* If this target does not support breakpoints, we simply report the
583 SIGTRAP; it's of no concern to us. */
584 if (the_low_target.get_pc == NULL)
585 return wstat;
586
587 stop_pc = get_stop_pc ();
588
589 /* bp_reinsert will only be set if we were single-stepping.
590 Notice that we will resume the process after hitting
591 a gdbserver breakpoint; single-stepping to/over one
592 is not supported (yet). */
593 if (event_child->bp_reinsert != 0)
594 {
595 if (debug_threads)
596 fprintf (stderr, "Reinserted breakpoint.\n");
597 reinsert_breakpoint (event_child->bp_reinsert);
598 event_child->bp_reinsert = 0;
599
600 /* Clear the single-stepping flag and SIGTRAP as we resume. */
601 linux_resume_one_process (&event_child->head, 0, 0, NULL);
602 continue;
603 }
604
605 if (debug_threads)
606 fprintf (stderr, "Hit a (non-reinsert) breakpoint.\n");
607
608 if (check_breakpoints (stop_pc) != 0)
609 {
610 /* We hit one of our own breakpoints. We mark it as a pending
611 breakpoint, so that check_removed_breakpoint () will do the PC
612 adjustment for us at the appropriate time. */
613 event_child->pending_is_breakpoint = 1;
614 event_child->pending_stop_pc = stop_pc;
615
616 /* Now we need to put the breakpoint back. We continue in the event
617 loop instead of simply replacing the breakpoint right away,
618 in order to not lose signals sent to the thread that hit the
619 breakpoint. Unfortunately this increases the window where another
620 thread could sneak past the removed breakpoint. For the current
621 use of server-side breakpoints (thread creation) this is
622 acceptable; but it needs to be considered before this breakpoint
623 mechanism can be used in more general ways. For some breakpoints
624 it may be necessary to stop all other threads, but that should
625 be avoided where possible.
626
627 If breakpoint_reinsert_addr is NULL, that means that we can
628 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
629 mark it for reinsertion, and single-step.
630
631 Otherwise, call the target function to figure out where we need
632 our temporary breakpoint, create it, and continue executing this
633 process. */
634 if (the_low_target.breakpoint_reinsert_addr == NULL)
635 {
636 event_child->bp_reinsert = stop_pc;
637 uninsert_breakpoint (stop_pc);
638 linux_resume_one_process (&event_child->head, 1, 0, NULL);
639 }
640 else
641 {
642 reinsert_breakpoint_by_bp
643 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
644 linux_resume_one_process (&event_child->head, 0, 0, NULL);
645 }
646
647 continue;
648 }
649
650 /* If we were single-stepping, we definitely want to report the
651 SIGTRAP. The single-step operation has completed, so also
652 clear the stepping flag; in general this does not matter,
653 because the SIGTRAP will be reported to the client, which
654 will give us a new action for this thread, but clear it for
655 consistency anyway. It's safe to clear the stepping flag
656 because the only consumer of get_stop_pc () after this point
657 is check_removed_breakpoint, and pending_is_breakpoint is not
658 set. It might be wiser to use a step_completed flag instead. */
659 if (event_child->stepping)
660 {
661 event_child->stepping = 0;
662 return wstat;
663 }
664
665 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
666 Check if it is a breakpoint, and if so mark the process information
667 accordingly. This will handle both the necessary fiddling with the
668 PC on decr_pc_after_break targets and suppressing extra threads
669 hitting a breakpoint if two hit it at once and then GDB removes it
670 after the first is reported. Arguably it would be better to report
671 multiple threads hitting breakpoints simultaneously, but the current
672 remote protocol does not allow this. */
673 if ((*the_low_target.breakpoint_at) (stop_pc))
674 {
675 event_child->pending_is_breakpoint = 1;
676 event_child->pending_stop_pc = stop_pc;
677 }
678
679 return wstat;
680 }
681
682 /* NOTREACHED */
683 return 0;
684 }
685
686 /* Wait for the process and return its status. */
687
688 static unsigned char
689 linux_wait (char *status)
690 {
691 int w;
692 struct thread_info *child = NULL;
693
694 retry:
695 /* If we were only supposed to resume one thread, only wait for
696 that thread - if it's still alive. If it died, however - which
697 can happen if we're coming from the thread death case below -
698 then we need to make sure we restart the other threads. We could
699 pick a thread at random or restart all; restarting all is less
700 arbitrary. */
701 if (cont_thread != 0 && cont_thread != -1)
702 {
703 child = (struct thread_info *) find_inferior_id (&all_threads,
704 cont_thread);
705
706 /* No stepping, no signal - unless one is pending already, of course. */
707 if (child == NULL)
708 {
709 struct thread_resume resume_info;
710 resume_info.thread = -1;
711 resume_info.step = resume_info.sig = resume_info.leave_stopped = 0;
712 linux_resume (&resume_info);
713 }
714 }
715
716 enable_async_io ();
717 unblock_async_io ();
718 w = linux_wait_for_event (child);
719 stop_all_processes ();
720 disable_async_io ();
721
722 /* If we are waiting for a particular child, and it exited,
723 linux_wait_for_event will return its exit status. Similarly if
724 the last child exited. If this is not the last child, however,
725 do not report it as exited until there is a 'thread exited' response
726 available in the remote protocol. Instead, just wait for another event.
727 This should be safe, because if the thread crashed we will already
728 have reported the termination signal to GDB; that should stop any
729 in-progress stepping operations, etc.
730
731 Report the exit status of the last thread to exit. This matches
732 LinuxThreads' behavior. */
733
734 if (all_threads.head == all_threads.tail)
735 {
736 if (WIFEXITED (w))
737 {
738 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
739 *status = 'W';
740 clear_inferiors ();
741 free (all_processes.head);
742 all_processes.head = all_processes.tail = NULL;
743 return WEXITSTATUS (w);
744 }
745 else if (!WIFSTOPPED (w))
746 {
747 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
748 *status = 'X';
749 clear_inferiors ();
750 free (all_processes.head);
751 all_processes.head = all_processes.tail = NULL;
752 return target_signal_from_host (WTERMSIG (w));
753 }
754 }
755 else
756 {
757 if (!WIFSTOPPED (w))
758 goto retry;
759 }
760
761 *status = 'T';
762 return target_signal_from_host (WSTOPSIG (w));
763 }
764
765 /* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
766 thread groups are in use, we need to use tkill. */
767
768 static int
769 kill_lwp (unsigned long lwpid, int signo)
770 {
771 static int tkill_failed;
772
773 errno = 0;
774
775 #ifdef SYS_tkill
776 if (!tkill_failed)
777 {
778 int ret = syscall (SYS_tkill, lwpid, signo);
779 if (errno != ENOSYS)
780 return ret;
781 errno = 0;
782 tkill_failed = 1;
783 }
784 #endif
785
786 return kill (lwpid, signo);
787 }
788
789 static void
790 send_sigstop (struct inferior_list_entry *entry)
791 {
792 struct process_info *process = (struct process_info *) entry;
793
794 if (process->stopped)
795 return;
796
797 /* If we already have a pending stop signal for this process, don't
798 send another. */
799 if (process->stop_expected)
800 {
801 process->stop_expected = 0;
802 return;
803 }
804
805 if (debug_threads)
806 fprintf (stderr, "Sending sigstop to process %ld\n", process->head.id);
807
808 kill_lwp (process->head.id, SIGSTOP);
809 process->sigstop_sent = 1;
810 }
811
812 static void
813 wait_for_sigstop (struct inferior_list_entry *entry)
814 {
815 struct process_info *process = (struct process_info *) entry;
816 struct thread_info *saved_inferior, *thread;
817 int wstat;
818 unsigned long saved_tid;
819
820 if (process->stopped)
821 return;
822
823 saved_inferior = current_inferior;
824 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
825 thread = (struct thread_info *) find_inferior_id (&all_threads,
826 process->tid);
827 wstat = linux_wait_for_event (thread);
828
829 /* If we stopped with a non-SIGSTOP signal, save it for later
830 and record the pending SIGSTOP. If the process exited, just
831 return. */
832 if (WIFSTOPPED (wstat)
833 && WSTOPSIG (wstat) != SIGSTOP)
834 {
835 if (debug_threads)
836 fprintf (stderr, "Stopped with non-sigstop signal\n");
837 process->status_pending_p = 1;
838 process->status_pending = wstat;
839 process->stop_expected = 1;
840 }
841
842 if (linux_thread_alive (saved_tid))
843 current_inferior = saved_inferior;
844 else
845 {
846 if (debug_threads)
847 fprintf (stderr, "Previously current thread died.\n");
848
849 /* Set a valid thread as current. */
850 set_desired_inferior (0);
851 }
852 }
853
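/* Send SIGSTOP to every process and wait until each one reports a stop.
   stopping_threads is set for the duration so that LWPs attached while
   this is in progress are handled as described in linux_attach_lwp.  */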
854 static void
855 stop_all_processes (void)
856 {
857 stopping_threads = 1;
858 for_each_inferior (&all_processes, send_sigstop);
859 for_each_inferior (&all_processes, wait_for_sigstop);
860 stopping_threads = 0;
861 }
862
863 /* Resume execution of the inferior process.
864 If STEP is nonzero, single-step it.
865 If SIGNAL is nonzero, give it that signal. */
866
867 static void
868 linux_resume_one_process (struct inferior_list_entry *entry,
869 int step, int signal, siginfo_t *info)
870 {
871 struct process_info *process = (struct process_info *) entry;
872 struct thread_info *saved_inferior;
873
874 if (process->stopped == 0)
875 return;
876
877 /* If we have pending signals or status, and a new signal, enqueue the
878 signal. Also enqueue the signal if we are waiting to reinsert a
879 breakpoint; it will be picked up again below. */
880 if (signal != 0
881 && (process->status_pending_p || process->pending_signals != NULL
882 || process->bp_reinsert != 0))
883 {
884 struct pending_signals *p_sig;
885 p_sig = malloc (sizeof (*p_sig));
886 p_sig->prev = process->pending_signals;
887 p_sig->signal = signal;
888 if (info == NULL)
889 memset (&p_sig->info, 0, sizeof (siginfo_t));
890 else
891 memcpy (&p_sig->info, info, sizeof (siginfo_t));
892 process->pending_signals = p_sig;
893 }
894
895 if (process->status_pending_p && !check_removed_breakpoint (process))
896 return;
897
898 saved_inferior = current_inferior;
899 current_inferior = get_process_thread (process);
900
901 if (debug_threads)
902 fprintf (stderr, "Resuming process %ld (%s, signal %d, stop %s)\n", inferior_pid,
903 step ? "step" : "continue", signal,
904 process->stop_expected ? "expected" : "not expected");
905
906 /* This bit needs some thinking about. If we get a signal that
907 we must report while a single-step reinsert is still pending,
908 we often end up resuming the thread. It might be better to
909 (ew) allow a stack of pending events; then we could be sure that
910 the reinsert happened right away and not lose any signals.
911
912 Making this stack would also shrink the window in which breakpoints are
913 uninserted (see comment in linux_wait_for_process) but not enough for
914 complete correctness, so it won't solve that problem. It may be
915 worthwhile just to solve this one, however. */
916 if (process->bp_reinsert != 0)
917 {
918 if (debug_threads)
919 fprintf (stderr, " pending reinsert at %08lx", (long)process->bp_reinsert);
920 if (step == 0)
921 fprintf (stderr, "BAD - reinserting but not stepping.\n");
922 step = 1;
923
924 /* Postpone any pending signal. It was enqueued above. */
925 signal = 0;
926 }
927
928 check_removed_breakpoint (process);
929
930 if (debug_threads && the_low_target.get_pc != NULL)
931 {
932 fprintf (stderr, " ");
933 (*the_low_target.get_pc) ();
934 }
935
936 /* If we have pending signals, consume one unless we are trying to reinsert
937 a breakpoint. */
938 if (process->pending_signals != NULL && process->bp_reinsert == 0)
939 {
940 struct pending_signals **p_sig;
941
942 p_sig = &process->pending_signals;
943 while ((*p_sig)->prev != NULL)
944 p_sig = &(*p_sig)->prev;
945
946 signal = (*p_sig)->signal;
947 if ((*p_sig)->info.si_signo != 0)
948 ptrace (PTRACE_SETSIGINFO, process->lwpid, 0, &(*p_sig)->info);
949
950 free (*p_sig);
951 *p_sig = NULL;
952 }
953
954 regcache_invalidate_one ((struct inferior_list_entry *)
955 get_process_thread (process));
956 errno = 0;
957 process->stopped = 0;
958 process->stepping = step;
959 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, process->lwpid, 0, signal);
960
961 current_inferior = saved_inferior;
962 if (errno)
963 perror_with_name ("ptrace");
964 }
965
966 static struct thread_resume *resume_ptr;
967
968 /* This function is called once per thread. We look up the thread
969 in RESUME_PTR, and mark the thread with a pointer to the appropriate
970 resume request.
971
972 This algorithm is O(threads * resume elements), but resume elements
973 is small (and will remain small at least until GDB supports thread
974 suspension). */
975 static void
976 linux_set_resume_request (struct inferior_list_entry *entry)
977 {
978 struct process_info *process;
979 struct thread_info *thread;
980 int ndx;
981
982 thread = (struct thread_info *) entry;
983 process = get_thread_process (thread);
984
985 ndx = 0;
986 while (resume_ptr[ndx].thread != -1 && resume_ptr[ndx].thread != entry->id)
987 ndx++;
988
989 process->resume = &resume_ptr[ndx];
990 }
991
992 /* This function is called once per thread. We check the thread's resume
993 request, which will tell us whether to resume, step, or leave the thread
994 stopped; and what signal, if any, it should be sent. For threads which
995 we aren't explicitly told otherwise, we preserve the stepping flag; this
996 is used for stepping over gdbserver-placed breakpoints. */
997
998 static void
999 linux_continue_one_thread (struct inferior_list_entry *entry)
1000 {
1001 struct process_info *process;
1002 struct thread_info *thread;
1003 int step;
1004
1005 thread = (struct thread_info *) entry;
1006 process = get_thread_process (thread);
1007
1008 if (process->resume->leave_stopped)
1009 return;
1010
1011 if (process->resume->thread == -1)
1012 step = process->stepping || process->resume->step;
1013 else
1014 step = process->resume->step;
1015
1016 linux_resume_one_process (&process->head, step, process->resume->sig, NULL);
1017
1018 process->resume = NULL;
1019 }
1020
1021 /* This function is called once per thread. We check the thread's resume
1022 request, which will tell us whether to resume, step, or leave the thread
1023 stopped; and what signal, if any, it should be sent. We queue any needed
1024 signals, since we won't actually resume. We already have a pending event
1025 to report, so we don't need to preserve any step requests; they should
1026 be re-issued if necessary. */
1027
1028 static void
1029 linux_queue_one_thread (struct inferior_list_entry *entry)
1030 {
1031 struct process_info *process;
1032 struct thread_info *thread;
1033
1034 thread = (struct thread_info *) entry;
1035 process = get_thread_process (thread);
1036
1037 if (process->resume->leave_stopped)
1038 return;
1039
1040 /* If we have a new signal, enqueue the signal. */
1041 if (process->resume->sig != 0)
1042 {
1043 struct pending_signals *p_sig;
1044 p_sig = malloc (sizeof (*p_sig));
1045 p_sig->prev = process->pending_signals;
1046 p_sig->signal = process->resume->sig;
1047 memset (&p_sig->info, 0, sizeof (siginfo_t));
1048
1049 /* If this is the same signal we were previously stopped by,
1050 make sure to queue its siginfo. We can ignore the return
1051 value of ptrace; if it fails, we'll skip
1052 PTRACE_SETSIGINFO. */
1053 if (WIFSTOPPED (process->last_status)
1054 && WSTOPSIG (process->last_status) == process->resume->sig)
1055 ptrace (PTRACE_GETSIGINFO, process->lwpid, 0, &p_sig->info);
1056
1057 process->pending_signals = p_sig;
1058 }
1059
1060 process->resume = NULL;
1061 }
1062
1063 /* Set *FLAG_P if this process has an interesting status pending. */
1064 static int
1065 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1066 {
1067 struct process_info *process = (struct process_info *) entry;
1068
1069 /* Processes which will not be resumed are not interesting, because
1070 we might not wait for them next time through linux_wait. */
1071 if (process->resume->leave_stopped)
1072 return 0;
1073
1074 /* If this thread has a removed breakpoint, we won't have any
1075 events to report later, so check now. check_removed_breakpoint
1076 may clear status_pending_p. We avoid calling check_removed_breakpoint
1077 for any thread that we are not otherwise going to resume - this
1078 lets us preserve stopped status when two threads hit a breakpoint.
1079 GDB removes the breakpoint to single-step a particular thread
1080 past it, then re-inserts it and resumes all threads. We want
1081 to report the second thread without resuming it in the interim. */
1082 if (process->status_pending_p)
1083 check_removed_breakpoint (process);
1084
1085 if (process->status_pending_p)
1086 * (int *) flag_p = 1;
1087
1088 return 0;
1089 }
1090
1091 static void
1092 linux_resume (struct thread_resume *resume_info)
1093 {
1094 int pending_flag;
1095
1096 /* Yes, the use of a global here is rather ugly. */
1097 resume_ptr = resume_info;
1098
1099 for_each_inferior (&all_threads, linux_set_resume_request);
1100
1101 /* If there is a thread which would otherwise be resumed, which
1102 has a pending status, then don't resume any threads - we can just
1103 report the pending status. Make sure to queue any signals
1104 that would otherwise be sent. */
1105 pending_flag = 0;
1106 find_inferior (&all_processes, resume_status_pending_p, &pending_flag);
1107
1108 if (debug_threads)
1109 {
1110 if (pending_flag)
1111 fprintf (stderr, "Not resuming, pending status\n");
1112 else
1113 fprintf (stderr, "Resuming, no pending status\n");
1114 }
1115
1116 if (pending_flag)
1117 for_each_inferior (&all_threads, linux_queue_one_thread);
1118 else
1119 {
1120 block_async_io ();
1121 enable_async_io ();
1122 for_each_inferior (&all_threads, linux_continue_one_thread);
1123 }
1124 }
1125
1126 #ifdef HAVE_LINUX_USRREGS
1127
1128 int
1129 register_addr (int regnum)
1130 {
1131 int addr;
1132
1133 if (regnum < 0 || regnum >= the_low_target.num_regs)
1134 error ("Invalid register number %d.", regnum);
1135
1136 addr = the_low_target.regmap[regnum];
1137
1138 return addr;
1139 }
1140
1141 /* Fetch one register. */
1142 static void
1143 fetch_register (int regno)
1144 {
1145 CORE_ADDR regaddr;
1146 int i, size;
1147 char *buf;
1148
1149 if (regno >= the_low_target.num_regs)
1150 return;
1151 if ((*the_low_target.cannot_fetch_register) (regno))
1152 return;
1153
1154 regaddr = register_addr (regno);
1155 if (regaddr == -1)
1156 return;
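/* Round the register size up to a whole number of ptrace transfer words.  */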
1157 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1158 & - sizeof (PTRACE_XFER_TYPE);
1159 buf = alloca (size);
1160 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1161 {
1162 errno = 0;
1163 *(PTRACE_XFER_TYPE *) (buf + i) =
1164 ptrace (PTRACE_PEEKUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr, 0);
1165 regaddr += sizeof (PTRACE_XFER_TYPE);
1166 if (errno != 0)
1167 {
1168 /* Warning, not error, in case we are attached; sometimes the
1169 kernel doesn't let us at the registers. */
1170 char *err = strerror (errno);
1171 char *msg = alloca (strlen (err) + 128);
1172 sprintf (msg, "reading register %d: %s", regno, err);
1173 error (msg);
1174 goto error_exit;
1175 }
1176 }
1177 if (the_low_target.left_pad_xfer
1178 && register_size (regno) < sizeof (PTRACE_XFER_TYPE))
1179 supply_register (regno, (buf + sizeof (PTRACE_XFER_TYPE)
1180 - register_size (regno)));
1181 else
1182 supply_register (regno, buf);
1183
1184 error_exit:;
1185 }
1186
1187 /* Fetch all registers, or just one, from the child process. */
1188 static void
1189 usr_fetch_inferior_registers (int regno)
1190 {
1191 if (regno == -1 || regno == 0)
1192 for (regno = 0; regno < the_low_target.num_regs; regno++)
1193 fetch_register (regno);
1194 else
1195 fetch_register (regno);
1196 }
1197
1198 /* Store our register values back into the inferior.
1199 If REGNO is -1, do this for all registers.
1200 Otherwise, REGNO specifies which register (so we can save time). */
1201 static void
1202 usr_store_inferior_registers (int regno)
1203 {
1204 CORE_ADDR regaddr;
1205 int i, size;
1206 char *buf;
1207
1208 if (regno >= 0)
1209 {
1210 if (regno >= the_low_target.num_regs)
1211 return;
1212
1213 if ((*the_low_target.cannot_store_register) (regno) == 1)
1214 return;
1215
1216 regaddr = register_addr (regno);
1217 if (regaddr == -1)
1218 return;
1219 errno = 0;
1220 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1221 & - sizeof (PTRACE_XFER_TYPE);
1222 buf = alloca (size);
1223 memset (buf, 0, size);
1224 if (the_low_target.left_pad_xfer
1225 && register_size (regno) < sizeof (PTRACE_XFER_TYPE))
1226 collect_register (regno, (buf + sizeof (PTRACE_XFER_TYPE)
1227 - register_size (regno)));
1228 else
1229 collect_register (regno, buf);
1230 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1231 {
1232 errno = 0;
1233 ptrace (PTRACE_POKEUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr,
1234 *(PTRACE_XFER_TYPE *) (buf + i));
1235 if (errno != 0)
1236 {
1237 if ((*the_low_target.cannot_store_register) (regno) == 0)
1238 {
1239 char *err = strerror (errno);
1240 char *msg = alloca (strlen (err) + 128);
1241 sprintf (msg, "writing register %d: %s",
1242 regno, err);
1243 error (msg);
1244 return;
1245 }
1246 }
1247 regaddr += sizeof (PTRACE_XFER_TYPE);
1248 }
1249 }
1250 else
1251 for (regno = 0; regno < the_low_target.num_regs; regno++)
1252 usr_store_inferior_registers (regno);
1253 }
1254 #endif /* HAVE_LINUX_USRREGS */
1255
1256
1257
1258 #ifdef HAVE_LINUX_REGSETS
1259
1260 static int
1261 regsets_fetch_inferior_registers ()
1262 {
1263 struct regset_info *regset;
1264 int saw_general_regs = 0;
1265
1266 regset = target_regsets;
1267
1268 while (regset->size >= 0)
1269 {
1270 void *buf;
1271 int res;
1272
1273 if (regset->size == 0)
1274 {
1275 regset ++;
1276 continue;
1277 }
1278
1279 buf = malloc (regset->size);
1280 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1281 if (res < 0)
1282 {
1283 if (errno == EIO)
1284 {
1285 /* If we get EIO on the first regset, do not try regsets again.
1286 If we get EIO on a later regset, disable that regset. */
1287 if (regset == target_regsets)
1288 {
1289 use_regsets_p = 0;
1290 return -1;
1291 }
1292 else
1293 {
1294 regset->size = 0;
1295 continue;
1296 }
1297 }
1298 else
1299 {
1300 char s[256];
1301 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%ld",
1302 inferior_pid);
1303 perror (s);
1304 }
1305 }
1306 else if (regset->type == GENERAL_REGS)
1307 saw_general_regs = 1;
1308 regset->store_function (buf);
1309 regset ++;
1310 }
1311 if (saw_general_regs)
1312 return 0;
1313 else
1314 return 1;
1315 }
1316
1317 static int
1318 regsets_store_inferior_registers ()
1319 {
1320 struct regset_info *regset;
1321 int saw_general_regs = 0;
1322
1323 regset = target_regsets;
1324
1325 while (regset->size >= 0)
1326 {
1327 void *buf;
1328 int res;
1329
1330 if (regset->size == 0)
1331 {
1332 regset ++;
1333 continue;
1334 }
1335
1336 buf = malloc (regset->size);
1337
1338 /* First fill the buffer with the current register set contents,
1339 in case there are any items in the kernel's regset that are
1340 not in gdbserver's regcache. */
1341 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1342
1343 if (res == 0)
1344 {
1345 /* Then overlay our cached registers on that. */
1346 regset->fill_function (buf);
1347
1348 /* Only now do we write the register set. */
1349 res = ptrace (regset->set_request, inferior_pid, 0, buf);
1350 }
1351
1352 if (res < 0)
1353 {
1354 if (errno == EIO)
1355 {
1356 /* If we get EIO on the first regset, do not try regsets again.
1357 If we get EIO on a later regset, disable that regset. */
1358 if (regset == target_regsets)
1359 {
1360 use_regsets_p = 0;
1361 return -1;
1362 }
1363 else
1364 {
1365 regset->size = 0;
1366 continue;
1367 }
1368 }
1369 else
1370 {
1371 perror ("Warning: ptrace(regsets_store_inferior_registers)");
1372 }
1373 }
1374 else if (regset->type == GENERAL_REGS)
1375 saw_general_regs = 1;
1376 regset ++;
1377 free (buf);
1378 }
1379 if (saw_general_regs)
1380 return 0;
1381 else
1382 return 1;
1383 return 0;
1384 }
1385
1386 #endif /* HAVE_LINUX_REGSETS */
1387
1388
1389 void
1390 linux_fetch_registers (int regno)
1391 {
1392 #ifdef HAVE_LINUX_REGSETS
1393 if (use_regsets_p)
1394 {
1395 if (regsets_fetch_inferior_registers () == 0)
1396 return;
1397 }
1398 #endif
1399 #ifdef HAVE_LINUX_USRREGS
1400 usr_fetch_inferior_registers (regno);
1401 #endif
1402 }
1403
1404 void
1405 linux_store_registers (int regno)
1406 {
1407 #ifdef HAVE_LINUX_REGSETS
1408 if (use_regsets_p)
1409 {
1410 if (regsets_store_inferior_registers () == 0)
1411 return;
1412 }
1413 #endif
1414 #ifdef HAVE_LINUX_USRREGS
1415 usr_store_inferior_registers (regno);
1416 #endif
1417 }
1418
1419
1420 /* Copy LEN bytes from inferior's memory starting at MEMADDR
1421 to debugger memory starting at MYADDR. */
1422
1423 static int
1424 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
1425 {
1426 register int i;
1427 /* Round starting address down to longword boundary. */
1428 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1429 /* Round ending address up; get number of longwords that makes. */
1430 register int count
1431 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
1432 / sizeof (PTRACE_XFER_TYPE);
1433 /* Allocate buffer of that many longwords. */
1434 register PTRACE_XFER_TYPE *buffer
1435 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1436
1437 /* Read all the longwords */
1438 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1439 {
1440 errno = 0;
1441 buffer[i] = ptrace (PTRACE_PEEKTEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, 0);
1442 if (errno)
1443 return errno;
1444 }
1445
1446 /* Copy appropriate bytes out of the buffer. */
1447 memcpy (myaddr, (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), len);
1448
1449 return 0;
1450 }
1451
1452 /* Copy LEN bytes of data from debugger memory at MYADDR
1453 to inferior's memory at MEMADDR.
1454 On failure (cannot write the inferior)
1455 returns the value of errno. */
1456
1457 static int
1458 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
1459 {
1460 register int i;
1461 /* Round starting address down to longword boundary. */
1462 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1463 /* Round ending address up; get number of longwords that makes. */
1464 register int count
1465 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
1466 /* Allocate buffer of that many longwords. */
1467 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1468 extern int errno;
1469
1470 if (debug_threads)
1471 {
1472 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
1473 }
1474
1475 /* Fill start and end extra bytes of buffer with existing memory data. */
1476
1477 buffer[0] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1478 (PTRACE_ARG3_TYPE) addr, 0);
1479
1480 if (count > 1)
1481 {
1482 buffer[count - 1]
1483 = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1484 (PTRACE_ARG3_TYPE) (addr + (count - 1)
1485 * sizeof (PTRACE_XFER_TYPE)),
1486 0);
1487 }
1488
1489 /* Copy data to be written over corresponding part of buffer */
1490
1491 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
1492
1493 /* Write the entire buffer. */
1494
1495 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1496 {
1497 errno = 0;
1498 ptrace (PTRACE_POKETEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
1499 if (errno)
1500 return errno;
1501 }
1502
1503 return 0;
1504 }
1505
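/* Called when new symbols are available.  The first successful
   thread_db_init switches gdbserver into thread-aware mode.  */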
1506 static void
1507 linux_look_up_symbols (void)
1508 {
1509 #ifdef USE_THREAD_DB
1510 if (using_threads)
1511 return;
1512
1513 using_threads = thread_db_init ();
1514 #endif
1515 }
1516
1517 static void
1518 linux_send_signal (int signum)
1519 {
1520 extern unsigned long signal_pid;
1521
1522 if (cont_thread != 0 && cont_thread != -1)
1523 {
1524 struct process_info *process;
1525
1526 process = get_thread_process (current_inferior);
1527 kill_lwp (process->lwpid, signum);
1528 }
1529 else
1530 kill_lwp (signal_pid, signum);
1531 }
1532
1533 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
1534 to debugger memory starting at MYADDR. */
1535
1536 static int
1537 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
1538 {
1539 char filename[PATH_MAX];
1540 int fd, n;
1541
1542 snprintf (filename, sizeof filename, "/proc/%ld/auxv", inferior_pid);
1543
1544 fd = open (filename, O_RDONLY);
1545 if (fd < 0)
1546 return -1;
1547
1548 if (offset != (CORE_ADDR) 0
1549 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
1550 n = -1;
1551 else
1552 n = read (fd, myaddr, len);
1553
1554 close (fd);
1555
1556 return n;
1557 }
1558
1559 /* These watchpoint-related wrapper functions simply pass on the function call
1560 if the target has registered a corresponding function. */
1561
1562 static int
1563 linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
1564 {
1565 if (the_low_target.insert_watchpoint != NULL)
1566 return the_low_target.insert_watchpoint (type, addr, len);
1567 else
1568 /* Unsupported (see target.h). */
1569 return 1;
1570 }
1571
1572 static int
1573 linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
1574 {
1575 if (the_low_target.remove_watchpoint != NULL)
1576 return the_low_target.remove_watchpoint (type, addr, len);
1577 else
1578 /* Unsupported (see target.h). */
1579 return 1;
1580 }
1581
1582 static int
1583 linux_stopped_by_watchpoint (void)
1584 {
1585 if (the_low_target.stopped_by_watchpoint != NULL)
1586 return the_low_target.stopped_by_watchpoint ();
1587 else
1588 return 0;
1589 }
1590
1591 static CORE_ADDR
1592 linux_stopped_data_address (void)
1593 {
1594 if (the_low_target.stopped_data_address != NULL)
1595 return the_low_target.stopped_data_address ();
1596 else
1597 return 0;
1598 }
1599
1600 #if defined(__UCLIBC__) && !defined(__UCLIBC_HAS_MMU__)
1601 #if defined(__mcoldfire__)
1602 /* These should really be defined in the kernel's ptrace.h header. */
1603 #define PT_TEXT_ADDR 49*4
1604 #define PT_DATA_ADDR 50*4
1605 #define PT_TEXT_END_ADDR 51*4
1606 #endif
1607
1608 /* Under uClinux, programs are loaded at non-zero offsets, which we need
1609 to tell gdb about. */
1610
1611 static int
1612 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
1613 {
1614 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
1615 unsigned long text, text_end, data;
1616 int pid = get_thread_process (current_inferior)->head.id;
1617
1618 errno = 0;
1619
1620 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
1621 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
1622 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
1623
1624 if (errno == 0)
1625 {
1626 /* Both text and data offsets produced at compile-time (and so
1627 used by gdb) are relative to the beginning of the program,
1628 with the data segment immediately following the text segment.
1629 However, the actual runtime layout in memory may put the data
1630 somewhere else, so when we send gdb a data base-address, we
1631 use the real data base address and subtract the compile-time
1632 data base-address from it (which is just the length of the
1633 text segment). BSS immediately follows data in both
1634 cases. */
1635 *text_p = text;
1636 *data_p = data - (text_end - text);
1637
1638 return 1;
1639 }
1640 #endif
1641 return 0;
1642 }
1643 #endif
1644
1645 static struct target_ops linux_target_ops = {
1646 linux_create_inferior,
1647 linux_attach,
1648 linux_kill,
1649 linux_detach,
1650 linux_thread_alive,
1651 linux_resume,
1652 linux_wait,
1653 linux_fetch_registers,
1654 linux_store_registers,
1655 linux_read_memory,
1656 linux_write_memory,
1657 linux_look_up_symbols,
1658 linux_send_signal,
1659 linux_read_auxv,
1660 linux_insert_watchpoint,
1661 linux_remove_watchpoint,
1662 linux_stopped_by_watchpoint,
1663 linux_stopped_data_address,
1664 #if defined(__UCLIBC__) && !defined(__UCLIBC_HAS_MMU__)
1665 linux_read_offsets,
1666 #else
1667 NULL,
1668 #endif
1669 #ifdef USE_THREAD_DB
1670 thread_db_get_tls_address,
1671 #else
1672 NULL,
1673 #endif
1674 };
1675
1676 static void
1677 linux_init_signals ()
1678 {
1679 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
1680 to find what the cancel signal actually is. */
1681 signal (__SIGRTMIN+1, SIG_IGN);
1682 }
1683
1684 void
1685 initialize_low (void)
1686 {
1687 using_threads = 0;
1688 set_target_ops (&linux_target_ops);
1689 set_breakpoint_data (the_low_target.breakpoint,
1690 the_low_target.breakpoint_len);
1691 init_registers ();
1692 linux_init_signals ();
1693 }