1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40
41 #ifndef PTRACE_GETSIGINFO
42 # define PTRACE_GETSIGINFO 0x4202
43 # define PTRACE_SETSIGINFO 0x4203
44 #endif
45
46 #ifndef O_LARGEFILE
47 #define O_LARGEFILE 0
48 #endif
49
50 /* If the system headers did not provide the constants, hard-code the normal
51 values. */
52 #ifndef PTRACE_EVENT_FORK
53
54 #define PTRACE_SETOPTIONS 0x4200
55 #define PTRACE_GETEVENTMSG 0x4201
56
57 /* options set using PTRACE_SETOPTIONS */
58 #define PTRACE_O_TRACESYSGOOD 0x00000001
59 #define PTRACE_O_TRACEFORK 0x00000002
60 #define PTRACE_O_TRACEVFORK 0x00000004
61 #define PTRACE_O_TRACECLONE 0x00000008
62 #define PTRACE_O_TRACEEXEC 0x00000010
63 #define PTRACE_O_TRACEVFORKDONE 0x00000020
64 #define PTRACE_O_TRACEEXIT 0x00000040
65
66 /* Wait extended result codes for the above trace options. */
67 #define PTRACE_EVENT_FORK 1
68 #define PTRACE_EVENT_VFORK 2
69 #define PTRACE_EVENT_CLONE 3
70 #define PTRACE_EVENT_EXEC 4
71 #define PTRACE_EVENT_VFORK_DONE 5
72 #define PTRACE_EVENT_EXIT 6
73
74 #endif /* PTRACE_EVENT_FORK */
75
76 /* We can't always assume that this flag is available, but all systems
77 with the ptrace event handlers also have __WALL, so it's safe to use
78 in some contexts. */
79 #ifndef __WALL
80 #define __WALL 0x40000000 /* Wait for any child. */
81 #endif
82
83 #ifdef __UCLIBC__
84 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
85 #define HAS_NOMMU
86 #endif
87 #endif
88
89 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
90 representation of the thread ID.
91
92 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
93 the same as the LWP ID. */
94
95 struct inferior_list all_lwps;
96
97 /* A list of all unknown processes which receive stop signals. Some other
98 process will presumably claim each of these as forked children
99 momentarily. */
100
101 struct inferior_list stopped_pids;
102
103 /* FIXME this is a bit of a hack, and could be removed. */
104 int stopping_threads;
105
106 /* FIXME make into a target method? */
107 int using_threads = 1;
108 static int thread_db_active;
109
110 static int must_set_ptrace_flags;
111
112 /* This flag is true iff we've just created or attached to a new inferior
113 but it has not stopped yet. As soon as it does, we need to call the
114 low target's arch_setup callback. */
115 static int new_inferior;
116
117 static void linux_resume_one_lwp (struct inferior_list_entry *entry,
118 int step, int signal, siginfo_t *info);
119 static void linux_resume (struct thread_resume *resume_info, size_t n);
120 static void stop_all_lwps (void);
121 static int linux_wait_for_event (struct thread_info *child);
122 static int check_removed_breakpoint (struct lwp_info *event_child);
123 static void *add_lwp (unsigned long pid);
124 static int my_waitpid (int pid, int *status, int flags);
125 static int linux_stopped_by_watchpoint (void);
126
127 struct pending_signals
128 {
129 int signal;
130 siginfo_t info;
131 struct pending_signals *prev;
132 };
133
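/* The type of the address argument we pass to ptrace, and the type of
the words transferred by each ptrace peek or poke request. */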
134 #define PTRACE_ARG3_TYPE long
135 #define PTRACE_XFER_TYPE long
136
137 #ifdef HAVE_LINUX_REGSETS
138 static char *disabled_regsets;
139 static int num_regsets;
140 #endif
141
142 #define pid_of(proc) ((proc)->head.id)
143
144 /* FIXME: Delete eventually. */
145 #define inferior_pid (pid_of (get_thread_lwp (current_inferior)))
146
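/* Handle an extended wait status for EVENT_CHILD. The ptrace event is
encoded in the upper half of WSTAT. Only PTRACE_EVENT_CLONE is handled
here: the new LWP is added to our lists and then either resumed or left
stopped, depending on whether we are currently stopping all threads. */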
147 static void
148 handle_extended_wait (struct lwp_info *event_child, int wstat)
149 {
150 int event = wstat >> 16;
151 struct lwp_info *new_lwp;
152
153 if (event == PTRACE_EVENT_CLONE)
154 {
155 unsigned long new_pid;
156 int ret, status = W_STOPCODE (SIGSTOP);
157
158 ptrace (PTRACE_GETEVENTMSG, inferior_pid, 0, &new_pid);
159
160 /* If we haven't already seen the new PID stop, wait for it now. */
161 if (! pull_pid_from_list (&stopped_pids, new_pid))
162 {
163 /* The new child has a pending SIGSTOP. We can't affect it until it
164 hits the SIGSTOP, but we're already attached. */
165
166 ret = my_waitpid (new_pid, &status, __WALL);
167
168 if (ret == -1)
169 perror_with_name ("waiting for new child");
170 else if (ret != new_pid)
171 warning ("wait returned unexpected PID %d", ret);
172 else if (!WIFSTOPPED (status))
173 warning ("wait returned unexpected status 0x%x", status);
174 }
175
176 ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);
177
178 new_lwp = (struct lwp_info *) add_lwp (new_pid);
179 add_thread (new_pid, new_lwp, new_pid);
180 new_thread_notify (thread_id_to_gdb_id (new_lwp->lwpid));
181
182 /* Normally we will get the pending SIGSTOP. But in some cases
183 we might get another signal delivered to the group first.
184 If we do get another signal, be sure not to lose it. */
185 if (WSTOPSIG (status) == SIGSTOP)
186 {
187 if (stopping_threads)
188 new_lwp->stopped = 1;
189 else
190 ptrace (PTRACE_CONT, new_pid, 0, 0);
191 }
192 else
193 {
194 new_lwp->stop_expected = 1;
195 if (stopping_threads)
196 {
197 new_lwp->stopped = 1;
198 new_lwp->status_pending_p = 1;
199 new_lwp->status_pending = status;
200 }
201 else
202 /* Pass the signal on. This is what GDB does - except
203 shouldn't we really report it instead? */
204 ptrace (PTRACE_CONT, new_pid, 0, WSTOPSIG (status));
205 }
206
207 /* Always resume the current thread. If we are stopping
208 threads, it will have a pending SIGSTOP; we may as well
209 collect it now. */
210 linux_resume_one_lwp (&event_child->head,
211 event_child->stepping, 0, NULL);
212 }
213 }
214
215 /* This function should only be called if the process got a SIGTRAP.
216 The SIGTRAP could mean several things.
217
218 On i386, where decr_pc_after_break is non-zero:
219 If we were single-stepping this process using PTRACE_SINGLESTEP,
220 we will get only the one SIGTRAP (even if the instruction we
221 stepped over was a breakpoint). The value of $eip will be the
222 next instruction.
223 If we continue the process using PTRACE_CONT, we will get a
224 SIGTRAP when we hit a breakpoint. The value of $eip will be
225 the instruction after the breakpoint (i.e. needs to be
226 decremented). If we report the SIGTRAP to GDB, we must also
227 report the undecremented PC. If we cancel the SIGTRAP, we
228 must resume at the decremented PC.
229
230 (Presumably, not yet tested) On a non-decr_pc_after_break machine
231 with hardware or kernel single-step:
232 If we single-step over a breakpoint instruction, our PC will
233 point at the following instruction. If we continue and hit a
234 breakpoint instruction, our PC will point at the breakpoint
235 instruction. */
236
237 static CORE_ADDR
238 get_stop_pc (void)
239 {
240 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
241
242 if (get_thread_lwp (current_inferior)->stepping)
243 return stop_pc;
244 else
245 return stop_pc - the_low_target.decr_pc_after_break;
246 }
247
248 static void *
249 add_lwp (unsigned long pid)
250 {
251 struct lwp_info *lwp;
252
253 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
254 memset (lwp, 0, sizeof (*lwp));
255
256 lwp->head.id = pid;
257 lwp->lwpid = pid;
258
259 add_inferior_to_list (&all_lwps, &lwp->head);
260
261 return lwp;
262 }
263
264 /* Start an inferior process and return its pid.
265 ALLARGS is a vector of program-name and args. */
266
267 static int
268 linux_create_inferior (char *program, char **allargs)
269 {
270 void *new_lwp;
271 int pid;
272
273 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
274 pid = vfork ();
275 #else
276 pid = fork ();
277 #endif
278 if (pid < 0)
279 perror_with_name ("fork");
280
281 if (pid == 0)
282 {
283 ptrace (PTRACE_TRACEME, 0, 0, 0);
284
285 signal (__SIGRTMIN + 1, SIG_DFL);
286
287 setpgid (0, 0);
288
289 execv (program, allargs);
290 if (errno == ENOENT)
291 execvp (program, allargs);
292
293 fprintf (stderr, "Cannot exec %s: %s.\n", program,
294 strerror (errno));
295 fflush (stderr);
296 _exit (0177);
297 }
298
299 new_lwp = add_lwp (pid);
300 add_thread (pid, new_lwp, pid);
301 must_set_ptrace_flags = 1;
302 new_inferior = 1;
303
304 return pid;
305 }
306
307 /* Attach to an inferior process. */
308
309 void
310 linux_attach_lwp (unsigned long pid)
311 {
312 struct lwp_info *new_lwp;
313
314 if (ptrace (PTRACE_ATTACH, pid, 0, 0) != 0)
315 {
316 if (all_threads.head != NULL)
317 {
318 /* If we fail to attach to an LWP, just warn. */
319 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", pid,
320 strerror (errno), errno);
321 fflush (stderr);
322 return;
323 }
324 else
325 /* If we fail to attach to a process, report an error. */
326 error ("Cannot attach to process %ld: %s (%d)\n", pid,
327 strerror (errno), errno);
328 }
329
330 /* FIXME: This intermittently fails.
331 We need to wait for SIGSTOP first. */
332 ptrace (PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACECLONE);
333
334 new_lwp = (struct lwp_info *) add_lwp (pid);
335 add_thread (pid, new_lwp, pid);
336 new_thread_notify (thread_id_to_gdb_id (new_lwp->lwpid));
337
338 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
339 brings it to a halt.
340
341 There are several cases to consider here:
342
343 1) gdbserver has already attached to the process and is being notified
344 of a new thread that is being created.
345 In this case we should ignore that SIGSTOP and resume the process.
346 This is handled below by setting stop_expected = 1.
347
348 2) This is the first thread (the process thread), and we're attaching
349 to it via attach_inferior.
350 In this case we want the process thread to stop.
351 This is handled by having linux_attach clear stop_expected after
352 we return.
353 ??? If the process already has several threads we leave the other
354 threads running.
355
356 3) GDB is connecting to gdbserver and is requesting an enumeration of all
357 existing threads.
358 In this case we want the thread to stop.
359 FIXME: This case is currently not properly handled.
360 We should wait for the SIGSTOP but don't. Things work apparently
361 because enough time passes between when we ptrace (ATTACH) and when
362 gdb makes the next ptrace call on the thread.
363
364 On the other hand, if we are currently trying to stop all threads, we
365 should treat the new thread as if we had sent it a SIGSTOP. This works
366 because we are guaranteed that the add_lwp call above added us to the
367 end of the list, and so the new thread has not yet reached
368 wait_for_sigstop (but will). */
369 if (! stopping_threads)
370 new_lwp->stop_expected = 1;
371 }
372
373 int
374 linux_attach (unsigned long pid)
375 {
376 struct lwp_info *lwp;
377
378 linux_attach_lwp (pid);
379
380 /* Don't ignore the initial SIGSTOP if we just attached to this process.
381 It will be collected by wait shortly. */
382 lwp = (struct lwp_info *) find_inferior_id (&all_lwps, pid);
383 lwp->stop_expected = 0;
384
385 new_inferior = 1;
386
387 return 0;
388 }
389
390 /* Kill the inferior process. Make us have no inferior. */
391
392 static void
393 linux_kill_one_lwp (struct inferior_list_entry *entry)
394 {
395 struct thread_info *thread = (struct thread_info *) entry;
396 struct lwp_info *lwp = get_thread_lwp (thread);
397 int wstat;
398
399 /* We avoid killing the first thread here, because of a Linux kernel (at
400 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
401 the children get a chance to be reaped, it will remain a zombie
402 forever. */
403 if (entry == all_threads.head)
404 return;
405
406 do
407 {
408 ptrace (PTRACE_KILL, pid_of (lwp), 0, 0);
409
410 /* Make sure it died. The loop is most likely unnecessary. */
411 wstat = linux_wait_for_event (thread);
412 } while (WIFSTOPPED (wstat));
413 }
414
415 static void
416 linux_kill (void)
417 {
418 struct thread_info *thread = (struct thread_info *) all_threads.head;
419 struct lwp_info *lwp;
420 int wstat;
421
422 if (thread == NULL)
423 return;
424
425 for_each_inferior (&all_threads, linux_kill_one_lwp);
426
427 /* See the comment in linux_kill_one_lwp. We did not kill the first
428 thread in the list, so do so now. */
429 lwp = get_thread_lwp (thread);
430 do
431 {
432 ptrace (PTRACE_KILL, pid_of (lwp), 0, 0);
433
434 /* Make sure it died. The loop is most likely unnecessary. */
435 wstat = linux_wait_for_event (thread);
436 } while (WIFSTOPPED (wstat));
437
438 clear_inferiors ();
439 free (all_lwps.head);
440 all_lwps.head = all_lwps.tail = NULL;
441 }
442
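/* Detach from the LWP in ENTRY, collecting any pending SIGSTOP and
flushing cached register changes first; a callback for
for_each_inferior. */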
443 static void
444 linux_detach_one_lwp (struct inferior_list_entry *entry)
445 {
446 struct thread_info *thread = (struct thread_info *) entry;
447 struct lwp_info *lwp = get_thread_lwp (thread);
448
449 /* Make sure the process isn't stopped at a breakpoint that's
450 no longer there. */
451 check_removed_breakpoint (lwp);
452
453 /* If this process is stopped but is expecting a SIGSTOP, then make
454 sure we take care of that now. This isn't absolutely guaranteed
455 to collect the SIGSTOP, but is fairly likely to. */
456 if (lwp->stop_expected)
457 {
458 /* Clear stop_expected, so that the SIGSTOP will be reported. */
459 lwp->stop_expected = 0;
460 if (lwp->stopped)
461 linux_resume_one_lwp (&lwp->head, 0, 0, NULL);
462 linux_wait_for_event (thread);
463 }
464
465 /* Flush any pending changes to the process's registers. */
466 regcache_invalidate_one ((struct inferior_list_entry *)
467 get_lwp_thread (lwp));
468
469 /* Finally, let it resume. */
470 ptrace (PTRACE_DETACH, pid_of (lwp), 0, 0);
471 }
472
473 static int
474 linux_detach (void)
475 {
476 delete_all_breakpoints ();
477 for_each_inferior (&all_threads, linux_detach_one_lwp);
478 clear_inferiors ();
479 free (all_lwps.head);
480 all_lwps.head = all_lwps.tail = NULL;
481 return 0;
482 }
483
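/* Block until the initial process (SIGNAL_PID) has terminated, reaping
its exit status. */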
484 static void
485 linux_join (void)
486 {
487 extern unsigned long signal_pid;
488 int status, ret;
489
490 do {
491 ret = waitpid (signal_pid, &status, 0);
492 if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
493 break;
494 } while (ret != -1 || errno != ECHILD);
495 }
496
497 /* Return nonzero if the given thread is still alive. */
498 static int
499 linux_thread_alive (unsigned long lwpid)
500 {
501 if (find_inferior_id (&all_threads, lwpid) != NULL)
502 return 1;
503 else
504 return 0;
505 }
506
507 /* Return nonzero if this process stopped at a breakpoint which
508 no longer appears to be inserted. Also adjust the PC
509 appropriately to resume where the breakpoint used to be. */
510 static int
511 check_removed_breakpoint (struct lwp_info *event_child)
512 {
513 CORE_ADDR stop_pc;
514 struct thread_info *saved_inferior;
515
516 if (event_child->pending_is_breakpoint == 0)
517 return 0;
518
519 if (debug_threads)
520 fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
521 event_child->lwpid);
522
523 saved_inferior = current_inferior;
524 current_inferior = get_lwp_thread (event_child);
525
526 stop_pc = get_stop_pc ();
527
528 /* If the PC has changed since we stopped, then we shouldn't do
529 anything. This happens if, for instance, GDB handled the
530 decr_pc_after_break subtraction itself. */
531 if (stop_pc != event_child->pending_stop_pc)
532 {
533 if (debug_threads)
534 fprintf (stderr, "Ignoring, PC was changed. Old PC was 0x%08llx\n",
535 event_child->pending_stop_pc);
536
537 event_child->pending_is_breakpoint = 0;
538 current_inferior = saved_inferior;
539 return 0;
540 }
541
542 /* If the breakpoint is still there, we will report hitting it. */
543 if ((*the_low_target.breakpoint_at) (stop_pc))
544 {
545 if (debug_threads)
546 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
547 current_inferior = saved_inferior;
548 return 0;
549 }
550
551 if (debug_threads)
552 fprintf (stderr, "Removed breakpoint.\n");
553
554 /* For decr_pc_after_break targets, here is where we perform the
555 decrement. We go immediately from this function to resuming,
556 and can not safely call get_stop_pc () again. */
557 if (the_low_target.set_pc != NULL)
558 (*the_low_target.set_pc) (stop_pc);
559
560 /* We consumed the pending SIGTRAP. */
561 event_child->pending_is_breakpoint = 0;
562 event_child->status_pending_p = 0;
563 event_child->status_pending = 0;
564
565 current_inferior = saved_inferior;
566 return 1;
567 }
568
569 /* Return 1 if this lwp has an interesting status pending. This
570 function may silently resume an inferior lwp. */
571 static int
572 status_pending_p (struct inferior_list_entry *entry, void *dummy)
573 {
574 struct lwp_info *lwp = (struct lwp_info *) entry;
575
576 if (lwp->status_pending_p)
577 if (check_removed_breakpoint (lwp))
578 {
579 /* This thread was stopped at a breakpoint, and the breakpoint
580 is now gone. We were told to continue (or step...) all threads,
581 so GDB isn't trying to single-step past this breakpoint.
582 So instead of reporting the old SIGTRAP, pretend we got to
583 the breakpoint just after it was removed instead of just
584 before; resume the process. */
585 linux_resume_one_lwp (&lwp->head, 0, 0, NULL);
586 return 0;
587 }
588
589 return lwp->status_pending_p;
590 }
591
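/* Wait for an event from the LWP *CHILDP, or from any LWP if *CHILDP is
NULL, polling both normal and __WCLONE children. On return *WSTATP
holds the raw wait status and *CHILDP points to the LWP that reported
it; stops from PIDs we do not know yet are recorded in stopped_pids and
the wait is retried. The low target's arch_setup hook runs the first
time a new inferior stops. */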
592 static void
593 linux_wait_for_lwp (struct lwp_info **childp, int *wstatp)
594 {
595 int ret;
596 int to_wait_for = -1;
597
598 if (*childp != NULL)
599 to_wait_for = (*childp)->lwpid;
600
601 retry:
602 while (1)
603 {
604 ret = waitpid (to_wait_for, wstatp, WNOHANG);
605
606 if (ret == -1)
607 {
608 if (errno != ECHILD)
609 perror_with_name ("waitpid");
610 }
611 else if (ret > 0)
612 break;
613
614 ret = waitpid (to_wait_for, wstatp, WNOHANG | __WCLONE);
615
616 if (ret == -1)
617 {
618 if (errno != ECHILD)
619 perror_with_name ("waitpid (WCLONE)");
620 }
621 else if (ret > 0)
622 break;
623
624 usleep (1000);
625 }
626
627 if (debug_threads
628 && (!WIFSTOPPED (*wstatp)
629 || (WSTOPSIG (*wstatp) != 32
630 && WSTOPSIG (*wstatp) != 33)))
631 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
632
633 if (to_wait_for == -1)
634 *childp = (struct lwp_info *) find_inferior_id (&all_lwps, ret);
635
636 /* If we didn't find a process, one of two things presumably happened:
637 - A process we started and then detached from has exited. Ignore it.
638 - A process we are controlling has forked and the new child's stop
639 was reported to us by the kernel. Save its PID. */
640 if (*childp == NULL && WIFSTOPPED (*wstatp))
641 {
642 add_pid_to_list (&stopped_pids, ret);
643 goto retry;
644 }
645 else if (*childp == NULL)
646 goto retry;
647
648 (*childp)->stopped = 1;
649 (*childp)->pending_is_breakpoint = 0;
650
651 (*childp)->last_status = *wstatp;
652
653 /* Architecture-specific setup after inferior is running.
654 This needs to happen after we have attached to the inferior
655 and it is stopped for the first time, but before we access
656 any inferior registers. */
657 if (new_inferior)
658 {
659 the_low_target.arch_setup ();
660 #ifdef HAVE_LINUX_REGSETS
661 memset (disabled_regsets, 0, num_regsets);
662 #endif
663 new_inferior = 0;
664 }
665
666 if (debug_threads
667 && WIFSTOPPED (*wstatp))
668 {
669 struct thread_info *saved_inferior = current_inferior;
670 current_inferior = (struct thread_info *)
671 find_inferior_id (&all_threads, (*childp)->lwpid);
672 /* For testing only; i386_stop_pc prints out a diagnostic. */
673 if (the_low_target.get_pc != NULL)
674 get_stop_pc ();
675 current_inferior = saved_inferior;
676 }
677 }
678
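/* Wait for an event from CHILD, or from any LWP if CHILD is NULL.
Thread exits, expected SIGSTOPs, extended clone events, pass-through
signals and gdbserver's own breakpoints are handled here; only events
that should be reported to GDB are returned, as a raw wait status. */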
679 static int
680 linux_wait_for_event (struct thread_info *child)
681 {
682 CORE_ADDR stop_pc;
683 struct lwp_info *event_child;
684 int wstat;
685 int bp_status;
686
687 /* Check for a process with a pending status. */
688 /* It is possible that the user changed the pending task's registers since
689 it stopped. We correctly handle the change of PC if we hit a breakpoint
690 (in check_removed_breakpoint); signals should be reported anyway. */
691 if (child == NULL)
692 {
693 event_child = (struct lwp_info *)
694 find_inferior (&all_lwps, status_pending_p, NULL);
695 if (debug_threads && event_child)
696 fprintf (stderr, "Got a pending child %ld\n", event_child->lwpid);
697 }
698 else
699 {
700 event_child = get_thread_lwp (child);
701 if (event_child->status_pending_p
702 && check_removed_breakpoint (event_child))
703 event_child = NULL;
704 }
705
706 if (event_child != NULL)
707 {
708 if (event_child->status_pending_p)
709 {
710 if (debug_threads)
711 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
712 event_child->lwpid, event_child->status_pending);
713 wstat = event_child->status_pending;
714 event_child->status_pending_p = 0;
715 event_child->status_pending = 0;
716 current_inferior = get_lwp_thread (event_child);
717 return wstat;
718 }
719 }
720
721 /* We only enter this loop if no process has a pending wait status. Thus
722 any action taken in response to a wait status inside this loop is
723 responding as soon as we detect the status, not after any pending
724 events. */
725 while (1)
726 {
727 if (child == NULL)
728 event_child = NULL;
729 else
730 event_child = get_thread_lwp (child);
731
732 linux_wait_for_lwp (&event_child, &wstat);
733
734 if (event_child == NULL)
735 error ("event from unknown child");
736
737 current_inferior = (struct thread_info *)
738 find_inferior_id (&all_threads, event_child->lwpid);
739
740 /* Check for thread exit. */
741 if (! WIFSTOPPED (wstat))
742 {
743 if (debug_threads)
744 fprintf (stderr, "LWP %ld exiting\n", event_child->head.id);
745
746 /* If the last thread is exiting, just return. */
747 if (all_threads.head == all_threads.tail)
748 return wstat;
749
750 dead_thread_notify (thread_id_to_gdb_id (event_child->lwpid));
751
752 remove_inferior (&all_lwps, &event_child->head);
753 free (event_child);
754 remove_thread (current_inferior);
755 current_inferior = (struct thread_info *) all_threads.head;
756
757 /* If we were waiting for this particular child to do something...
758 well, it did something. */
759 if (child != NULL)
760 return wstat;
761
762 /* Wait for a more interesting event. */
763 continue;
764 }
765
766 if (WIFSTOPPED (wstat)
767 && WSTOPSIG (wstat) == SIGSTOP
768 && event_child->stop_expected)
769 {
770 if (debug_threads)
771 fprintf (stderr, "Expected stop.\n");
772 event_child->stop_expected = 0;
773 linux_resume_one_lwp (&event_child->head,
774 event_child->stepping, 0, NULL);
775 continue;
776 }
777
778 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
779 && wstat >> 16 != 0)
780 {
781 handle_extended_wait (event_child, wstat);
782 continue;
783 }
784
785 /* If GDB is not interested in this signal, don't stop other
786 threads, and don't report it to GDB. Just resume the
787 inferior right away. We do this for threading-related
788 signals as well as any that GDB specifically requested we
789 ignore. But never ignore SIGSTOP if we sent it ourselves,
790 and do not ignore signals when stepping - they may require
791 special handling to skip the signal handler. */
792 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
793 thread library? */
794 if (WIFSTOPPED (wstat)
795 && !event_child->stepping
796 && (
797 #ifdef USE_THREAD_DB
798 (thread_db_active && (WSTOPSIG (wstat) == __SIGRTMIN
799 || WSTOPSIG (wstat) == __SIGRTMIN + 1))
800 ||
801 #endif
802 (pass_signals[target_signal_from_host (WSTOPSIG (wstat))]
803 && (WSTOPSIG (wstat) != SIGSTOP || !stopping_threads))))
804 {
805 siginfo_t info, *info_p;
806
807 if (debug_threads)
808 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
809 WSTOPSIG (wstat), event_child->head.id);
810
811 if (ptrace (PTRACE_GETSIGINFO, event_child->lwpid, 0, &info) == 0)
812 info_p = &info;
813 else
814 info_p = NULL;
815 linux_resume_one_lwp (&event_child->head,
816 event_child->stepping,
817 WSTOPSIG (wstat), info_p);
818 continue;
819 }
820
821 /* If this event was not handled above, and is not a SIGTRAP, report
822 it. */
823 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGTRAP)
824 return wstat;
825
826 /* If this target does not support breakpoints, we simply report the
827 SIGTRAP; it's of no concern to us. */
828 if (the_low_target.get_pc == NULL)
829 return wstat;
830
831 stop_pc = get_stop_pc ();
832
833 /* bp_reinsert will only be set if we were single-stepping.
834 Notice that we will resume the process after hitting
835 a gdbserver breakpoint; single-stepping to/over one
836 is not supported (yet). */
837 if (event_child->bp_reinsert != 0)
838 {
839 if (debug_threads)
840 fprintf (stderr, "Reinserted breakpoint.\n");
841 reinsert_breakpoint (event_child->bp_reinsert);
842 event_child->bp_reinsert = 0;
843
844 /* Clear the single-stepping flag and SIGTRAP as we resume. */
845 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
846 continue;
847 }
848
849 bp_status = check_breakpoints (stop_pc);
850
851 if (bp_status != 0)
852 {
853 if (debug_threads)
854 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
855
856 /* We hit one of our own breakpoints. We mark it as a pending
857 breakpoint, so that check_removed_breakpoint () will do the PC
858 adjustment for us at the appropriate time. */
859 event_child->pending_is_breakpoint = 1;
860 event_child->pending_stop_pc = stop_pc;
861
862 /* We may need to put the breakpoint back. We continue in the event
863 loop instead of simply replacing the breakpoint right away,
864 in order to not lose signals sent to the thread that hit the
865 breakpoint. Unfortunately this increases the window where another
866 thread could sneak past the removed breakpoint. For the current
867 use of server-side breakpoints (thread creation) this is
868 acceptable; but it needs to be considered before this breakpoint
869 mechanism can be used in more general ways. For some breakpoints
870 it may be necessary to stop all other threads, but that should
871 be avoided where possible.
872
873 If breakpoint_reinsert_addr is NULL, that means that we can
874 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
875 mark it for reinsertion, and single-step.
876
877 Otherwise, call the target function to figure out where we need
878 our temporary breakpoint, create it, and continue executing this
879 process. */
880 if (bp_status == 2)
881 /* No need to reinsert. */
882 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
883 else if (the_low_target.breakpoint_reinsert_addr == NULL)
884 {
885 event_child->bp_reinsert = stop_pc;
886 uninsert_breakpoint (stop_pc);
887 linux_resume_one_lwp (&event_child->head, 1, 0, NULL);
888 }
889 else
890 {
891 reinsert_breakpoint_by_bp
892 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
893 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
894 }
895
896 continue;
897 }
898
899 if (debug_threads)
900 fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
901
902 /* If we were single-stepping, we definitely want to report the
903 SIGTRAP. Although the single-step operation has completed,
904 do not clear the stepping flag yet; we need to check it
905 in wait_for_sigstop. */
906 if (event_child->stepping)
907 return wstat;
908
909 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
910 Check if it is a breakpoint, and if so mark the process information
911 accordingly. This will handle both the necessary fiddling with the
912 PC on decr_pc_after_break targets and suppressing extra threads
913 hitting a breakpoint if two hit it at once and then GDB removes it
914 after the first is reported. Arguably it would be better to report
915 multiple threads hitting breakpoints simultaneously, but the current
916 remote protocol does not allow this. */
917 if ((*the_low_target.breakpoint_at) (stop_pc))
918 {
919 event_child->pending_is_breakpoint = 1;
920 event_child->pending_stop_pc = stop_pc;
921 }
922
923 return wstat;
924 }
925
926 /* NOTREACHED */
927 return 0;
928 }
929
930 /* Wait for the inferior process; return the status to report. */
931
932 static unsigned char
933 linux_wait (char *status)
934 {
935 int w;
936 struct thread_info *child = NULL;
937
938 retry:
939 /* If we were only supposed to resume one thread, only wait for
940 that thread - if it's still alive. If it died, however - which
941 can happen if we're coming from the thread death case below -
942 then we need to make sure we restart the other threads. We could
943 pick a thread at random or restart all; restarting all is less
944 arbitrary. */
945 if (cont_thread != 0 && cont_thread != -1)
946 {
947 child = (struct thread_info *) find_inferior_id (&all_threads,
948 cont_thread);
949
950 /* No stepping, no signal - unless one is pending already, of course. */
951 if (child == NULL)
952 {
953 struct thread_resume resume_info;
954 resume_info.thread = -1;
955 resume_info.step = resume_info.sig = 0;
956 linux_resume (&resume_info, 1);
957 }
958 }
959
960 w = linux_wait_for_event (child);
961 stop_all_lwps ();
962
963 if (must_set_ptrace_flags)
964 {
965 ptrace (PTRACE_SETOPTIONS, inferior_pid, 0, PTRACE_O_TRACECLONE);
966 must_set_ptrace_flags = 0;
967 }
968
969 /* If we are waiting for a particular child, and it exited,
970 linux_wait_for_event will return its exit status. Similarly if
971 the last child exited. If this is not the last child, however,
972 do not report it as exited until there is a 'thread exited' response
973 available in the remote protocol. Instead, just wait for another event.
974 This should be safe, because if the thread crashed we will already
975 have reported the termination signal to GDB; that should stop any
976 in-progress stepping operations, etc.
977
978 Report the exit status of the last thread to exit. This matches
979 LinuxThreads' behavior. */
980
981 if (all_threads.head == all_threads.tail)
982 {
983 if (WIFEXITED (w))
984 {
985 fprintf (stderr, "\nChild exited with retcode = %x \n",
986 WEXITSTATUS (w));
987 *status = 'W';
988 clear_inferiors ();
989 free (all_lwps.head);
990 all_lwps.head = all_lwps.tail = NULL;
991 return WEXITSTATUS (w);
992 }
993 else if (!WIFSTOPPED (w))
994 {
995 fprintf (stderr, "\nChild terminated with signal = %x \n",
996 WTERMSIG (w));
997 *status = 'X';
998 clear_inferiors ();
999 free (all_lwps.head);
1000 all_lwps.head = all_lwps.tail = NULL;
1001 return target_signal_from_host (WTERMSIG (w));
1002 }
1003 }
1004 else
1005 {
1006 if (!WIFSTOPPED (w))
1007 goto retry;
1008 }
1009
1010 *status = 'T';
1011 return target_signal_from_host (WSTOPSIG (w));
1012 }
1013
1014 /* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
1015 thread groups are in use, we need to use tkill. */
1016
1017 static int
1018 kill_lwp (unsigned long lwpid, int signo)
1019 {
1020 static int tkill_failed;
1021
1022 errno = 0;
1023
1024 #ifdef SYS_tkill
1025 if (!tkill_failed)
1026 {
1027 int ret = syscall (SYS_tkill, lwpid, signo);
1028 if (errno != ENOSYS)
1029 return ret;
1030 errno = 0;
1031 tkill_failed = 1;
1032 }
1033 #endif
1034
1035 return kill (lwpid, signo);
1036 }
1037
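/* Send a SIGSTOP to the LWP in ENTRY, unless it is already stopped or
already has a stop pending; a callback for for_each_inferior. */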
1038 static void
1039 send_sigstop (struct inferior_list_entry *entry)
1040 {
1041 struct lwp_info *lwp = (struct lwp_info *) entry;
1042
1043 if (lwp->stopped)
1044 return;
1045
1046 /* If we already have a pending stop signal for this process, don't
1047 send another. */
1048 if (lwp->stop_expected)
1049 {
1050 if (debug_threads)
1051 fprintf (stderr, "Have pending sigstop for lwp %ld\n",
1052 lwp->lwpid);
1053
1054 /* We clear the stop_expected flag so that wait_for_sigstop
1055 will receive the SIGSTOP event (instead of silently resuming and
1056 waiting again). It'll be reset below. */
1057 lwp->stop_expected = 0;
1058 return;
1059 }
1060
1061 if (debug_threads)
1062 fprintf (stderr, "Sending sigstop to lwp %ld\n", lwp->head.id);
1063
1064 kill_lwp (lwp->head.id, SIGSTOP);
1065 }
1066
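/* Wait until the LWP in ENTRY stops. If it stops with a signal other
than SIGSTOP, the status is saved for later reporting (single-step
SIGTRAPs are discarded) and the SIGSTOP remains expected; a callback
for for_each_inferior. */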
1067 static void
1068 wait_for_sigstop (struct inferior_list_entry *entry)
1069 {
1070 struct lwp_info *lwp = (struct lwp_info *) entry;
1071 struct thread_info *saved_inferior, *thread;
1072 int wstat;
1073 unsigned long saved_tid;
1074
1075 if (lwp->stopped)
1076 return;
1077
1078 saved_inferior = current_inferior;
1079 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
1080 thread = (struct thread_info *) find_inferior_id (&all_threads,
1081 lwp->lwpid);
1082 wstat = linux_wait_for_event (thread);
1083
1084 /* If we stopped with a non-SIGSTOP signal, save it for later
1085 and record the pending SIGSTOP. If the process exited, just
1086 return. */
1087 if (WIFSTOPPED (wstat)
1088 && WSTOPSIG (wstat) != SIGSTOP)
1089 {
1090 if (debug_threads)
1091 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
1092 lwp->lwpid, wstat);
1093
1094 /* Do not leave a pending single-step finish to be reported to
1095 the client. The client will give us a new action for this
1096 thread, possibly a continue request --- otherwise, the client
1097 would consider this pending SIGTRAP reported later a spurious
1098 signal. */
1099 if (WSTOPSIG (wstat) == SIGTRAP
1100 && lwp->stepping
1101 && !linux_stopped_by_watchpoint ())
1102 {
1103 if (debug_threads)
1104 fprintf (stderr, " single-step SIGTRAP ignored\n");
1105 }
1106 else
1107 {
1108 lwp->status_pending_p = 1;
1109 lwp->status_pending = wstat;
1110 }
1111 lwp->stop_expected = 1;
1112 }
1113
1114 if (linux_thread_alive (saved_tid))
1115 current_inferior = saved_inferior;
1116 else
1117 {
1118 if (debug_threads)
1119 fprintf (stderr, "Previously current thread died.\n");
1120
1121 /* Set a valid thread as current. */
1122 set_desired_inferior (0);
1123 }
1124 }
1125
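/* Stop all LWPs by sending each one a SIGSTOP and waiting until every
stop has been reported. */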
1126 static void
1127 stop_all_lwps (void)
1128 {
1129 stopping_threads = 1;
1130 for_each_inferior (&all_lwps, send_sigstop);
1131 for_each_inferior (&all_lwps, wait_for_sigstop);
1132 stopping_threads = 0;
1133 }
1134
1135 /* Resume execution of the inferior process.
1136 If STEP is nonzero, single-step it.
1137 If SIGNAL is nonzero, give it that signal. */
1138
1139 static void
1140 linux_resume_one_lwp (struct inferior_list_entry *entry,
1141 int step, int signal, siginfo_t *info)
1142 {
1143 struct lwp_info *lwp = (struct lwp_info *) entry;
1144 struct thread_info *saved_inferior;
1145
1146 if (lwp->stopped == 0)
1147 return;
1148
1149 /* If we have pending signals or status, and a new signal, enqueue the
1150 signal. Also enqueue the signal if we are waiting to reinsert a
1151 breakpoint; it will be picked up again below. */
1152 if (signal != 0
1153 && (lwp->status_pending_p || lwp->pending_signals != NULL
1154 || lwp->bp_reinsert != 0))
1155 {
1156 struct pending_signals *p_sig;
1157 p_sig = xmalloc (sizeof (*p_sig));
1158 p_sig->prev = lwp->pending_signals;
1159 p_sig->signal = signal;
1160 if (info == NULL)
1161 memset (&p_sig->info, 0, sizeof (siginfo_t));
1162 else
1163 memcpy (&p_sig->info, info, sizeof (siginfo_t));
1164 lwp->pending_signals = p_sig;
1165 }
1166
1167 if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
1168 return;
1169
1170 saved_inferior = current_inferior;
1171 current_inferior = get_lwp_thread (lwp);
1172
1173 if (debug_threads)
1174 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
1175 inferior_pid, step ? "step" : "continue", signal,
1176 lwp->stop_expected ? "expected" : "not expected");
1177
1178 /* This bit needs some thinking about. If we get a signal that
1179 we must report while a single-step reinsert is still pending,
1180 we often end up resuming the thread. It might be better to
1181 (ew) allow a stack of pending events; then we could be sure that
1182 the reinsert happened right away and not lose any signals.
1183
1184 Making this stack would also shrink the window in which breakpoints are
1185 uninserted (see comment in linux_wait_for_lwp) but not enough for
1186 complete correctness, so it won't solve that problem. It may be
1187 worthwhile just to solve this one, however. */
1188 if (lwp->bp_reinsert != 0)
1189 {
1190 if (debug_threads)
1191 fprintf (stderr, " pending reinsert at %08lx", (long)lwp->bp_reinsert);
1192 if (step == 0)
1193 fprintf (stderr, "BAD - reinserting but not stepping.\n");
1194 step = 1;
1195
1196 /* Postpone any pending signal. It was enqueued above. */
1197 signal = 0;
1198 }
1199
1200 check_removed_breakpoint (lwp);
1201
1202 if (debug_threads && the_low_target.get_pc != NULL)
1203 {
1204 fprintf (stderr, " ");
1205 (*the_low_target.get_pc) ();
1206 }
1207
1208 /* If we have pending signals, consume one unless we are trying to reinsert
1209 a breakpoint. */
1210 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
1211 {
1212 struct pending_signals **p_sig;
1213
1214 p_sig = &lwp->pending_signals;
1215 while ((*p_sig)->prev != NULL)
1216 p_sig = &(*p_sig)->prev;
1217
1218 signal = (*p_sig)->signal;
1219 if ((*p_sig)->info.si_signo != 0)
1220 ptrace (PTRACE_SETSIGINFO, lwp->lwpid, 0, &(*p_sig)->info);
1221
1222 free (*p_sig);
1223 *p_sig = NULL;
1224 }
1225
1226 regcache_invalidate_one ((struct inferior_list_entry *)
1227 get_lwp_thread (lwp));
1228 errno = 0;
1229 lwp->stopped = 0;
1230 lwp->stepping = step;
1231 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwp->lwpid, 0, signal);
1232
1233 current_inferior = saved_inferior;
1234 if (errno)
1235 {
1236 /* ESRCH from ptrace either means that the thread was already
1237 running (an error) or that it is gone (a race condition). If
1238 it's gone, we will get a notification the next time we wait,
1239 so we can ignore the error. We could differentiate these
1240 two, but it's tricky without waiting; the thread still exists
1241 as a zombie, so sending it signal 0 would succeed. So just
1242 ignore ESRCH. */
1243 if (errno == ESRCH)
1244 return;
1245
1246 perror_with_name ("ptrace");
1247 }
1248 }
1249
1250 struct thread_resume_array
1251 {
1252 struct thread_resume *resume;
1253 size_t n;
1254 };
1255
1256 /* This function is called once per thread. We look up the thread
1257 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1258 resume request.
1259
1260 This algorithm is O(threads * resume elements), but resume elements
1261 is small (and will remain small at least until GDB supports thread
1262 suspension). */
1263 static int
1264 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
1265 {
1266 struct lwp_info *lwp;
1267 struct thread_info *thread;
1268 int ndx;
1269 struct thread_resume_array *r;
1270
1271 thread = (struct thread_info *) entry;
1272 lwp = get_thread_lwp (thread);
1273 r = arg;
1274
1275 for (ndx = 0; ndx < r->n; ndx++)
1276 if (r->resume[ndx].thread == -1 || r->resume[ndx].thread == entry->id)
1277 {
1278 lwp->resume = &r->resume[ndx];
1279 return 0;
1280 }
1281
1282 /* No resume action for this thread. */
1283 lwp->resume = NULL;
1284
1285 return 0;
1286 }
1287
1288 /* This function is called once per thread. We check the thread's resume
1289 request, which will tell us whether to resume, step, or leave the thread
1290 stopped; and what signal, if any, it should be sent. For threads which
1291 we aren't explicitly told otherwise, we preserve the stepping flag; this
1292 is used for stepping over gdbserver-placed breakpoints. */
1293
1294 static void
1295 linux_continue_one_thread (struct inferior_list_entry *entry)
1296 {
1297 struct lwp_info *lwp;
1298 struct thread_info *thread;
1299 int step;
1300
1301 thread = (struct thread_info *) entry;
1302 lwp = get_thread_lwp (thread);
1303
1304 if (lwp->resume == NULL)
1305 return;
1306
1307 if (lwp->resume->thread == -1
1308 && lwp->stepping
1309 && lwp->pending_is_breakpoint)
1310 step = 1;
1311 else
1312 step = lwp->resume->step;
1313
1314 linux_resume_one_lwp (&lwp->head, step, lwp->resume->sig, NULL);
1315
1316 lwp->resume = NULL;
1317 }
1318
1319 /* This function is called once per thread. We check the thread's resume
1320 request, which will tell us whether to resume, step, or leave the thread
1321 stopped; and what signal, if any, it should be sent. We queue any needed
1322 signals, since we won't actually resume. We already have a pending event
1323 to report, so we don't need to preserve any step requests; they should
1324 be re-issued if necessary. */
1325
1326 static void
1327 linux_queue_one_thread (struct inferior_list_entry *entry)
1328 {
1329 struct lwp_info *lwp;
1330 struct thread_info *thread;
1331
1332 thread = (struct thread_info *) entry;
1333 lwp = get_thread_lwp (thread);
1334
1335 if (lwp->resume == NULL)
1336 return;
1337
1338 /* If we have a new signal, enqueue the signal. */
1339 if (lwp->resume->sig != 0)
1340 {
1341 struct pending_signals *p_sig;
1342 p_sig = xmalloc (sizeof (*p_sig));
1343 p_sig->prev = lwp->pending_signals;
1344 p_sig->signal = lwp->resume->sig;
1345 memset (&p_sig->info, 0, sizeof (siginfo_t));
1346
1347 /* If this is the same signal we were previously stopped by,
1348 make sure to queue its siginfo. We can ignore the return
1349 value of ptrace; if it fails, we'll skip
1350 PTRACE_SETSIGINFO. */
1351 if (WIFSTOPPED (lwp->last_status)
1352 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
1353 ptrace (PTRACE_GETSIGINFO, lwp->lwpid, 0, &p_sig->info);
1354
1355 lwp->pending_signals = p_sig;
1356 }
1357
1358 lwp->resume = NULL;
1359 }
1360
1361 /* Set *FLAG_P if this process has an interesting status pending. */
1362 static int
1363 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1364 {
1365 struct lwp_info *lwp = (struct lwp_info *) entry;
1366
1367 /* Processes which will not be resumed are not interesting, because
1368 we might not wait for them next time through linux_wait. */
1369 if (lwp->resume == NULL)
1370 return 0;
1371
1372 /* If this thread has a removed breakpoint, we won't have any
1373 events to report later, so check now. check_removed_breakpoint
1374 may clear status_pending_p. We avoid calling check_removed_breakpoint
1375 for any thread that we are not otherwise going to resume - this
1376 lets us preserve stopped status when two threads hit a breakpoint.
1377 GDB removes the breakpoint to single-step a particular thread
1378 past it, then re-inserts it and resumes all threads. We want
1379 to report the second thread without resuming it in the interim. */
1380 if (lwp->status_pending_p)
1381 check_removed_breakpoint (lwp);
1382
1383 if (lwp->status_pending_p)
1384 * (int *) flag_p = 1;
1385
1386 return 0;
1387 }
1388
1389 static void
1390 linux_resume (struct thread_resume *resume_info, size_t n)
1391 {
1392 int pending_flag;
1393 struct thread_resume_array array = { resume_info, n };
1394
1395 find_inferior (&all_threads, linux_set_resume_request, &array);
1396
1397 /* If there is a thread which would otherwise be resumed, which
1398 has a pending status, then don't resume any threads - we can just
1399 report the pending status. Make sure to queue any signals
1400 that would otherwise be sent. */
1401 pending_flag = 0;
1402 find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
1403
1404 if (debug_threads)
1405 {
1406 if (pending_flag)
1407 fprintf (stderr, "Not resuming, pending status\n");
1408 else
1409 fprintf (stderr, "Resuming, no pending status\n");
1410 }
1411
1412 if (pending_flag)
1413 for_each_inferior (&all_threads, linux_queue_one_thread);
1414 else
1415 for_each_inferior (&all_threads, linux_continue_one_thread);
1416 }
1417
1418 #ifdef HAVE_LINUX_USRREGS
1419
1420 int
1421 register_addr (int regnum)
1422 {
1423 int addr;
1424
1425 if (regnum < 0 || regnum >= the_low_target.num_regs)
1426 error ("Invalid register number %d.", regnum);
1427
1428 addr = the_low_target.regmap[regnum];
1429
1430 return addr;
1431 }
1432
1433 /* Fetch one register. */
1434 static void
1435 fetch_register (int regno)
1436 {
1437 CORE_ADDR regaddr;
1438 int i, size;
1439 char *buf;
1440
1441 if (regno >= the_low_target.num_regs)
1442 return;
1443 if ((*the_low_target.cannot_fetch_register) (regno))
1444 return;
1445
1446 regaddr = register_addr (regno);
1447 if (regaddr == -1)
1448 return;
1449 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1450 & - sizeof (PTRACE_XFER_TYPE));
1451 buf = alloca (size);
1452 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1453 {
1454 errno = 0;
1455 *(PTRACE_XFER_TYPE *) (buf + i) =
1456 ptrace (PTRACE_PEEKUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr, 0);
1457 regaddr += sizeof (PTRACE_XFER_TYPE);
1458 if (errno != 0)
1459 {
1460 /* Warning, not error, in case we are attached; sometimes the
1461 kernel doesn't let us at the registers. */
1462 char *err = strerror (errno);
1463 char *msg = alloca (strlen (err) + 128);
1464 sprintf (msg, "reading register %d: %s", regno, err);
1465 error (msg);
1466 goto error_exit;
1467 }
1468 }
1469
1470 if (the_low_target.supply_ptrace_register)
1471 the_low_target.supply_ptrace_register (regno, buf);
1472 else
1473 supply_register (regno, buf);
1474
1475 error_exit:;
1476 }
1477
1478 /* Fetch all registers, or just one, from the child process. */
1479 static void
1480 usr_fetch_inferior_registers (int regno)
1481 {
1482 if (regno == -1 || regno == 0)
1483 for (regno = 0; regno < the_low_target.num_regs; regno++)
1484 fetch_register (regno);
1485 else
1486 fetch_register (regno);
1487 }
1488
1489 /* Store our register values back into the inferior.
1490 If REGNO is -1, do this for all registers.
1491 Otherwise, REGNO specifies which register (so we can save time). */
1492 static void
1493 usr_store_inferior_registers (int regno)
1494 {
1495 CORE_ADDR regaddr;
1496 int i, size;
1497 char *buf;
1498
1499 if (regno >= 0)
1500 {
1501 if (regno >= the_low_target.num_regs)
1502 return;
1503
1504 if ((*the_low_target.cannot_store_register) (regno) == 1)
1505 return;
1506
1507 regaddr = register_addr (regno);
1508 if (regaddr == -1)
1509 return;
1510 errno = 0;
1511 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1512 & - sizeof (PTRACE_XFER_TYPE);
1513 buf = alloca (size);
1514 memset (buf, 0, size);
1515
1516 if (the_low_target.collect_ptrace_register)
1517 the_low_target.collect_ptrace_register (regno, buf);
1518 else
1519 collect_register (regno, buf);
1520
1521 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1522 {
1523 errno = 0;
1524 ptrace (PTRACE_POKEUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr,
1525 *(PTRACE_XFER_TYPE *) (buf + i));
1526 if (errno != 0)
1527 {
1528 /* At this point, ESRCH should mean the process is
1529 already gone, in which case we simply ignore attempts
1530 to change its registers. See also the related
1531 comment in linux_resume_one_lwp. */
1532 if (errno == ESRCH)
1533 return;
1534
1535 if ((*the_low_target.cannot_store_register) (regno) == 0)
1536 {
1537 char *err = strerror (errno);
1538 char *msg = alloca (strlen (err) + 128);
1539 sprintf (msg, "writing register %d: %s",
1540 regno, err);
1541 error (msg);
1542 return;
1543 }
1544 }
1545 regaddr += sizeof (PTRACE_XFER_TYPE);
1546 }
1547 }
1548 else
1549 for (regno = 0; regno < the_low_target.num_regs; regno++)
1550 usr_store_inferior_registers (regno);
1551 }
1552 #endif /* HAVE_LINUX_USRREGS */
1553
1554
1555
1556 #ifdef HAVE_LINUX_REGSETS
1557
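/* Fetch registers via the kernel regset interface. Returns zero if a
general-purpose regset was read (so no PTRACE_PEEKUSER fall-back is
needed), nonzero otherwise. */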
1558 static int
1559 regsets_fetch_inferior_registers ()
1560 {
1561 struct regset_info *regset;
1562 int saw_general_regs = 0;
1563
1564 regset = target_regsets;
1565
1566 while (regset->size >= 0)
1567 {
1568 void *buf;
1569 int res;
1570
1571 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
1572 {
1573 regset ++;
1574 continue;
1575 }
1576
1577 buf = xmalloc (regset->size);
1578 #ifndef __sparc__
1579 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1580 #else
1581 res = ptrace (regset->get_request, inferior_pid, buf, 0);
1582 #endif
1583 if (res < 0)
1584 {
1585 if (errno == EIO)
1586 {
1587 /* If we get EIO on a regset, do not try it again for
1588 this process. */
1589 disabled_regsets[regset - target_regsets] = 1;
free (buf);
1590 continue;
1591 }
1592 else
1593 {
1594 char s[256];
1595 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%ld",
1596 inferior_pid);
1597 perror (s);
1598 }
1599 }
1600 else if (regset->type == GENERAL_REGS)
1601 saw_general_regs = 1;
1602 regset->store_function (buf);
free (buf);
1603 regset ++;
1604 }
1605 if (saw_general_regs)
1606 return 0;
1607 else
1608 return 1;
1609 }
1610
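/* Store registers via the kernel regset interface. Returns zero if a
general-purpose regset was written, nonzero otherwise. */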
1611 static int
1612 regsets_store_inferior_registers ()
1613 {
1614 struct regset_info *regset;
1615 int saw_general_regs = 0;
1616
1617 regset = target_regsets;
1618
1619 while (regset->size >= 0)
1620 {
1621 void *buf;
1622 int res;
1623
1624 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
1625 {
1626 regset ++;
1627 continue;
1628 }
1629
1630 buf = xmalloc (regset->size);
1631
1632 /* First fill the buffer with the current register set contents,
1633 in case there are any items in the kernel's regset that are
1634 not in gdbserver's regcache. */
1635 #ifndef __sparc__
1636 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1637 #else
1638 res = ptrace (regset->get_request, inferior_pid, buf, 0);
1639 #endif
1640
1641 if (res == 0)
1642 {
1643 /* Then overlay our cached registers on that. */
1644 regset->fill_function (buf);
1645
1646 /* Only now do we write the register set. */
1647 #ifndef __sparc__
1648 res = ptrace (regset->set_request, inferior_pid, 0, buf);
1649 #else
1650 res = ptrace (regset->set_request, inferior_pid, buf, 0);
1651 #endif
1652 }
1653
1654 if (res < 0)
1655 {
1656 if (errno == EIO)
1657 {
1658 /* If we get EIO on a regset, do not try it again for
1659 this process. */
1660 disabled_regsets[regset - target_regsets] = 1;
1661 continue;
1662 }
1663 else if (errno == ESRCH)
1664 {
1665 /* At this point, ESRCH should mean the process is
1666 already gone, in which case we simply ignore attempts
1667 to change its registers. See also the related
1668 comment in linux_resume_one_lwp. */
1669 return 0;
1670 }
1671 else
1672 {
1673 perror ("Warning: ptrace(regsets_store_inferior_registers)");
1674 }
1675 }
1676 else if (regset->type == GENERAL_REGS)
1677 saw_general_regs = 1;
1678 regset ++;
1679 free (buf);
1680 }
1681 if (saw_general_regs)
1682 return 0;
1683 else
1684 return 1;
1685 return 0;
1686 }
1687
1688 #endif /* HAVE_LINUX_REGSETS */
1689
1690
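/* Fetch registers for the current inferior, preferring the regset
interface and falling back to PTRACE_PEEKUSER where necessary. */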
1691 void
1692 linux_fetch_registers (int regno)
1693 {
1694 #ifdef HAVE_LINUX_REGSETS
1695 if (regsets_fetch_inferior_registers () == 0)
1696 return;
1697 #endif
1698 #ifdef HAVE_LINUX_USRREGS
1699 usr_fetch_inferior_registers (regno);
1700 #endif
1701 }
1702
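/* Store registers for the current inferior, preferring the regset
interface and falling back to PTRACE_POKEUSER where necessary. */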
1703 void
1704 linux_store_registers (int regno)
1705 {
1706 #ifdef HAVE_LINUX_REGSETS
1707 if (regsets_store_inferior_registers () == 0)
1708 return;
1709 #endif
1710 #ifdef HAVE_LINUX_USRREGS
1711 usr_store_inferior_registers (regno);
1712 #endif
1713 }
1714
1715
1716 /* Copy LEN bytes from inferior's memory starting at MEMADDR
1717 to debugger memory starting at MYADDR. */
1718
1719 static int
1720 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
1721 {
1722 register int i;
1723 /* Round starting address down to longword boundary. */
1724 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1725 /* Round ending address up; get number of longwords that makes. */
1726 register int count
1727 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
1728 / sizeof (PTRACE_XFER_TYPE);
1729 /* Allocate buffer of that many longwords. */
1730 register PTRACE_XFER_TYPE *buffer
1731 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1732 int fd;
1733 char filename[64];
1734
1735 /* Try using /proc. Don't bother for one word. */
1736 if (len >= 3 * sizeof (long))
1737 {
1738 /* We could keep this file open and cache it - possibly one per
1739 thread. That requires some juggling, but is even faster. */
1740 sprintf (filename, "/proc/%ld/mem", inferior_pid);
1741 fd = open (filename, O_RDONLY | O_LARGEFILE);
1742 if (fd == -1)
1743 goto no_proc;
1744
1745 /* If pread64 is available, use it. It's faster if the kernel
1746 supports it (only one syscall), and it's 64-bit safe even on
1747 32-bit platforms (for instance, SPARC debugging a SPARC64
1748 application). */
1749 #ifdef HAVE_PREAD64
1750 if (pread64 (fd, myaddr, len, memaddr) != len)
1751 #else
1752 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
1753 #endif
1754 {
1755 close (fd);
1756 goto no_proc;
1757 }
1758
1759 close (fd);
1760 return 0;
1761 }
1762
1763 no_proc:
1764 /* Read all the longwords */
1765 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1766 {
1767 errno = 0;
1768 buffer[i] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1769 (PTRACE_ARG3_TYPE) addr, 0);
1770 if (errno)
1771 return errno;
1772 }
1773
1774 /* Copy appropriate bytes out of the buffer. */
1775 memcpy (myaddr,
1776 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
1777 len);
1778
1779 return 0;
1780 }
1781
1782 /* Copy LEN bytes of data from debugger memory at MYADDR
1783 to inferior's memory at MEMADDR.
1784 On failure (cannot write the inferior)
1785 returns the value of errno. */
1786
1787 static int
1788 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
1789 {
1790 register int i;
1791 /* Round starting address down to longword boundary. */
1792 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1793 /* Round ending address up; get number of longwords that makes. */
1794 register int count
1795 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
1796 /* Allocate buffer of that many longwords. */
1797 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1798
1799 if (debug_threads)
1800 {
1801 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
1802 }
1803
1804 /* Fill start and end extra bytes of buffer with existing memory data. */
1805
1806 buffer[0] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1807 (PTRACE_ARG3_TYPE) addr, 0);
1808
1809 if (count > 1)
1810 {
1811 buffer[count - 1]
1812 = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1813 (PTRACE_ARG3_TYPE) (addr + (count - 1)
1814 * sizeof (PTRACE_XFER_TYPE)),
1815 0);
1816 }
1817
1818 /* Copy data to be written over corresponding part of buffer */
1819
1820 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
1821
1822 /* Write the entire buffer. */
1823
1824 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1825 {
1826 errno = 0;
1827 ptrace (PTRACE_POKETEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
1828 if (errno)
1829 return errno;
1830 }
1831
1832 return 0;
1833 }
1834
1835 static int linux_supports_tracefork_flag;
1836
1837 /* Helper functions for linux_test_for_tracefork, called via clone (). */
1838
1839 static int
1840 linux_tracefork_grandchild (void *arg)
1841 {
1842 _exit (0);
1843 }
1844
1845 #define STACK_SIZE 4096
1846
1847 static int
1848 linux_tracefork_child (void *arg)
1849 {
1850 ptrace (PTRACE_TRACEME, 0, 0, 0);
1851 kill (getpid (), SIGSTOP);
1852 #ifdef __ia64__
1853 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
1854 CLONE_VM | SIGCHLD, NULL);
1855 #else
1856 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
1857 CLONE_VM | SIGCHLD, NULL);
1858 #endif
1859 _exit (0);
1860 }
1861
1862 /* Wrapper function for waitpid which handles EINTR. */
1863
1864 static int
1865 my_waitpid (int pid, int *status, int flags)
1866 {
1867 int ret;
1868 do
1869 {
1870 ret = waitpid (pid, status, flags);
1871 }
1872 while (ret == -1 && errno == EINTR);
1873
1874 return ret;
1875 }
1876
1877 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
1878 sure that we can enable the option, and that it had the desired
1879 effect. */
1880
1881 static void
1882 linux_test_for_tracefork (void)
1883 {
1884 int child_pid, ret, status;
1885 long second_pid;
1886 char *stack = xmalloc (STACK_SIZE * 4);
1887
1888 linux_supports_tracefork_flag = 0;
1889
1890 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
1891 #ifdef __ia64__
1892 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
1893 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
1894 #else
1895 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
1896 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
1897 #endif
1898 if (child_pid == -1)
1899 perror_with_name ("clone");
1900
1901 ret = my_waitpid (child_pid, &status, 0);
1902 if (ret == -1)
1903 perror_with_name ("waitpid");
1904 else if (ret != child_pid)
1905 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
1906 if (! WIFSTOPPED (status))
1907 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
1908
1909 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
1910 if (ret != 0)
1911 {
1912 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
1913 if (ret != 0)
1914 {
1915 warning ("linux_test_for_tracefork: failed to kill child");
1916 return;
1917 }
1918
1919 ret = my_waitpid (child_pid, &status, 0);
1920 if (ret != child_pid)
1921 warning ("linux_test_for_tracefork: failed to wait for killed child");
1922 else if (!WIFSIGNALED (status))
1923 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
1924 "killed child", status);
1925
1926 return;
1927 }
1928
1929 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
1930 if (ret != 0)
1931 warning ("linux_test_for_tracefork: failed to resume child");
1932
1933 ret = my_waitpid (child_pid, &status, 0);
1934
1935 if (ret == child_pid && WIFSTOPPED (status)
1936 && status >> 16 == PTRACE_EVENT_FORK)
1937 {
1938 second_pid = 0;
1939 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
1940 if (ret == 0 && second_pid != 0)
1941 {
1942 int second_status;
1943
1944 linux_supports_tracefork_flag = 1;
1945 my_waitpid (second_pid, &second_status, 0);
1946 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
1947 if (ret != 0)
1948 warning ("linux_test_for_tracefork: failed to kill second child");
1949 my_waitpid (second_pid, &status, 0);
1950 }
1951 }
1952 else
1953 warning ("linux_test_for_tracefork: unexpected result from waitpid "
1954 "(%d, status 0x%x)", ret, status);
1955
1956 do
1957 {
1958 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
1959 if (ret != 0)
1960 warning ("linux_test_for_tracefork: failed to kill child");
1961 my_waitpid (child_pid, &status, 0);
1962 }
1963 while (WIFSTOPPED (status));
1964
1965 free (stack);
1966 }
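/* Editor's sketch, not part of linux-low.c: how the extended wait status
   tested above is typically decoded by a tracer.  EXTRACT_FORKED_PID is a
   hypothetical helper; PID and STATUS are assumed to come from waitpid on a
   child that has PTRACE_O_TRACEFORK set.  On older systems the event
   constants may need fallback definitions like the ones used in this file.  */

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static long
extract_forked_pid (int pid, int status)
{
  long new_pid = 0;

  /* A ptrace event stop looks like a SIGTRAP stop with the event code
     stored in bits 16 and up of the wait status.  */
  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_FORK)
    ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

  return new_pid;   /* 0 if this was not a fork event */
}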
1967
1968
1969 static void
1970 linux_look_up_symbols (void)
1971 {
1972 #ifdef USE_THREAD_DB
1973 if (thread_db_active)
1974 return;
1975
1976 thread_db_active = thread_db_init (!linux_supports_tracefork_flag);
1977 #endif
1978 }
1979
1980 static void
1981 linux_request_interrupt (void)
1982 {
1983 extern unsigned long signal_pid;
1984
1985 if (cont_thread != 0 && cont_thread != -1)
1986 {
1987 struct lwp_info *lwp;
1988
1989 lwp = get_thread_lwp (current_inferior);
1990 kill_lwp (lwp->lwpid, SIGINT);
1991 }
1992 else
1993 kill_lwp (signal_pid, SIGINT);
1994 }
1995
1996 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
1997 to debugger memory starting at MYADDR. */
1998
1999 static int
2000 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
2001 {
2002 char filename[PATH_MAX];
2003 int fd, n;
2004
2005 snprintf (filename, sizeof filename, "/proc/%ld/auxv", inferior_pid);
2006
2007 fd = open (filename, O_RDONLY);
2008 if (fd < 0)
2009 return -1;
2010
2011 if (offset != (CORE_ADDR) 0
2012 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
2013 n = -1;
2014 else
2015 n = read (fd, myaddr, len);
2016
2017 close (fd);
2018
2019 return n;
2020 }
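/* Editor's sketch, not part of linux-low.c: one way a caller could walk the
   raw bytes returned by linux_read_auxv.  The auxiliary vector is an array
   of ElfW(auxv_t) records terminated by an AT_NULL entry; FIND_AUXV_ENTRY is
   a hypothetical helper and ENTRY_TYPE is whichever AT_* tag is wanted.  */

#include <elf.h>
#include <link.h>      /* for the ElfW macro */
#include <string.h>

static unsigned long
find_auxv_entry (const unsigned char *buf, int n, unsigned long entry_type)
{
  ElfW(auxv_t) aux;
  int i;

  for (i = 0; i + (int) sizeof aux <= n; i += sizeof aux)
    {
      memcpy (&aux, buf + i, sizeof aux);   /* avoid alignment assumptions */
      if (aux.a_type == AT_NULL)
        break;
      if ((unsigned long) aux.a_type == entry_type)
        return aux.a_un.a_val;
    }
  return 0;
}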
2021
2022 /* These watchpoint-related wrapper functions simply pass the call on to the
2023    target if it has registered a corresponding function. */
2024
2025 static int
2026 linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
2027 {
2028 if (the_low_target.insert_watchpoint != NULL)
2029 return the_low_target.insert_watchpoint (type, addr, len);
2030 else
2031 /* Unsupported (see target.h). */
2032 return 1;
2033 }
2034
2035 static int
2036 linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
2037 {
2038 if (the_low_target.remove_watchpoint != NULL)
2039 return the_low_target.remove_watchpoint (type, addr, len);
2040 else
2041 /* Unsupported (see target.h). */
2042 return 1;
2043 }
2044
2045 static int
2046 linux_stopped_by_watchpoint (void)
2047 {
2048 if (the_low_target.stopped_by_watchpoint != NULL)
2049 return the_low_target.stopped_by_watchpoint ();
2050 else
2051 return 0;
2052 }
2053
2054 static CORE_ADDR
2055 linux_stopped_data_address (void)
2056 {
2057 if (the_low_target.stopped_data_address != NULL)
2058 return the_low_target.stopped_data_address ();
2059 else
2060 return 0;
2061 }
2062
2063 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2064 #if defined(__mcoldfire__)
2065 /* These should really be defined in the kernel's ptrace.h header. */
2066 #define PT_TEXT_ADDR 49*4
2067 #define PT_DATA_ADDR 50*4
2068 #define PT_TEXT_END_ADDR 51*4
2069 #endif
2070
2071 /* Under uClinux, programs are loaded at non-zero offsets, which we need
2072 to tell gdb about. */
2073
2074 static int
2075 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
2076 {
2077 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
2078 unsigned long text, text_end, data;
2079 int pid = get_thread_lwp (current_inferior)->head.id;
2080
2081 errno = 0;
2082
2083 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
2084 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
2085 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
2086
2087 if (errno == 0)
2088 {
2089 /* Both text and data offsets produced at compile-time (and so
2090 used by gdb) are relative to the beginning of the program,
2091 with the data segment immediately following the text segment.
2092 However, the actual runtime layout in memory may put the data
2093 somewhere else, so when we send gdb a data base-address, we
2094 use the real data base address and subtract the compile-time
2095 data base-address from it (which is just the length of the
2096 text segment). BSS immediately follows data in both
2097 cases. */
2098 *text_p = text;
2099 *data_p = data - (text_end - text);
2100
2101 return 1;
2102 }
2103 #endif
2104 return 0;
2105 }
2106 #endif
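/* Editor's note, not part of linux-low.c: a worked instance of the offset
   computation in linux_read_offsets, with made-up addresses.  If the text
   segment runs from 0x8000 to 0x12000 and the data segment was actually
   loaded at 0x20000, the base gdb must add to compile-time data addresses is
   0x20000 - (0x12000 - 0x8000) = 0x16000, so the first compile-time data
   address (0xa000, right after the text) maps back to 0x20000.  */

#include <assert.h>

int
main (void)
{
  unsigned long text = 0x8000, text_end = 0x12000, data = 0x20000;
  unsigned long data_base = data - (text_end - text);

  assert (data_base == 0x16000);
  assert (0xa000 + data_base == data);   /* first data byte relocates correctly */
  return 0;
}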
2107
2108 static int
2109 linux_qxfer_osdata (const char *annex,
2110 unsigned char *readbuf, unsigned const char *writebuf,
2111 CORE_ADDR offset, int len)
2112 {
2113 /* We take a snapshot of the process list when the object starts to be
2114    read. */
2115 static const char *buf;
2116 static long len_avail = -1;
2117 static struct buffer buffer;
2118
2119 DIR *dirp;
2120
2121 if (strcmp (annex, "processes") != 0)
2122 return 0;
2123
2124 if (!readbuf || writebuf)
2125 return 0;
2126
2127 if (offset == 0)
2128 {
2129 if (len_avail != -1 && len_avail != 0)
2130 buffer_free (&buffer);
2131 len_avail = 0;
2132 buf = NULL;
2133 buffer_init (&buffer);
2134 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
2135
2136 dirp = opendir ("/proc");
2137 if (dirp)
2138 {
2139 struct dirent *dp;
2140 while ((dp = readdir (dirp)) != NULL)
2141 {
2142 struct stat statbuf;
2143 char procentry[sizeof ("/proc/4294967295")];
2144
2145 if (!isdigit ((unsigned char) dp->d_name[0])
2146 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
2147 continue;
2148
2149 sprintf (procentry, "/proc/%s", dp->d_name);
2150 if (stat (procentry, &statbuf) == 0
2151 && S_ISDIR (statbuf.st_mode))
2152 {
2153 char pathname[128];
2154 FILE *f;
2155 char cmd[MAXPATHLEN + 1];
2156 struct passwd *entry;
2157
2158 sprintf (pathname, "/proc/%s/cmdline", dp->d_name);
2159 entry = getpwuid (statbuf.st_uid);
2160
2161 if ((f = fopen (pathname, "r")) != NULL)
2162 {
2163 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
2164 if (len > 0)
2165 {
2166 int i;
2167 for (i = 0; i < len; i++)
2168 if (cmd[i] == '\0')
2169 cmd[i] = ' ';
2170 cmd[len] = '\0';
2171
2172 buffer_xml_printf (
2173 &buffer,
2174 "<item>"
2175 "<column name=\"pid\">%s</column>"
2176 "<column name=\"user\">%s</column>"
2177 "<column name=\"command\">%s</column>"
2178 "</item>",
2179 dp->d_name,
2180 entry ? entry->pw_name : "?",
2181 cmd);
2182 }
2183 fclose (f);
2184 }
2185 }
2186 }
2187
2188 closedir (dirp);
2189 }
2190 buffer_grow_str0 (&buffer, "</osdata>\n");
2191 buf = buffer_finish (&buffer);
2192 len_avail = strlen (buf);
2193 }
2194
2195 if (offset >= len_avail)
2196 {
2197 /* Done. Get rid of the data. */
2198 buffer_free (&buffer);
2199 buf = NULL;
2200 len_avail = 0;
2201 return 0;
2202 }
2203
2204 if (len > len_avail - offset)
2205 len = len_avail - offset;
2206 memcpy (readbuf, buf + offset, len);
2207
2208 return len;
2209 }
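/* Editor's sketch, not part of linux-low.c: how the offset/length contract of
   linux_qxfer_osdata is typically consumed.  DRAIN_OSDATA_EXAMPLE is a
   hypothetical helper that reuses the file's CORE_ADDR type; each call
   returns at most LEN bytes of the XML snapshot, and a return of 0 means the
   snapshot has been exhausted and freed.  */

static void
drain_osdata_example (void)
{
  unsigned char chunk[1024];
  CORE_ADDR offset = 0;
  int n;

  while ((n = linux_qxfer_osdata ("processes", chunk, NULL,
                                  offset, sizeof chunk)) > 0)
    offset += n;   /* advance past the bytes already returned */
}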
2210
2211 static int
2212 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
2213 unsigned const char *writebuf, CORE_ADDR offset, int len)
2214 {
2215 struct siginfo siginfo;
2216 long pid = -1;
2217
2218 if (current_inferior == NULL)
2219 return -1;
2220
2221 pid = pid_of (get_thread_lwp (current_inferior));
2222
2223 if (debug_threads)
2224 fprintf (stderr, "%s siginfo for lwp %ld.\n",
2225 readbuf != NULL ? "Reading" : "Writing",
2226 pid);
2227
2228 if (offset > sizeof (siginfo))
2229 return -1;
2230
2231 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
2232 return -1;
2233
2234 if (offset + len > sizeof (siginfo))
2235 len = sizeof (siginfo) - offset;
2236
2237 if (readbuf != NULL)
2238 memcpy (readbuf, (char *) &siginfo + offset, len);
2239 else
2240 {
2241 memcpy ((char *) &siginfo + offset, writebuf, len);
2242 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
2243 return -1;
2244 }
2245
2246 return len;
2247 }
2248
2249 static struct target_ops linux_target_ops = {
2250 linux_create_inferior,
2251 linux_attach,
2252 linux_kill,
2253 linux_detach,
2254 linux_join,
2255 linux_thread_alive,
2256 linux_resume,
2257 linux_wait,
2258 linux_fetch_registers,
2259 linux_store_registers,
2260 linux_read_memory,
2261 linux_write_memory,
2262 linux_look_up_symbols,
2263 linux_request_interrupt,
2264 linux_read_auxv,
2265 linux_insert_watchpoint,
2266 linux_remove_watchpoint,
2267 linux_stopped_by_watchpoint,
2268 linux_stopped_data_address,
2269 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2270 linux_read_offsets,
2271 #else
2272 NULL,
2273 #endif
2274 #ifdef USE_THREAD_DB
2275 thread_db_get_tls_address,
2276 #else
2277 NULL,
2278 #endif
2279 NULL,
2280 hostio_last_error_from_errno,
2281 linux_qxfer_osdata,
2282 linux_xfer_siginfo,
2283 };
2284
2285 static void
2286 linux_init_signals (void)
2287 {
2288 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
2289 to find what the cancel signal actually is. */
2290 signal (__SIGRTMIN+1, SIG_IGN);
2291 }
2292
2293 void
2294 initialize_low (void)
2295 {
2296 thread_db_active = 0;
2297 set_target_ops (&linux_target_ops);
2298 set_breakpoint_data (the_low_target.breakpoint,
2299 the_low_target.breakpoint_len);
2300 linux_init_signals ();
2301 linux_test_for_tracefork ();
2302 #ifdef HAVE_LINUX_REGSETS
2303 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
2304 ;
2305 disabled_regsets = xmalloc (num_regsets);
2306 #endif
2307 }
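/* Editor's sketch, not part of linux-low.c: the sentinel-terminated table
   pattern counted by the HAVE_LINUX_REGSETS loop above, using a made-up
   FAKE_REGSET type.  The real target_regsets array ends with an entry whose
   size is negative, which is what the loop looks for.  */

#include <assert.h>

struct fake_regset { int size; };

static struct fake_regset fake_regsets[] =
  { { 16 }, { 32 }, { 8 }, { -1 } };   /* negative size marks the end */

int
main (void)
{
  int n;

  for (n = 0; fake_regsets[n].size >= 0; n++)
    ;   /* count entries up to, but not including, the sentinel */
  assert (n == 3);
  return 0;
}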