gdb/inf-ttrace.c (deliverable/binutils-gdb.git)
1 /* Low-level child interface to ttrace.
2
3 Copyright (C) 2004-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 /* The ttrace(2) system call didn't exist before HP-UX 10.30. Don't
23 try to compile this code unless we have it. */
24 #ifdef HAVE_TTRACE
25
26 #include "command.h"
27 #include "gdbcore.h"
28 #include "gdbthread.h"
29 #include "inferior.h"
30 #include "terminal.h"
31 #include "target.h"
32
33 #include "gdb_assert.h"
34 #include <string.h>
35 #include <sys/mman.h>
36 #include <sys/ttrace.h>
37 #include <signal.h>
38
39 #include "inf-child.h"
40 #include "inf-ttrace.h"
41 #include "common/filestuff.h"
42
43 \f
44
45 /* HP-UX uses a threading model where each user-space thread
46 corresponds to a kernel thread. These kernel threads are called
47 lwps. The ttrace(2) interface gives us almost full control over
48 the threads, which makes it very easy to support them in GDB. We
49 identify the threads by process ID and lwp ID. ttrace(2) also
50 provides us with a thread's user ID (in the `tts_user_tid' member
51 of `ttstate_t'), but we don't use that (yet), as it isn't necessary
52 to uniquely label the thread. */
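An illustrative aside (not part of the original file): under the scheme described above, a kernel thread is labelled on the GDB side by packing the process ID and lwp ID into a ptid with the accessors this file already uses; the third ("tid") slot stays zero. A minimal sketch, with a hypothetical helper name:

/* Sketch: label an HP-UX lwp the way this target does.  */
static ptid_t
example_label_lwp (pid_t pid, lwpid_t lwpid)
{
  ptid_t ptid = ptid_build (pid, lwpid, 0);

  gdb_assert (ptid_get_pid (ptid) == pid);
  gdb_assert (ptid_get_lwp (ptid) == lwpid);
  return ptid;
}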
53
54 /* Number of active lwps. */
55 static int inf_ttrace_num_lwps;
56 \f
57
58 /* On HP-UX versions that have the ttrace(2) system call, we can
59 implement "hardware" watchpoints by fiddling with the protection of
60 pages in the address space that contain the variable being watched.
61 In order to implement this, we keep a dictionary of pages for which
62 we have changed the protection. */
63
64 struct inf_ttrace_page
65 {
66 CORE_ADDR addr; /* Page address. */
67 int prot; /* Protection. */
68 int refcount; /* Reference count. */
69 struct inf_ttrace_page *next;
70 struct inf_ttrace_page *prev;
71 };
72
73 struct inf_ttrace_page_dict
74 {
75 struct inf_ttrace_page buckets[128];
76 int pagesize; /* Page size. */
77 int count; /* Number of pages in this dictionary. */
78 } inf_ttrace_page_dict;
79
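As a hedged illustration of how the dictionary above is addressed (mirroring the lookup code further down): a watched address is reduced to its page, and the page index modulo the bucket count selects a chain; each buckets[] element acts as a dummy list head, and colliding pages are linked through the next/prev pointers.

/* Sketch: bucket selection for a page address.  The helper name is
   hypothetical; the page size is whatever getpagesize() returned.  */
static int
example_bucket_for (CORE_ADDR addr)
{
  const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
  const int pagesize = inf_ttrace_page_dict.pagesize;

  return (addr / pagesize) % num_buckets;
}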
80 struct inf_ttrace_private_thread_info
81 {
82 int dying;
83 };
84
85 /* Number of lwps that are currently in a system call. */
86 static int inf_ttrace_num_lwps_in_syscall;
87
88 /* Flag to indicate whether we should re-enable page protections after
89 the next wait. */
90 static int inf_ttrace_reenable_page_protections;
91
92 /* Enable system call events for process PID. */
93
94 static void
95 inf_ttrace_enable_syscall_events (pid_t pid)
96 {
97 ttevent_t tte;
98 ttstate_t tts;
99
100 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
101
102 if (ttrace (TT_PROC_GET_EVENT_MASK, pid, 0,
103 (uintptr_t)&tte, sizeof tte, 0) == -1)
104 perror_with_name (("ttrace"));
105
106 tte.tte_events |= (TTEVT_SYSCALL_ENTRY | TTEVT_SYSCALL_RETURN);
107
108 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
109 (uintptr_t)&tte, sizeof tte, 0) == -1)
110 perror_with_name (("ttrace"));
111
112 if (ttrace (TT_PROC_GET_FIRST_LWP_STATE, pid, 0,
113 (uintptr_t)&tts, sizeof tts, 0) == -1)
114 perror_with_name (("ttrace"));
115
116 if (tts.tts_flags & TTS_INSYSCALL)
117 inf_ttrace_num_lwps_in_syscall++;
118
119 /* FIXME: Handle multiple threads. */
120 }
121
122 /* Disable system call events for process PID. */
123
124 static void
125 inf_ttrace_disable_syscall_events (pid_t pid)
126 {
127 ttevent_t tte;
128
129 gdb_assert (inf_ttrace_page_dict.count == 0);
130
131 if (ttrace (TT_PROC_GET_EVENT_MASK, pid, 0,
132 (uintptr_t)&tte, sizeof tte, 0) == -1)
133 perror_with_name (("ttrace"));
134
135 tte.tte_events &= ~(TTEVT_SYSCALL_ENTRY | TTEVT_SYSCALL_RETURN);
136
137 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
138 (uintptr_t)&tte, sizeof tte, 0) == -1)
139 perror_with_name (("ttrace"));
140
141 inf_ttrace_num_lwps_in_syscall = 0;
142 }
143
144 /* Get information about the page at address ADDR for process PID from
145 the dictionary. */
146
147 static struct inf_ttrace_page *
148 inf_ttrace_get_page (pid_t pid, CORE_ADDR addr)
149 {
150 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
151 const int pagesize = inf_ttrace_page_dict.pagesize;
152 int bucket;
153 struct inf_ttrace_page *page;
154
155 bucket = (addr / pagesize) % num_buckets;
156 page = &inf_ttrace_page_dict.buckets[bucket];
157 while (page)
158 {
159 if (page->addr == addr)
160 break;
161
162 page = page->next;
163 }
164
165 return page;
166 }
167
168 /* Add the page at address ADDR for process PID to the dictionary. */
169
170 static struct inf_ttrace_page *
171 inf_ttrace_add_page (pid_t pid, CORE_ADDR addr)
172 {
173 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
174 const int pagesize = inf_ttrace_page_dict.pagesize;
175 int bucket;
176 struct inf_ttrace_page *page;
177 struct inf_ttrace_page *prev = NULL;
178
179 bucket = (addr / pagesize) % num_buckets;
180 page = &inf_ttrace_page_dict.buckets[bucket];
181 while (page)
182 {
183 if (page->addr == addr)
184 break;
185
186 prev = page;
187 page = page->next;
188 }
189
190 if (!page)
191 {
192 int prot;
193
194 if (ttrace (TT_PROC_GET_MPROTECT, pid, 0,
195 addr, 0, (uintptr_t)&prot) == -1)
196 perror_with_name (("ttrace"));
197
198 page = XNEW (struct inf_ttrace_page);
199 page->addr = addr;
200 page->prot = prot;
201 page->refcount = 0;
202 page->next = NULL;
203
204 page->prev = prev;
205 prev->next = page;
206
207 inf_ttrace_page_dict.count++;
208 if (inf_ttrace_page_dict.count == 1)
209 inf_ttrace_enable_syscall_events (pid);
210
211 if (inf_ttrace_num_lwps_in_syscall == 0)
212 {
213 if (ttrace (TT_PROC_SET_MPROTECT, pid, 0,
214 addr, pagesize, prot & ~PROT_WRITE) == -1)
215 perror_with_name (("ttrace"));
216 }
217 }
218
219 return page;
220 }
221
222 /* Insert the page at address ADDR of process PID to the dictionary. */
223
224 static void
225 inf_ttrace_insert_page (pid_t pid, CORE_ADDR addr)
226 {
227 struct inf_ttrace_page *page;
228
229 page = inf_ttrace_get_page (pid, addr);
230 if (!page)
231 page = inf_ttrace_add_page (pid, addr);
232
233 page->refcount++;
234 }
235
236 /* Remove the page at address ADDR of process PID from the dictionary. */
237
238 static void
239 inf_ttrace_remove_page (pid_t pid, CORE_ADDR addr)
240 {
241 const int pagesize = inf_ttrace_page_dict.pagesize;
242 struct inf_ttrace_page *page;
243
244 page = inf_ttrace_get_page (pid, addr);
245 page->refcount--;
246
247 gdb_assert (page->refcount >= 0);
248
249 if (page->refcount == 0)
250 {
251 if (inf_ttrace_num_lwps_in_syscall == 0)
252 {
253 if (ttrace (TT_PROC_SET_MPROTECT, pid, 0,
254 addr, pagesize, page->prot) == -1)
255 perror_with_name (("ttrace"));
256 }
257
258 inf_ttrace_page_dict.count--;
259 if (inf_ttrace_page_dict.count == 0)
260 inf_ttrace_disable_syscall_events (pid);
261
262 page->prev->next = page->next;
263 if (page->next)
264 page->next->prev = page->prev;
265
266 xfree (page);
267 }
268 }
269
270 /* Mask the bits in PROT from the page protections that are currently
271 in the dictionary for process PID. */
272
273 static void
274 inf_ttrace_mask_page_protections (pid_t pid, int prot)
275 {
276 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
277 const int pagesize = inf_ttrace_page_dict.pagesize;
278 int bucket;
279
280 for (bucket = 0; bucket < num_buckets; bucket++)
281 {
282 struct inf_ttrace_page *page;
283
284 page = inf_ttrace_page_dict.buckets[bucket].next;
285 while (page)
286 {
287 if (ttrace (TT_PROC_SET_MPROTECT, pid, 0,
288 page->addr, pagesize, page->prot & ~prot) == -1)
289 perror_with_name (("ttrace"));
290
291 page = page->next;
292 }
293 }
294 }
295
296 /* Write-protect the pages in the dictionary for process PID. */
297
298 static void
299 inf_ttrace_enable_page_protections (pid_t pid)
300 {
301 inf_ttrace_mask_page_protections (pid, PROT_WRITE);
302 }
303
304 /* Restore the protection of the pages in the dictionary for process
305 PID. */
306
307 static void
308 inf_ttrace_disable_page_protections (pid_t pid)
309 {
310 inf_ttrace_mask_page_protections (pid, 0);
311 }
312
313 /* Insert a "hardware" watchpoint for LEN bytes at address ADDR of
314 type TYPE. */
315
316 static int
317 inf_ttrace_insert_watchpoint (struct target_ops *self,
318 CORE_ADDR addr, int len, int type,
319 struct expression *cond)
320 {
321 const int pagesize = inf_ttrace_page_dict.pagesize;
322 pid_t pid = ptid_get_pid (inferior_ptid);
323 CORE_ADDR page_addr;
324 int num_pages;
325 int page;
326
327 gdb_assert (type == hw_write);
328
329 page_addr = (addr / pagesize) * pagesize;
330 num_pages = ((addr + len - 1) / pagesize) - (addr / pagesize) + 1;
331
332 for (page = 0; page < num_pages; page++, page_addr += pagesize)
333 inf_ttrace_insert_page (pid, page_addr);
334
335 return 1;
336 }
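A small worked sketch of the page arithmetic above (page size and addresses are made up): a 6-byte watch starting two bytes before a 4 KiB page boundary touches two pages, which is why the page count has to be derived from the first and last byte of the watched range rather than from the length alone.

/* Sketch: a watch at 0x1ffe of length 6 spans pages 0x1000 and 0x2000.  */
static void
example_watch_span (void)
{
  const int pagesize = 4096;
  CORE_ADDR addr = 0x1ffe;
  int len = 6;
  CORE_ADDR page_addr = (addr / pagesize) * pagesize;                    /* 0x1000 */
  int num_pages = ((addr + len - 1) / pagesize) - (addr / pagesize) + 1; /* 2 */

  gdb_assert (page_addr == 0x1000 && num_pages == 2);
}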
337
338 /* Remove a "hardware" watchpoint for LEN bytes at address ADDR of
339 type TYPE. */
340
341 static int
342 inf_ttrace_remove_watchpoint (struct target_ops *self,
343 CORE_ADDR addr, int len, int type,
344 struct expression *cond)
345 {
346 const int pagesize = inf_ttrace_page_dict.pagesize;
347 pid_t pid = ptid_get_pid (inferior_ptid);
348 CORE_ADDR page_addr;
349 int num_pages;
350 int page;
351
352 gdb_assert (type == hw_write);
353
354 page_addr = (addr / pagesize) * pagesize;
355 num_pages = ((addr + len - 1) / pagesize) - (addr / pagesize) + 1;
356
357 for (page = 0; page < num_pages; page++, page_addr += pagesize)
358 inf_ttrace_remove_page (pid, page_addr);
359
360 return 1;
361 }
362
363 static int
364 inf_ttrace_can_use_hw_breakpoint (struct target_ops *self,
365 int type, int len, int ot)
366 {
367 return (type == bp_hardware_watchpoint);
368 }
369
370 static int
371 inf_ttrace_region_ok_for_hw_watchpoint (struct target_ops *self,
372 CORE_ADDR addr, int len)
373 {
374 return 1;
375 }
376
377 /* Return non-zero if the current inferior was (potentially) stopped
378 by hitting a "hardware" watchpoint. */
379
380 static int
381 inf_ttrace_stopped_by_watchpoint (struct target_ops *ops)
382 {
383 pid_t pid = ptid_get_pid (inferior_ptid);
384 lwpid_t lwpid = ptid_get_lwp (inferior_ptid);
385 ttstate_t tts;
386
387 if (inf_ttrace_page_dict.count > 0)
388 {
389 if (ttrace (TT_LWP_GET_STATE, pid, lwpid,
390 (uintptr_t)&tts, sizeof tts, 0) == -1)
391 perror_with_name (("ttrace"));
392
393 if (tts.tts_event == TTEVT_SIGNAL
394 && tts.tts_u.tts_signal.tts_signo == SIGBUS)
395 {
396 const int pagesize = inf_ttrace_page_dict.pagesize;
397 void *addr = tts.tts_u.tts_signal.tts_siginfo.si_addr;
398 CORE_ADDR page_addr = ((uintptr_t)addr / pagesize) * pagesize;
399
400 if (inf_ttrace_get_page (pid, page_addr))
401 return 1;
402 }
403 }
404
405 return 0;
406 }
407 \f
408
409 /* When tracking a vfork(2), we cannot detach from the parent until
410 after the child has called exec(3) or has exited. If we are still
411 attached to the parent, this variable will be set to the process ID
412 of the parent. Otherwise it will be set to -1. */
413 static pid_t inf_ttrace_vfork_ppid = -1;
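A standalone sketch of the vfork(2) behaviour the comment above depends on; nothing below is part of GDB. The vforking parent shares its address space with the child and stays suspended until the child execs or exits, so the debugger must keep its hands off the parent (and stay attached to it) until that point.

#include <unistd.h>
#include <sys/wait.h>
#include <stdio.h>

int
main (void)
{
  pid_t child = vfork ();

  if (child == 0)
    {
      /* The child may only exec or _exit; the parent is blocked until then.  */
      execlp ("true", "true", (char *) NULL);
      _exit (127);
    }

  /* Only reached once the child has exec'd or exited.  */
  printf ("child %ld released the parent\n", (long) child);
  waitpid (child, NULL, 0);
  return 0;
}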
414
415 static int
416 inf_ttrace_follow_fork (struct target_ops *ops, int follow_child,
417 int detach_fork)
418 {
419 pid_t pid, fpid;
420 lwpid_t lwpid, flwpid;
421 ttstate_t tts;
422 struct thread_info *tp = inferior_thread ();
423
424 gdb_assert (tp->pending_follow.kind == TARGET_WAITKIND_FORKED
425 || tp->pending_follow.kind == TARGET_WAITKIND_VFORKED);
426
427 pid = ptid_get_pid (inferior_ptid);
428 lwpid = ptid_get_lwp (inferior_ptid);
429
430 /* Get all important details that core GDB doesn't (and shouldn't)
431 know about. */
432 if (ttrace (TT_LWP_GET_STATE, pid, lwpid,
433 (uintptr_t)&tts, sizeof tts, 0) == -1)
434 perror_with_name (("ttrace"));
435
436 gdb_assert (tts.tts_event == TTEVT_FORK || tts.tts_event == TTEVT_VFORK);
437
438 if (tts.tts_u.tts_fork.tts_isparent)
439 {
440 pid = tts.tts_pid;
441 lwpid = tts.tts_lwpid;
442 fpid = tts.tts_u.tts_fork.tts_fpid;
443 flwpid = tts.tts_u.tts_fork.tts_flwpid;
444 }
445 else
446 {
447 pid = tts.tts_u.tts_fork.tts_fpid;
448 lwpid = tts.tts_u.tts_fork.tts_flwpid;
449 fpid = tts.tts_pid;
450 flwpid = tts.tts_lwpid;
451 }
452
453 if (follow_child)
454 {
455 struct inferior *inf;
456 struct inferior *parent_inf;
457
458 parent_inf = find_inferior_pid (pid);
459
460 inferior_ptid = ptid_build (fpid, flwpid, 0);
461 inf = add_inferior (fpid);
462 inf->attach_flag = parent_inf->attach_flag;
463 inf->pspace = parent_inf->pspace;
464 inf->aspace = parent_inf->aspace;
465 copy_terminal_info (inf, parent_inf);
466 detach_breakpoints (ptid_build (pid, lwpid, 0));
467
468 target_terminal_ours ();
469 fprintf_unfiltered (gdb_stdlog,
470 _("Attaching after fork to child process %ld.\n"),
471 (long)fpid);
472 }
473 else
474 {
475 inferior_ptid = ptid_build (pid, lwpid, 0);
476 /* Detach any remaining breakpoints in the child. In the case
477 of fork events, we do not need to do this, because breakpoints
478 should have already been removed earlier. */
479 if (tts.tts_event == TTEVT_VFORK)
480 detach_breakpoints (ptid_build (fpid, flwpid, 0));
481
482 target_terminal_ours ();
483 fprintf_unfiltered (gdb_stdlog,
484 _("Detaching after fork from child process %ld.\n"),
485 (long)fpid);
486 }
487
488 if (tts.tts_event == TTEVT_VFORK)
489 {
490 gdb_assert (!tts.tts_u.tts_fork.tts_isparent);
491
492 if (follow_child)
493 {
494 /* We can't detach from the parent yet. */
495 inf_ttrace_vfork_ppid = pid;
496
497 reattach_breakpoints (fpid);
498 }
499 else
500 {
501 if (ttrace (TT_PROC_DETACH, fpid, 0, 0, 0, 0) == -1)
502 perror_with_name (("ttrace"));
503
504 /* Wait till we get the TTEVT_VFORK event in the parent.
505 This indicates that the child has called exec(3) or has
506 exited and that the parent is ready to be traced again. */
507 if (ttrace_wait (pid, lwpid, TTRACE_WAITOK, &tts, sizeof tts) == -1)
508 perror_with_name (("ttrace_wait"));
509 gdb_assert (tts.tts_event == TTEVT_VFORK);
510 gdb_assert (tts.tts_u.tts_fork.tts_isparent);
511
512 reattach_breakpoints (pid);
513 }
514 }
515 else
516 {
517 gdb_assert (tts.tts_u.tts_fork.tts_isparent);
518
519 if (follow_child)
520 {
521 if (ttrace (TT_PROC_DETACH, pid, 0, 0, 0, 0) == -1)
522 perror_with_name (("ttrace"));
523 }
524 else
525 {
526 if (ttrace (TT_PROC_DETACH, fpid, 0, 0, 0, 0) == -1)
527 perror_with_name (("ttrace"));
528 }
529 }
530
531 if (follow_child)
532 {
533 struct thread_info *ti;
534
535 /* The child will start out single-threaded. */
536 inf_ttrace_num_lwps = 1;
537 inf_ttrace_num_lwps_in_syscall = 0;
538
539 /* Delete parent. */
540 delete_thread_silent (ptid_build (pid, lwpid, 0));
541 detach_inferior (pid);
542
543 /* Add child thread. inferior_ptid was already set above. */
544 ti = add_thread_silent (inferior_ptid);
545 ti->private =
546 xmalloc (sizeof (struct inf_ttrace_private_thread_info));
547 memset (ti->private, 0,
548 sizeof (struct inf_ttrace_private_thread_info));
549 }
550
551 return 0;
552 }
553 \f
554
555 /* File descriptors for pipes used as semaphores during initial
556 startup of an inferior. */
557 static int inf_ttrace_pfd1[2];
558 static int inf_ttrace_pfd2[2];
559
560 static void
561 do_cleanup_pfds (void *dummy)
562 {
563 close (inf_ttrace_pfd1[0]);
564 close (inf_ttrace_pfd1[1]);
565 close (inf_ttrace_pfd2[0]);
566 close (inf_ttrace_pfd2[1]);
567
568 unmark_fd_no_cloexec (inf_ttrace_pfd1[0]);
569 unmark_fd_no_cloexec (inf_ttrace_pfd1[1]);
570 unmark_fd_no_cloexec (inf_ttrace_pfd2[0]);
571 unmark_fd_no_cloexec (inf_ttrace_pfd2[1]);
572 }
573
574 static void
575 inf_ttrace_prepare (void)
576 {
577 if (pipe (inf_ttrace_pfd1) == -1)
578 perror_with_name (("pipe"));
579
580 if (pipe (inf_ttrace_pfd2) == -1)
581 {
582 close (inf_ttrace_pfd1[0]);
583 close (inf_ttrace_pfd1[1]);
584 perror_with_name (("pipe"));
585 }
586
587 mark_fd_no_cloexec (inf_ttrace_pfd1[0]);
588 mark_fd_no_cloexec (inf_ttrace_pfd1[1]);
589 mark_fd_no_cloexec (inf_ttrace_pfd2[0]);
590 mark_fd_no_cloexec (inf_ttrace_pfd2[1]);
591 }
592
593 /* Prepare to be traced. */
594
595 static void
596 inf_ttrace_me (void)
597 {
598 struct cleanup *old_chain = make_cleanup (do_cleanup_pfds, 0);
599 char c;
600
601 /* "Trace me, Dr. Memory!" */
602 if (ttrace (TT_PROC_SETTRC, 0, 0, 0, TT_VERSION, 0) == -1)
603 perror_with_name (("ttrace"));
604
605 /* Tell our parent that we are ready to be traced. */
606 if (write (inf_ttrace_pfd1[1], &c, sizeof c) != sizeof c)
607 perror_with_name (("write"));
608
609 /* Wait until our parent has set the initial event mask. */
610 if (read (inf_ttrace_pfd2[0], &c, sizeof c) != sizeof c)
611 perror_with_name (("read"));
612
613 do_cleanups (old_chain);
614 }
615
616 /* Start tracing PID. */
617
618 static void
619 inf_ttrace_him (struct target_ops *ops, int pid)
620 {
621 struct cleanup *old_chain = make_cleanup (do_cleanup_pfds, 0);
622 ttevent_t tte;
623 char c;
624
625 /* Wait until our child is ready to be traced. */
626 if (read (inf_ttrace_pfd1[0], &c, sizeof c) != sizeof c)
627 perror_with_name (("read"));
628
629 /* Set the initial event mask. */
630 memset (&tte, 0, sizeof (tte));
631 tte.tte_events |= TTEVT_EXEC | TTEVT_EXIT | TTEVT_FORK | TTEVT_VFORK;
632 tte.tte_events |= TTEVT_LWP_CREATE | TTEVT_LWP_EXIT | TTEVT_LWP_TERMINATE;
633 #ifdef TTEVT_BPT_SSTEP
634 tte.tte_events |= TTEVT_BPT_SSTEP;
635 #endif
636 tte.tte_opts |= TTEO_PROC_INHERIT;
637 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
638 (uintptr_t)&tte, sizeof tte, 0) == -1)
639 perror_with_name (("ttrace"));
640
641 /* Tell our child that we have set the initial event mask. */
642 if (write (inf_ttrace_pfd2[1], &c, sizeof c) != sizeof c)
643 perror_with_name (("write"));
644
645 do_cleanups (old_chain);
646
647 if (!target_is_pushed (ops))
648 push_target (ops);
649
650 startup_inferior (START_INFERIOR_TRAPS_EXPECTED);
651
652 /* On some targets, there must be some explicit actions taken after
653 the inferior has been started up. */
654 target_post_startup_inferior (pid_to_ptid (pid));
655 }
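The two pipes above act as a rendezvous between fork_inferior's child side (inf_ttrace_me) and the debugger side (inf_ttrace_him): the child requests tracing, signals readiness on the first pipe, then blocks on the second until the event mask has been installed. A self-contained sketch of that pattern, with the ttrace calls reduced to comments and all names hypothetical:

#include <unistd.h>
#include <stdlib.h>

static int pfd1[2], pfd2[2];	/* child -> parent, parent -> child.  */

int
main (void)
{
  char c = 0;

  if (pipe (pfd1) == -1 || pipe (pfd2) == -1)
    exit (1);

  if (fork () == 0)
    {
      /* Child: request tracing, tell the parent, wait for the mask.  */
      write (pfd1[1], &c, sizeof c);
      read (pfd2[0], &c, sizeof c);
      /* exec the program to be debugged here.  */
      _exit (0);
    }

  /* Parent: wait for the child, set the event mask, release the child.  */
  read (pfd1[0], &c, sizeof c);
  /* ttrace (TT_PROC_SET_EVENT_MASK, ...) would go here.  */
  write (pfd2[1], &c, sizeof c);
  return 0;
}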
656
657 static void
658 inf_ttrace_create_inferior (struct target_ops *ops, char *exec_file,
659 char *allargs, char **env, int from_tty)
660 {
661 int pid;
662
663 gdb_assert (inf_ttrace_num_lwps == 0);
664 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
665 gdb_assert (inf_ttrace_page_dict.count == 0);
666 gdb_assert (inf_ttrace_reenable_page_protections == 0);
667 gdb_assert (inf_ttrace_vfork_ppid == -1);
668
669 pid = fork_inferior (exec_file, allargs, env, inf_ttrace_me, NULL,
670 inf_ttrace_prepare, NULL, NULL);
671
672 inf_ttrace_him (ops, pid);
673 }
674
675 static void
676 inf_ttrace_mourn_inferior (struct target_ops *ops)
677 {
678 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
679 int bucket;
680
681 inf_ttrace_num_lwps = 0;
682 inf_ttrace_num_lwps_in_syscall = 0;
683
684 for (bucket = 0; bucket < num_buckets; bucket++)
685 {
686 struct inf_ttrace_page *page;
687 struct inf_ttrace_page *next;
688
689 page = inf_ttrace_page_dict.buckets[bucket].next;
690 while (page)
691 {
692 next = page->next;
693 xfree (page);
694 page = next;
695 }
696 }
697 inf_ttrace_page_dict.count = 0;
698
699 inf_child_mourn_inferior (ops);
700 }
701
702 /* Assuming we just attached the debugger to a new inferior, create
703 a new thread_info structure for each thread, and add it to our
704 list of threads. */
705
706 static void
707 inf_ttrace_create_threads_after_attach (int pid)
708 {
709 int status;
710 ptid_t ptid;
711 ttstate_t tts;
712 struct thread_info *ti;
713
714 status = ttrace (TT_PROC_GET_FIRST_LWP_STATE, pid, 0,
715 (uintptr_t) &tts, sizeof (ttstate_t), 0);
716 if (status < 0)
717 perror_with_name (_("TT_PROC_GET_FIRST_LWP_STATE ttrace call failed"));
718 gdb_assert (tts.tts_pid == pid);
719
720 /* Add the stopped thread. */
721 ptid = ptid_build (pid, tts.tts_lwpid, 0);
722 ti = add_thread (ptid);
723 ti->private = xzalloc (sizeof (struct inf_ttrace_private_thread_info));
724 inf_ttrace_num_lwps++;
725
726 /* We use the "first stopped thread" as the currently active thread. */
727 inferior_ptid = ptid;
728
729 /* Iterate over all the remaining threads. */
730
731 for (;;)
732 {
733 ptid_t ptid;
734
735 status = ttrace (TT_PROC_GET_NEXT_LWP_STATE, pid, 0,
736 (uintptr_t) &tts, sizeof (ttstate_t), 0);
737 if (status < 0)
738 perror_with_name (_("TT_PROC_GET_NEXT_LWP_STATE ttrace call failed"));
739 if (status == 0)
740 break; /* End of list. */
741
742 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
743 ti = add_thread (ptid);
744 ti->private = xzalloc (sizeof (struct inf_ttrace_private_thread_info));
745 inf_ttrace_num_lwps++;
746 }
747 }
748
749 static void
750 inf_ttrace_attach (struct target_ops *ops, const char *args, int from_tty)
751 {
752 char *exec_file;
753 pid_t pid;
754 ttevent_t tte;
755 struct inferior *inf;
756
757 pid = parse_pid_to_attach (args);
758
759 if (pid == getpid ()) /* Refuse to trace ourselves. */
760 error (_("I refuse to debug myself!"));
761
762 if (from_tty)
763 {
764 exec_file = get_exec_file (0);
765
766 if (exec_file)
767 printf_unfiltered (_("Attaching to program: %s, %s\n"), exec_file,
768 target_pid_to_str (pid_to_ptid (pid)));
769 else
770 printf_unfiltered (_("Attaching to %s\n"),
771 target_pid_to_str (pid_to_ptid (pid)));
772
773 gdb_flush (gdb_stdout);
774 }
775
776 gdb_assert (inf_ttrace_num_lwps == 0);
777 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
778 gdb_assert (inf_ttrace_vfork_ppid == -1);
779
780 if (ttrace (TT_PROC_ATTACH, pid, 0, TT_KILL_ON_EXIT, TT_VERSION, 0) == -1)
781 perror_with_name (("ttrace"));
782
783 inf = current_inferior ();
784 inferior_appeared (inf, pid);
785 inf->attach_flag = 1;
786
787 /* Set the initial event mask. */
788 memset (&tte, 0, sizeof (tte));
789 tte.tte_events |= TTEVT_EXEC | TTEVT_EXIT | TTEVT_FORK | TTEVT_VFORK;
790 tte.tte_events |= TTEVT_LWP_CREATE | TTEVT_LWP_EXIT | TTEVT_LWP_TERMINATE;
791 #ifdef TTEVT_BPT_SSTEP
792 tte.tte_events |= TTEVT_BPT_SSTEP;
793 #endif
794 tte.tte_opts |= TTEO_PROC_INHERIT;
795 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
796 (uintptr_t)&tte, sizeof tte, 0) == -1)
797 perror_with_name (("ttrace"));
798
799 if (!target_is_pushed (ops))
800 push_target (ops);
801
802 inf_ttrace_create_threads_after_attach (pid);
803 }
804
805 static void
806 inf_ttrace_detach (struct target_ops *ops, const char *args, int from_tty)
807 {
808 pid_t pid = ptid_get_pid (inferior_ptid);
809 int sig = 0;
810
811 if (from_tty)
812 {
813 char *exec_file = get_exec_file (0);
814 if (exec_file == 0)
815 exec_file = "";
816 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
817 target_pid_to_str (pid_to_ptid (pid)));
818 gdb_flush (gdb_stdout);
819 }
820 if (args)
821 sig = atoi (args);
822
823 /* ??? The HP-UX 11.0 ttrace(2) manual page doesn't mention that we
824 can pass a signal number here. Does this really work? */
825 if (ttrace (TT_PROC_DETACH, pid, 0, 0, sig, 0) == -1)
826 perror_with_name (("ttrace"));
827
828 if (inf_ttrace_vfork_ppid != -1)
829 {
830 if (ttrace (TT_PROC_DETACH, inf_ttrace_vfork_ppid, 0, 0, 0, 0) == -1)
831 perror_with_name (("ttrace"));
832 inf_ttrace_vfork_ppid = -1;
833 }
834
835 inf_ttrace_num_lwps = 0;
836 inf_ttrace_num_lwps_in_syscall = 0;
837
838 inferior_ptid = null_ptid;
839 detach_inferior (pid);
840
841 inf_child_maybe_unpush_target (ops);
842 }
843
844 static void
845 inf_ttrace_kill (struct target_ops *ops)
846 {
847 pid_t pid = ptid_get_pid (inferior_ptid);
848
849 if (pid == 0)
850 return;
851
852 if (ttrace (TT_PROC_EXIT, pid, 0, 0, 0, 0) == -1)
853 perror_with_name (("ttrace"));
854 /* ??? Is it necessary to call ttrace_wait() here? */
855
856 if (inf_ttrace_vfork_ppid != -1)
857 {
858 if (ttrace (TT_PROC_DETACH, inf_ttrace_vfork_ppid, 0, 0, 0, 0) == -1)
859 perror_with_name (("ttrace"));
860 inf_ttrace_vfork_ppid = -1;
861 }
862
863 target_mourn_inferior ();
864 }
865
866 /* Check if a dying thread is dead by now, and delete it from GDB's
867 thread list if so. */
868 static int
869 inf_ttrace_delete_dead_threads_callback (struct thread_info *info, void *arg)
870 {
871 lwpid_t lwpid;
872 struct inf_ttrace_private_thread_info *p;
873
874 if (is_exited (info->ptid))
875 return 0;
876
877 lwpid = ptid_get_lwp (info->ptid);
878 p = (struct inf_ttrace_private_thread_info *) info->private;
879
880 /* Check if an lwp that was dying is still there or not. */
881 if (p->dying && (kill (lwpid, 0) == -1))
882 /* It's gone now. */
883 delete_thread (info->ptid);
884
885 return 0;
886 }
887
888 /* Resume the lwp pointed to by INFO, with REQUEST, and pass it signal
889 SIG. */
890
891 static void
892 inf_ttrace_resume_lwp (struct thread_info *info, ttreq_t request, int sig)
893 {
894 pid_t pid = ptid_get_pid (info->ptid);
895 lwpid_t lwpid = ptid_get_lwp (info->ptid);
896
897 if (ttrace (request, pid, lwpid, TT_NOPC, sig, 0) == -1)
898 {
899 struct inf_ttrace_private_thread_info *p
900 = (struct inf_ttrace_private_thread_info *) info->private;
901 if (p->dying && errno == EPROTO)
902 /* This is expected, it means the dying lwp is really gone
903 by now. If ttrace had an event to inform the debugger
904 the lwp is really gone, this wouldn't be needed. */
905 delete_thread (info->ptid);
906 else
907 /* This was really unexpected. */
908 perror_with_name (("ttrace"));
909 }
910 }
911
912 /* Callback for iterate_over_threads. */
913
914 static int
915 inf_ttrace_resume_callback (struct thread_info *info, void *arg)
916 {
917 if (!ptid_equal (info->ptid, inferior_ptid) && !is_exited (info->ptid))
918 inf_ttrace_resume_lwp (info, TT_LWP_CONTINUE, 0);
919
920 return 0;
921 }
922
923 static void
924 inf_ttrace_resume (struct target_ops *ops,
925 ptid_t ptid, int step, enum gdb_signal signal)
926 {
927 int resume_all;
928 ttreq_t request = step ? TT_LWP_SINGLE : TT_LWP_CONTINUE;
929 int sig = gdb_signal_to_host (signal);
930 struct thread_info *info;
931
932 /* A specific PTID means `resume only this process id'. */
933 resume_all = (ptid_equal (ptid, minus_one_ptid));
934
935 /* If resuming all threads, it's the current thread that should be
936 handled specially. */
937 if (resume_all)
938 ptid = inferior_ptid;
939
940 info = find_thread_ptid (ptid);
941 inf_ttrace_resume_lwp (info, request, sig);
942
943 if (resume_all)
944 /* Let all the other threads run too. */
945 iterate_over_threads (inf_ttrace_resume_callback, NULL);
946 }
947
948 static ptid_t
949 inf_ttrace_wait (struct target_ops *ops,
950 ptid_t ptid, struct target_waitstatus *ourstatus, int options)
951 {
952 pid_t pid = ptid_get_pid (ptid);
953 lwpid_t lwpid = ptid_get_lwp (ptid);
954 ttstate_t tts;
955 struct thread_info *ti;
956 ptid_t related_ptid;
957
958 /* Until proven otherwise. */
959 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
960
961 if (pid == -1)
962 pid = lwpid = 0;
963
964 gdb_assert (pid != 0 || lwpid == 0);
965
966 do
967 {
968 set_sigint_trap ();
969
970 if (ttrace_wait (pid, lwpid, TTRACE_WAITOK, &tts, sizeof tts) == -1)
971 perror_with_name (("ttrace_wait"));
972
973 if (tts.tts_event == TTEVT_VFORK && tts.tts_u.tts_fork.tts_isparent)
974 {
975 if (inf_ttrace_vfork_ppid != -1)
976 {
977 gdb_assert (inf_ttrace_vfork_ppid == tts.tts_pid);
978
979 if (ttrace (TT_PROC_DETACH, tts.tts_pid, 0, 0, 0, 0) == -1)
980 perror_with_name (("ttrace"));
981 inf_ttrace_vfork_ppid = -1;
982 }
983
984 tts.tts_event = TTEVT_NONE;
985 }
986
987 clear_sigint_trap ();
988 }
989 while (tts.tts_event == TTEVT_NONE);
990
991 /* Now that we've waited, we can re-enable the page protections. */
992 if (inf_ttrace_reenable_page_protections)
993 {
994 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
995 inf_ttrace_enable_page_protections (tts.tts_pid);
996 inf_ttrace_reenable_page_protections = 0;
997 }
998
999 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
1000
1001 if (inf_ttrace_num_lwps == 0)
1002 {
1003 struct thread_info *ti;
1004
1005 inf_ttrace_num_lwps = 1;
1006
1007 /* This is the earliest we hear about the lwp member of
1008 INFERIOR_PTID, after an attach or fork_inferior. */
1009 gdb_assert (ptid_get_lwp (inferior_ptid) == 0);
1010
1011 /* We haven't set the private member on the main thread yet. Do
1012 it now. */
1013 ti = find_thread_ptid (inferior_ptid);
1014 gdb_assert (ti != NULL && ti->private == NULL);
1015 ti->private =
1016 xmalloc (sizeof (struct inf_ttrace_private_thread_info));
1017 memset (ti->private, 0,
1018 sizeof (struct inf_ttrace_private_thread_info));
1019
1020 /* Notify the core that this ptid changed. This changes
1021 inferior_ptid as well. */
1022 thread_change_ptid (inferior_ptid, ptid);
1023 }
1024
1025 switch (tts.tts_event)
1026 {
1027 #ifdef TTEVT_BPT_SSTEP
1028 case TTEVT_BPT_SSTEP:
1029 /* Make it look like a breakpoint. */
1030 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1031 ourstatus->value.sig = GDB_SIGNAL_TRAP;
1032 break;
1033 #endif
1034
1035 case TTEVT_EXEC:
1036 ourstatus->kind = TARGET_WAITKIND_EXECD;
1037 ourstatus->value.execd_pathname =
1038 xmalloc (tts.tts_u.tts_exec.tts_pathlen + 1);
1039 if (ttrace (TT_PROC_GET_PATHNAME, tts.tts_pid, 0,
1040 (uintptr_t)ourstatus->value.execd_pathname,
1041 tts.tts_u.tts_exec.tts_pathlen, 0) == -1)
1042 perror_with_name (("ttrace"));
1043 ourstatus->value.execd_pathname[tts.tts_u.tts_exec.tts_pathlen] = 0;
1044
1045 /* At this point, all inserted breakpoints are gone. Marking them
1046 out as soon as we detect an exec prevents breakpoint deletion
1047 from writing the saved "shadow contents" back to memory to lift
1048 the bp. That shadow is NOT valid after an exec. */
1049 mark_breakpoints_out ();
1050 break;
1051
1052 case TTEVT_EXIT:
1053 store_waitstatus (ourstatus, tts.tts_u.tts_exit.tts_exitcode);
1054 inf_ttrace_num_lwps = 0;
1055 break;
1056
1057 case TTEVT_FORK:
1058 related_ptid = ptid_build (tts.tts_u.tts_fork.tts_fpid,
1059 tts.tts_u.tts_fork.tts_flwpid, 0);
1060
1061 ourstatus->kind = TARGET_WAITKIND_FORKED;
1062 ourstatus->value.related_pid = related_ptid;
1063
1064 /* Make sure the other end of the fork is stopped too. */
1065 if (ttrace_wait (tts.tts_u.tts_fork.tts_fpid,
1066 tts.tts_u.tts_fork.tts_flwpid,
1067 TTRACE_WAITOK, &tts, sizeof tts) == -1)
1068 perror_with_name (("ttrace_wait"));
1069
1070 gdb_assert (tts.tts_event == TTEVT_FORK);
1071 if (tts.tts_u.tts_fork.tts_isparent)
1072 {
1073 related_ptid = ptid_build (tts.tts_u.tts_fork.tts_fpid,
1074 tts.tts_u.tts_fork.tts_flwpid, 0);
1075 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
1076 ourstatus->value.related_pid = related_ptid;
1077 }
1078 break;
1079
1080 case TTEVT_VFORK:
1081 gdb_assert (!tts.tts_u.tts_fork.tts_isparent);
1082
1083 related_ptid = ptid_build (tts.tts_u.tts_fork.tts_fpid,
1084 tts.tts_u.tts_fork.tts_flwpid, 0);
1085
1086 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1087 ourstatus->value.related_pid = related_ptid;
1088
1089 /* HACK: To avoid touching the parent during the vfork, switch
1090 away from it. */
1091 inferior_ptid = ptid;
1092 break;
1093
1094 case TTEVT_LWP_CREATE:
1095 lwpid = tts.tts_u.tts_thread.tts_target_lwpid;
1096 ptid = ptid_build (tts.tts_pid, lwpid, 0);
1097 ti = add_thread (ptid);
1098 ti->private =
1099 xmalloc (sizeof (struct inf_ttrace_private_thread_info));
1100 memset (ti->private, 0,
1101 sizeof (struct inf_ttrace_private_thread_info));
1102 inf_ttrace_num_lwps++;
1103 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
1104 /* Let the lwp_create-caller thread continue. */
1105 ttrace (TT_LWP_CONTINUE, ptid_get_pid (ptid),
1106 ptid_get_lwp (ptid), TT_NOPC, 0, 0);
1107 /* Return without stopping the whole process. */
1108 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1109 return ptid;
1110
1111 case TTEVT_LWP_EXIT:
1112 if (print_thread_events)
1113 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (ptid));
1114 ti = find_thread_ptid (ptid);
1115 gdb_assert (ti != NULL);
1116 ((struct inf_ttrace_private_thread_info *)ti->private)->dying = 1;
1117 inf_ttrace_num_lwps--;
1118 /* Let the thread really exit. */
1119 ttrace (TT_LWP_CONTINUE, ptid_get_pid (ptid),
1120 ptid_get_lwp (ptid), TT_NOPC, 0, 0);
1121 /* Return without stopping the whole process. */
1122 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1123 return ptid;
1124
1125 case TTEVT_LWP_TERMINATE:
1126 lwpid = tts.tts_u.tts_thread.tts_target_lwpid;
1127 ptid = ptid_build (tts.tts_pid, lwpid, 0);
1128 if (print_thread_events)
1129 printf_unfiltered (_("[%s has been terminated]\n"),
1130 target_pid_to_str (ptid));
1131 ti = find_thread_ptid (ptid);
1132 gdb_assert (ti != NULL);
1133 ((struct inf_ttrace_private_thread_info *)ti->private)->dying = 1;
1134 inf_ttrace_num_lwps--;
1135
1136 /* Resume the lwp_terminate-caller thread. */
1137 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
1138 ttrace (TT_LWP_CONTINUE, ptid_get_pid (ptid),
1139 ptid_get_lwp (ptid), TT_NOPC, 0, 0);
1140 /* Return without stopping the whole process. */
1141 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1142 return ptid;
1143
1144 case TTEVT_SIGNAL:
1145 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1146 ourstatus->value.sig =
1147 gdb_signal_from_host (tts.tts_u.tts_signal.tts_signo);
1148 break;
1149
1150 case TTEVT_SYSCALL_ENTRY:
1151 gdb_assert (inf_ttrace_reenable_page_protections == 0);
1152 inf_ttrace_num_lwps_in_syscall++;
1153 if (inf_ttrace_num_lwps_in_syscall == 1)
1154 {
1155 /* A thread has just entered a system call. Disable any
1156 page protections as the kernel can't deal with them. */
1157 inf_ttrace_disable_page_protections (tts.tts_pid);
1158 }
1159 ourstatus->kind = TARGET_WAITKIND_SYSCALL_ENTRY;
1160 ourstatus->value.syscall_number = tts.tts_scno;
1161 break;
1162
1163 case TTEVT_SYSCALL_RETURN:
1164 if (inf_ttrace_num_lwps_in_syscall > 0)
1165 {
1166 /* If the last thread has just left the system call, this
1167 would be a logical place to re-enable the page
1168 protections, but that doesn't work. We can't re-enable
1169 them until we've done another wait. */
1170 inf_ttrace_reenable_page_protections =
1171 (inf_ttrace_num_lwps_in_syscall == 1);
1172 inf_ttrace_num_lwps_in_syscall--;
1173 }
1174 ourstatus->kind = TARGET_WAITKIND_SYSCALL_RETURN;
1175 ourstatus->value.syscall_number = tts.tts_scno;
1176 break;
1177
1178 default:
1179 gdb_assert (!"Unexpected ttrace event");
1180 break;
1181 }
1182
1183 /* Make sure all threads within the process are stopped. */
1184 if (ttrace (TT_PROC_STOP, tts.tts_pid, 0, 0, 0, 0) == -1)
1185 perror_with_name (("ttrace"));
1186
1187 /* Now that the whole process is stopped, check if any dying thread
1188 is really dead by now. If a dying thread is still alive, it will
1189 be stopped too, and will still show up in `info threads', tagged
1190 with "(Exiting)". We could make `info threads' prune dead
1191 threads instead via inf_ttrace_thread_alive, but doing this here
1192 has the advantage that a frontend is notified sooner of thread
1193 exits. Note that a dying lwp is still alive, it still has to be
1194 resumed, like any other lwp. */
1195 iterate_over_threads (inf_ttrace_delete_dead_threads_callback, NULL);
1196
1197 return ptid;
1198 }
1199
1200 /* Transfer LEN bytes from ADDR in the inferior's memory into READBUF,
1201 and transfer LEN bytes from WRITEBUF into the inferior's memory at
1202 ADDR. Either READBUF or WRITEBUF may be null, in which case the
1203 corresponding transfer doesn't happen. Return the number of bytes
1204 actually transferred (which may be zero if an error occurs). */
1205
1206 static LONGEST
1207 inf_ttrace_xfer_memory (CORE_ADDR addr, ULONGEST len,
1208 void *readbuf, const void *writebuf)
1209 {
1210 pid_t pid = ptid_get_pid (inferior_ptid);
1211
1212 /* HP-UX treats text space and data space differently. GDB, however,
1213 doesn't really know the difference. Therefore we try both. Try
1214 text space before data space though because when we're writing
1215 into text space the instruction cache might need to be flushed. */
1216
1217 if (readbuf
1218 && ttrace (TT_PROC_RDTEXT, pid, 0, addr, len, (uintptr_t)readbuf) == -1
1219 && ttrace (TT_PROC_RDDATA, pid, 0, addr, len, (uintptr_t)readbuf) == -1)
1220 return 0;
1221
1222 if (writebuf
1223 && ttrace (TT_PROC_WRTEXT, pid, 0, addr, len, (uintptr_t)writebuf) == -1
1224 && ttrace (TT_PROC_WRDATA, pid, 0, addr, len, (uintptr_t)writebuf) == -1)
1225 return 0;
1226
1227 return len;
1228 }
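A hedged usage sketch of the helper above: reading a few bytes from the current inferior. The caller is hypothetical; the helper returns the requested length on success and 0 on failure, so the check is all-or-nothing.

/* Sketch: read 4 bytes from the inferior at ADDR.  */
static int
example_read_word (CORE_ADDR addr, gdb_byte *buf)
{
  /* Read only: pass NULL for the write buffer.  */
  return inf_ttrace_xfer_memory (addr, 4, buf, NULL) == 4;
}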
1229
1230 static enum target_xfer_status
1231 inf_ttrace_xfer_partial (struct target_ops *ops, enum target_object object,
1232 const char *annex, gdb_byte *readbuf,
1233 const gdb_byte *writebuf,
1234 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
1235 {
1236 switch (object)
1237 {
1238 case TARGET_OBJECT_MEMORY:
1239 {
1240 LONGEST val = inf_ttrace_xfer_memory (offset, len, readbuf, writebuf);
1241
1242 if (val == 0)
1243 return TARGET_XFER_EOF;
1244 else
1245 {
1246 *xfered_len = (ULONGEST) val;
1247 return TARGET_XFER_OK;
1248 }
1249 }
1250
1251 case TARGET_OBJECT_UNWIND_TABLE:
1252 return TARGET_XFER_E_IO;
1253
1254 case TARGET_OBJECT_AUXV:
1255 return TARGET_XFER_E_IO;
1256
1257 case TARGET_OBJECT_WCOOKIE:
1258 return TARGET_XFER_E_IO;
1259
1260 default:
1261 return TARGET_XFER_E_IO;
1262 }
1263 }
1264
1265 /* Print status information about what we're accessing. */
1266
1267 static void
1268 inf_ttrace_files_info (struct target_ops *ignore)
1269 {
1270 struct inferior *inf = current_inferior ();
1271 printf_filtered (_("\tUsing the running image of %s %s.\n"),
1272 inf->attach_flag ? "attached" : "child",
1273 target_pid_to_str (inferior_ptid));
1274 }
1275
1276 static int
1277 inf_ttrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1278 {
1279 return 1;
1280 }
1281
1282 /* Return a string describing the state of the thread specified by
1283 INFO. */
1284
1285 static char *
1286 inf_ttrace_extra_thread_info (struct target_ops *self,
1287 struct thread_info *info)
1288 {
1289 struct inf_ttrace_private_thread_info *private =
1290 (struct inf_ttrace_private_thread_info *) info->private;
1291
1292 if (private != NULL && private->dying)
1293 return "Exiting";
1294
1295 return NULL;
1296 }
1297
1298 static char *
1299 inf_ttrace_pid_to_str (struct target_ops *ops, ptid_t ptid)
1300 {
1301 pid_t pid = ptid_get_pid (ptid);
1302 lwpid_t lwpid = ptid_get_lwp (ptid);
1303 static char buf[128];
1304
1305 if (lwpid == 0)
1306 xsnprintf (buf, sizeof buf, "process %ld",
1307 (long) pid);
1308 else
1309 xsnprintf (buf, sizeof buf, "process %ld, lwp %ld",
1310 (long) pid, (long) lwpid);
1311 return buf;
1312 }
1313 \f
1314
1315 /* Implement the get_ada_task_ptid target_ops method. */
1316
1317 static ptid_t
1318 inf_ttrace_get_ada_task_ptid (struct target_ops *self, long lwp, long thread)
1319 {
1320 return ptid_build (ptid_get_pid (inferior_ptid), lwp, 0);
1321 }
1322
1323 \f
1324 struct target_ops *
1325 inf_ttrace_target (void)
1326 {
1327 struct target_ops *t = inf_child_target ();
1328
1329 t->to_attach = inf_ttrace_attach;
1330 t->to_detach = inf_ttrace_detach;
1331 t->to_resume = inf_ttrace_resume;
1332 t->to_wait = inf_ttrace_wait;
1333 t->to_files_info = inf_ttrace_files_info;
1334 t->to_can_use_hw_breakpoint = inf_ttrace_can_use_hw_breakpoint;
1335 t->to_insert_watchpoint = inf_ttrace_insert_watchpoint;
1336 t->to_remove_watchpoint = inf_ttrace_remove_watchpoint;
1337 t->to_stopped_by_watchpoint = inf_ttrace_stopped_by_watchpoint;
1338 t->to_region_ok_for_hw_watchpoint =
1339 inf_ttrace_region_ok_for_hw_watchpoint;
1340 t->to_kill = inf_ttrace_kill;
1341 t->to_create_inferior = inf_ttrace_create_inferior;
1342 t->to_follow_fork = inf_ttrace_follow_fork;
1343 t->to_mourn_inferior = inf_ttrace_mourn_inferior;
1344 t->to_thread_alive = inf_ttrace_thread_alive;
1345 t->to_extra_thread_info = inf_ttrace_extra_thread_info;
1346 t->to_pid_to_str = inf_ttrace_pid_to_str;
1347 t->to_xfer_partial = inf_ttrace_xfer_partial;
1348 t->to_get_ada_task_ptid = inf_ttrace_get_ada_task_ptid;
1349
1350 return t;
1351 }
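A hedged sketch of how a native-dependent file might consume this constructor: take the ttrace-based vector, override the architecture-specific pieces, and register it with add_target. The register functions named below are placeholders, not real GDB symbols.

/* Sketch only; example_fetch_registers and example_store_registers are
   hypothetical and would be supplied by the machine-dependent code.  */
void
_initialize_example_hpux_nat (void)
{
  struct target_ops *t = inf_ttrace_target ();

  t->to_fetch_registers = example_fetch_registers;
  t->to_store_registers = example_store_registers;
  add_target (t);
}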
1352 #endif
1353 \f
1354
1355 /* Prevent warning from -Wmissing-prototypes. */
1356 void _initialize_inf_ttrace (void);
1357
1358 void
1359 _initialize_inf_ttrace (void)
1360 {
1361 #ifdef HAVE_TTRACE
1362 inf_ttrace_page_dict.pagesize = getpagesize();
1363 #endif
1364 }