/* Low level interface for debugging HPUX/DCE threads for GDB, the GNU debugger.
   Copyright 1996, 1999 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* This module implements a sort of half target that sits between the
   machine-independent parts of GDB and the ptrace interface (infptrace.c) to
   provide access to the HPUX user-mode thread implementation.

   HPUX threads are true user-mode threads, which are invoked via the cma_*
   and pthread_* (DCE and Posix respectively) interfaces.  These are mostly
   implemented in user-space, with all thread context kept in various
   structures that live in the user's heap.  For the most part, the kernel
   has no knowledge of these threads.  */

#include "defs.h"

#define _CMA_NOWRAPPERS_

#include <cma_tcb_defs.h>
#include <cma_deb_core.h>
#include "gdbthread.h"
#include "target.h"
#include "inferior.h"
#include <fcntl.h>
#include <sys/stat.h>
#include "gdbcore.h"

extern int child_suppress_run;
extern struct target_ops child_ops;	/* target vector for inftarg.c */

extern void _initialize_hpux_thread (void);

struct string_map
{
  int num;
  char *str;
};

static int hpux_thread_active = 0;

static int main_pid;		/* Real process ID */
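
/* Thread ids handed to the rest of GDB by this module are composites:
   the CMA unique thread number is kept in the upper 16 bits and the
   real process id (main_pid) in the lower 16 bits.  See
   find_active_thread and hpux_pid_to_str.  */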

static CORE_ADDR P_cma__g_known_threads;
static CORE_ADDR P_cma__g_current_thread;

static struct cleanup *save_inferior_pid (void);

static void restore_inferior_pid (int pid);

static void hpux_thread_resume (int pid, int step, enum target_signal signo);

static void init_hpux_thread_ops (void);

static struct target_ops hpux_thread_ops;
\f
/*

   LOCAL FUNCTION

   save_inferior_pid - Save inferior_pid on the cleanup list
   restore_inferior_pid - Restore inferior_pid from the cleanup list

   SYNOPSIS

   struct cleanup *save_inferior_pid ()
   void restore_inferior_pid (int pid)

   DESCRIPTION

   These two functions act in unison to restore inferior_pid in
   case of an error.

   NOTES

   inferior_pid is a global variable that needs to be changed by many of
   these routines before calling into the underlying child target
   (inftarg.c).  In order to guarantee that inferior_pid gets restored
   (in case of errors), you need to call save_inferior_pid before
   changing it.  At the end of the function, you should invoke
   do_cleanups to restore it.  */


static struct cleanup *
save_inferior_pid (void)
{
  return make_cleanup (restore_inferior_pid, inferior_pid);
}

static void
restore_inferior_pid (int pid)
{
  inferior_pid = pid;
}
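
/* The usual pattern in the target methods below is:

     old_chain = save_inferior_pid ();
     inferior_pid = main_pid;
     ... operate on the real process via child_ops ...
     do_cleanups (old_chain);

   so that the composite thread id in inferior_pid is put back even if
   one of the child_ops calls throws an error.  */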
\f
static int find_active_thread (void);

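/* find_tcb caches its most recent result in cached_thread/cached_tcb,
   and cached_active_thread is meant to short-circuit find_active_thread.
   All of these are flushed (reset to zero) by hpux_thread_resume, since
   the thread state can change whenever the inferior runs.  */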
static int cached_thread;
static int cached_active_thread;
static cma__t_int_tcb cached_tcb;

static int
find_active_thread (void)
{
  static cma__t_int_tcb tcb;
  CORE_ADDR tcb_ptr;

  if (cached_active_thread != 0)
    return cached_active_thread;

  read_memory ((CORE_ADDR) P_cma__g_current_thread,
	       (char *) &tcb_ptr,
	       sizeof tcb_ptr);

  read_memory (tcb_ptr, (char *) &tcb, sizeof tcb);

  return (cma_thread_get_unique (&tcb.prolog.client_thread) << 16) | main_pid;
}

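/* Look up the TCB for composite thread id THREAD by walking the
   circular queue rooted at cma__g_known_threads in the inferior,
   reading each element into cached_tcb and matching the CMA unique
   thread number against the upper 16 bits of THREAD.  Errors out if
   no TCB matches.  */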
static cma__t_int_tcb *find_tcb (int thread);

static cma__t_int_tcb *
find_tcb (int thread)
{
  cma__t_known_object queue_header;
  cma__t_queue *queue_ptr;

  if (thread == cached_thread)
    return &cached_tcb;

  read_memory ((CORE_ADDR) P_cma__g_known_threads,
	       (char *) &queue_header,
	       sizeof queue_header);

  for (queue_ptr = queue_header.queue.flink;
       queue_ptr != (cma__t_queue *) P_cma__g_known_threads;
       queue_ptr = cached_tcb.threads.flink)
    {
      cma__t_int_tcb *tcb_ptr;

      tcb_ptr = cma__base (queue_ptr, threads, cma__t_int_tcb);

      read_memory ((CORE_ADDR) tcb_ptr, (char *) &cached_tcb, sizeof cached_tcb);

      if (cached_tcb.header.type == cma__c_obj_tcb)
	if (cma_thread_get_unique (&cached_tcb.prolog.client_thread) == thread >> 16)
	  {
	    cached_thread = thread;
	    return &cached_tcb;
	  }
    }

  error ("Can't find TCB %d,%d", thread >> 16, thread & 0xffff);
  return NULL;
}
\f
/* Most target vector functions from here on actually just pass through to
   inftarg.c, as they don't need to do anything specific for threads.  */

/* ARGSUSED */
static void
hpux_thread_open (char *arg, int from_tty)
{
  child_ops.to_open (arg, from_tty);
}

/* Attach to process PID, then initialize for debugging it
   and wait for the trace-trap that results from attaching.  */

static void
hpux_thread_attach (char *args, int from_tty)
{
  child_ops.to_attach (args, from_tty);

  /* XXX - might want to iterate over all the threads and register them.  */
}

/* Take a program previously attached to and detach it.  The program
   resumes execution and will no longer stop on signals, etc.  We'd
   better not have left any breakpoints in the program or it'll die
   when it hits one.  For this to work, it may be necessary for the
   process to have been previously attached.  It *might* work if the
   program was started via the normal ptrace (PTRACE_TRACEME).  */

static void
hpux_thread_detach (char *args, int from_tty)
{
  child_ops.to_detach (args, from_tty);
}

/* Resume execution of process PID.  If STEP is nonzero, then
   just single step it.  If SIGNAL is nonzero, restart it with that
   signal activated.  We may have to convert PID from a thread id to
   an LWP id for the underlying child target.  */

static void
hpux_thread_resume (int pid, int step, enum target_signal signo)
{
  struct cleanup *old_chain;

  old_chain = save_inferior_pid ();

  pid = inferior_pid = main_pid;

#if 0
  if (pid != -1)
    {
      pid = thread_to_lwp (pid, -2);
      if (pid == -2)		/* Inactive thread */
	error ("This version of Solaris can't start inactive threads.");
    }
#endif

  child_ops.to_resume (pid, step, signo);

  cached_thread = 0;
  cached_active_thread = 0;

  do_cleanups (old_chain);
}

/* Wait for any threads to stop.  We may have to convert PID from a thread id
   to a LWP id, and vice versa on the way out.  */

static int
hpux_thread_wait (int pid, struct target_waitstatus *ourstatus)
{
  int rtnval;
  struct cleanup *old_chain;

  old_chain = save_inferior_pid ();

  inferior_pid = main_pid;

  if (pid != -1)
    pid = main_pid;

  rtnval = child_ops.to_wait (pid, ourstatus);

  rtnval = find_active_thread ();

  do_cleanups (old_chain);

  return rtnval;
}

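/* Map from GDB register number to the byte offset of that register in
   the thread's saved context on its stack.  A positive entry is an
   offset from the context base (the saved stack pointer minus 160,
   apparently the size of the area the CMA scheduler pushes); -1 means
   the register is not kept in the saved context and is handled by the
   underlying child target; -2 marks registers (flags, sp, pcoqh) that
   get special-case treatment in the fetch/store routines below.  */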
static char regmap[NUM_REGS] =
{
  -2, -1, -1, 0, 4, 8, 12, 16, 20, 24,	/* flags, r1 -> r9 */
  28, 32, 36, 40, 44, 48, 52, 56, 60, -1,	/* r10 -> r19 */
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,	/* r20 -> r29 */

  /* r30, r31, sar, pcoqh, pcsqh, pcoqt, pcsqt, eiem, iir, isr */
  -2, -1, -1, -2, -1, -1, -1, -1, -1, -1,

  /* ior, ipsw, goto, sr4, sr0, sr1, sr2, sr3, sr5, sr6 */
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,

  /* sr7, cr0, cr8, cr9, ccr, cr12, cr13, cr24, cr25, cr26 */
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,

  -1, -1, -1, -1,		/* mpsfu_high, mpsfu_low, mpsfu_ovflo, pad */
  144, -1, -1, -1, -1, -1, -1, -1,	/* fpsr, fpe1 -> fpe7 */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* fr4 -> fr7 */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* fr8 -> fr11 */
  136, -1, 128, -1, 120, -1, 112, -1,	/* fr12 -> fr15 */
  104, -1, 96, -1, 88, -1, 80, -1,	/* fr16 -> fr19 */
  72, -1, 64, -1, -1, -1, -1, -1,	/* fr20 -> fr23 */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* fr24 -> fr27 */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* fr28 -> fr31 */
};

static void
hpux_thread_fetch_registers (int regno)
{
  cma__t_int_tcb tcb, *tcb_ptr;
  struct cleanup *old_chain;
  int i;
  int first_regno, last_regno;

  tcb_ptr = find_tcb (inferior_pid);

  old_chain = save_inferior_pid ();

  inferior_pid = main_pid;

  if (tcb_ptr->state == cma__c_state_running)
    {
      child_ops.to_fetch_registers (regno);

      do_cleanups (old_chain);

      return;
    }

  if (regno == -1)
    {
      first_regno = 0;
      last_regno = NUM_REGS - 1;
    }
  else
    {
      first_regno = regno;
      last_regno = regno;
    }

  for (regno = first_regno; regno <= last_regno; regno++)
    {
      if (regmap[regno] == -1)
	child_ops.to_fetch_registers (regno);
      else
	{
	  unsigned char buf[MAX_REGISTER_RAW_SIZE];
	  CORE_ADDR sp;

	  sp = (CORE_ADDR) tcb_ptr->static_ctx.sp - 160;

	  if (regno == FLAGS_REGNUM)
	    /* Flags must be 0 to avoid bogus value for SS_INSYSCALL.  */
	    memset (buf, '\000', REGISTER_RAW_SIZE (regno));
	  else if (regno == SP_REGNUM)
	    store_address (buf, sizeof sp, sp);
	  else if (regno == PC_REGNUM)
	    read_memory (sp - 20, buf, REGISTER_RAW_SIZE (regno));
	  else
	    read_memory (sp + regmap[regno], buf, REGISTER_RAW_SIZE (regno));

	  supply_register (regno, buf);
	}
    }

  do_cleanups (old_chain);
}

static void
hpux_thread_store_registers (int regno)
{
  cma__t_int_tcb tcb, *tcb_ptr;
  struct cleanup *old_chain;
  int i;
  int first_regno, last_regno;

  tcb_ptr = find_tcb (inferior_pid);

  old_chain = save_inferior_pid ();

  inferior_pid = main_pid;

  if (tcb_ptr->state == cma__c_state_running)
    {
      child_ops.to_store_registers (regno);

      do_cleanups (old_chain);

      return;
    }

  if (regno == -1)
    {
      first_regno = 0;
      last_regno = NUM_REGS - 1;
    }
  else
    {
      first_regno = regno;
      last_regno = regno;
    }

  for (regno = first_regno; regno <= last_regno; regno++)
    {
      if (regmap[regno] == -1)
	child_ops.to_store_registers (regno);
      else
	{
	  unsigned char buf[MAX_REGISTER_RAW_SIZE];
	  CORE_ADDR sp;

	  sp = (CORE_ADDR) tcb_ptr->static_ctx.sp - 160;

	  if (regno == FLAGS_REGNUM)
	    child_ops.to_store_registers (regno);	/* Let lower layer handle this.  */
	  else if (regno == SP_REGNUM)
	    {
	      write_memory ((CORE_ADDR) &tcb_ptr->static_ctx.sp,
			    registers + REGISTER_BYTE (regno),
			    REGISTER_RAW_SIZE (regno));
	      tcb_ptr->static_ctx.sp = (cma__t_hppa_regs *)
		(extract_address (registers + REGISTER_BYTE (regno),
				  REGISTER_RAW_SIZE (regno)) + 160);
	    }
	  else if (regno == PC_REGNUM)
	    write_memory (sp - 20,
			  registers + REGISTER_BYTE (regno),
			  REGISTER_RAW_SIZE (regno));
	  else
	    write_memory (sp + regmap[regno],
			  registers + REGISTER_BYTE (regno),
			  REGISTER_RAW_SIZE (regno));
	}
    }

  do_cleanups (old_chain);
}

/* Get ready to modify the registers array.  On machines which store
   individual registers, this doesn't need to do anything.  On machines
   which store all the registers in one fell swoop, this makes sure
   that registers contains all the registers from the program being
   debugged.  */

static void
hpux_thread_prepare_to_store (void)
{
  child_ops.to_prepare_to_store ();
}

static int
hpux_thread_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len,
			 int dowrite, struct target_ops *target)
{
  int retval;
  struct cleanup *old_chain;

  old_chain = save_inferior_pid ();

  inferior_pid = main_pid;

  retval = child_ops.to_xfer_memory (memaddr, myaddr, len, dowrite, target);

  do_cleanups (old_chain);

  return retval;
}

/* Print status information about what we're accessing.  */

static void
hpux_thread_files_info (struct target_ops *ignore)
{
  child_ops.to_files_info (ignore);
}

static void
hpux_thread_kill_inferior (void)
{
  child_ops.to_kill ();
}

static void
hpux_thread_notice_signals (int pid)
{
  child_ops.to_notice_signals (pid);
}

/* Fork an inferior process and start debugging it.  If the CMA thread
   library is active, push the thread target and switch to the
   currently running thread.  */

static void
hpux_thread_create_inferior (char *exec_file, char *allargs, char **env)
{
  child_ops.to_create_inferior (exec_file, allargs, env);

  if (hpux_thread_active)
    {
      main_pid = inferior_pid;

      push_target (&hpux_thread_ops);

      inferior_pid = find_active_thread ();

      add_thread (inferior_pid);
    }
}

/* This routine is called whenever a new symbol table is read in, or when all
   symbol tables are removed.  The thread layer can only be initialized when
   it finds the right variables in the CMA library.  Since that is a shared
   library, those variables don't show up until the library gets mapped and
   the symbol table is read in.  */

/* This new_objfile event is now managed by a chained function pointer.
 * It is the callee's responsibility to call the next client on the chain.
 */

/* Saved pointer to previous owner of the new_objfile event.  */
static void (*target_new_objfile_chain) (struct objfile *);

void
hpux_thread_new_objfile (struct objfile *objfile)
{
  struct minimal_symbol *ms;

  if (!objfile)
    {
      hpux_thread_active = 0;
      goto quit;
    }

  ms = lookup_minimal_symbol ("cma__g_known_threads", NULL, objfile);

  if (!ms)
    goto quit;

  P_cma__g_known_threads = SYMBOL_VALUE_ADDRESS (ms);

  ms = lookup_minimal_symbol ("cma__g_current_thread", NULL, objfile);

  if (!ms)
    goto quit;

  P_cma__g_current_thread = SYMBOL_VALUE_ADDRESS (ms);

  hpux_thread_active = 1;
quit:
  /* Call predecessor on chain, if any.  */
  if (target_new_objfile_chain)
    target_new_objfile_chain (objfile);
}

/* Clean up after the inferior dies.  */

static void
hpux_thread_mourn_inferior (void)
{
  child_ops.to_mourn_inferior ();
}

/* Mark our target-struct as eligible for stray "run" and "attach" commands.  */

static int
hpux_thread_can_run (void)
{
  return child_suppress_run;
}

static int
hpux_thread_alive (int pid)
{
  return 1;
}

static void
hpux_thread_stop (void)
{
  child_ops.to_stop ();
}
\f
/* Convert a pid to printable form.  */

char *
hpux_pid_to_str (int pid)
{
  static char buf[100];

  sprintf (buf, "Thread %d", pid >> 16);

  return buf;
}
\f
static void
init_hpux_thread_ops (void)
{
  hpux_thread_ops.to_shortname = "hpux-threads";
  hpux_thread_ops.to_longname = "HPUX threads and pthread.";
  hpux_thread_ops.to_doc = "HPUX threads and pthread support.";
  hpux_thread_ops.to_open = hpux_thread_open;
  hpux_thread_ops.to_attach = hpux_thread_attach;
  hpux_thread_ops.to_detach = hpux_thread_detach;
  hpux_thread_ops.to_resume = hpux_thread_resume;
  hpux_thread_ops.to_wait = hpux_thread_wait;
  hpux_thread_ops.to_fetch_registers = hpux_thread_fetch_registers;
  hpux_thread_ops.to_store_registers = hpux_thread_store_registers;
  hpux_thread_ops.to_prepare_to_store = hpux_thread_prepare_to_store;
  hpux_thread_ops.to_xfer_memory = hpux_thread_xfer_memory;
  hpux_thread_ops.to_files_info = hpux_thread_files_info;
  hpux_thread_ops.to_insert_breakpoint = memory_insert_breakpoint;
  hpux_thread_ops.to_remove_breakpoint = memory_remove_breakpoint;
  hpux_thread_ops.to_terminal_init = terminal_init_inferior;
  hpux_thread_ops.to_terminal_inferior = terminal_inferior;
  hpux_thread_ops.to_terminal_ours_for_output = terminal_ours_for_output;
  hpux_thread_ops.to_terminal_ours = terminal_ours;
  hpux_thread_ops.to_terminal_info = child_terminal_info;
  hpux_thread_ops.to_kill = hpux_thread_kill_inferior;
  hpux_thread_ops.to_create_inferior = hpux_thread_create_inferior;
  hpux_thread_ops.to_mourn_inferior = hpux_thread_mourn_inferior;
  hpux_thread_ops.to_can_run = hpux_thread_can_run;
  hpux_thread_ops.to_notice_signals = hpux_thread_notice_signals;
  hpux_thread_ops.to_thread_alive = hpux_thread_alive;
  hpux_thread_ops.to_stop = hpux_thread_stop;
  hpux_thread_ops.to_stratum = process_stratum;
  hpux_thread_ops.to_has_all_memory = 1;
  hpux_thread_ops.to_has_memory = 1;
  hpux_thread_ops.to_has_stack = 1;
  hpux_thread_ops.to_has_registers = 1;
  hpux_thread_ops.to_has_execution = 1;
  hpux_thread_ops.to_magic = OPS_MAGIC;
}

void
_initialize_hpux_thread (void)
{
  init_hpux_thread_ops ();
  add_target (&hpux_thread_ops);

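  /* Suppress direct runs by the plain child target; hpux_thread_can_run
     returns this flag, so run/attach requests come to this layered
     target instead (assuming the usual inftarg.c convention for
     child_suppress_run).  */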
  child_suppress_run = 1;
  /* Hook into new_objfile notification.  */
  target_new_objfile_chain = target_new_objfile_hook;
  target_new_objfile_hook = hpux_thread_new_objfile;
}