gdb/gdbserver/thread-db.c
/* Thread management interface, for the remote server for GDB.
   Copyright (C) 2002, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.

   Contributed by MontaVista Software.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "server.h"

#include "linux-low.h"

extern int debug_threads;

#ifdef HAVE_THREAD_DB_H
#include <thread_db.h>
#endif

#include "gdb_proc_service.h"

#include <stdint.h>

/* Structure that identifies the child process for the
   <proc_service.h> interface.  */
static struct ps_prochandle proc_handle;

/* Connection to the libthread_db library.  */
static td_thragent_t *thread_agent;

static void thread_db_find_new_threads (void);
static int find_new_threads_callback (const td_thrhandle_t *th_p, void *data);
static char *
thread_db_err_str (td_err_e err)
{
  static char buf[64];

  switch (err)
    {
    case TD_OK:
      return "generic 'call succeeded'";
    case TD_ERR:
      return "generic error";
    case TD_NOTHR:
      return "no thread to satisfy query";
    case TD_NOSV:
      return "no sync handle to satisfy query";
    case TD_NOLWP:
      return "no LWP to satisfy query";
    case TD_BADPH:
      return "invalid process handle";
    case TD_BADTH:
      return "invalid thread handle";
    case TD_BADSH:
      return "invalid synchronization handle";
    case TD_BADTA:
      return "invalid thread agent";
    case TD_BADKEY:
      return "invalid key";
    case TD_NOMSG:
      return "no event message for getmsg";
    case TD_NOFPREGS:
      return "FPU register set not available";
    case TD_NOLIBTHREAD:
      return "application not linked with libthread";
    case TD_NOEVENT:
      return "requested event is not supported";
    case TD_NOCAPAB:
      return "capability not available";
    case TD_DBERR:
      return "debugger service failed";
    case TD_NOAPLIC:
      return "operation not applicable to";
    case TD_NOTSD:
      return "no thread-specific data for this thread";
    case TD_MALLOC:
      return "malloc failed";
    case TD_PARTIALREG:
      return "only part of register set was written/read";
    case TD_NOXREGS:
      return "X register set not available for this thread";
#ifdef HAVE_TD_VERSION
    case TD_VERSION:
      return "version mismatch between libthread_db and libpthread";
#endif
    default:
      snprintf (buf, sizeof (buf), "unknown thread_db error '%d'", err);
      return buf;
    }
}

#if 0
static char *
thread_db_state_str (td_thr_state_e state)
{
  static char buf[64];

  switch (state)
    {
    case TD_THR_STOPPED:
      return "stopped by debugger";
    case TD_THR_RUN:
      return "runnable";
    case TD_THR_ACTIVE:
      return "active";
    case TD_THR_ZOMBIE:
      return "zombie";
    case TD_THR_SLEEP:
      return "sleeping";
    case TD_THR_STOPPED_ASLEEP:
      return "stopped by debugger AND blocked";
    default:
      snprintf (buf, sizeof (buf), "unknown thread_db state %d", state);
      return buf;
    }
}
#endif
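/* Breakpoint handler for the thread creation event breakpoint.
   Consume the event message from libthread_db and register the newly
   created thread, picking up the main thread first if we do not know
   about it yet.  */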
static void
thread_db_create_event (CORE_ADDR where)
{
  td_event_msg_t msg;
  td_err_e err;
  struct inferior_linux_data *tdata;
  struct thread_info *inferior;
  struct process_info *process;

  if (debug_threads)
    fprintf (stderr, "Thread creation event.\n");

  tdata = inferior_target_data (current_inferior);

  /* FIXME: This assumes we don't get another event.
     In the LinuxThreads implementation, this is safe,
     because all events come from the manager thread
     (except for its own creation, of course).  */
  err = td_ta_event_getmsg (thread_agent, &msg);
  if (err != TD_OK)
    fprintf (stderr, "thread getmsg err: %s\n",
             thread_db_err_str (err));

  /* If we do not know about the main thread yet, this would be a good time to
     find it.  We need to do this to pick up the main thread before any newly
     created threads.  */
  inferior = (struct thread_info *) all_threads.head;
  process = get_thread_process (inferior);
  if (process->thread_known == 0)
    thread_db_find_new_threads ();

  /* msg.event == TD_EVENT_CREATE */

  find_new_threads_callback (msg.th_p, NULL);
}

#if 0
static void
thread_db_death_event (CORE_ADDR where)
{
  if (debug_threads)
    fprintf (stderr, "Thread death event.\n");
}
#endif
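/* Enable thread event reporting: set the process-wide event mask and
   install a breakpoint at the thread creation event address.  Return
   1 on success and 0 on failure.  */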
static int
thread_db_enable_reporting (void)
{
  td_thr_events_t events;
  td_notify_t notify;
  td_err_e err;

  /* Set the process wide mask saying which events we're interested in.  */
  td_event_emptyset (&events);
  td_event_addset (&events, TD_CREATE);

#if 0
  /* This is reported to be broken in glibc 2.1.3.  A different approach
     will be necessary to support that.  */
  td_event_addset (&events, TD_DEATH);
#endif

  err = td_ta_set_event (thread_agent, &events);
  if (err != TD_OK)
    {
      warning ("Unable to set global thread event mask: %s",
               thread_db_err_str (err));
      return 0;
    }

  /* Get address for thread creation breakpoint.  */
  err = td_ta_event_addr (thread_agent, TD_CREATE, &notify);
  if (err != TD_OK)
    {
      warning ("Unable to get location for thread creation breakpoint: %s",
               thread_db_err_str (err));
      return 0;
    }
  set_breakpoint_at ((CORE_ADDR) (unsigned long) notify.u.bptaddr,
                     thread_db_create_event);

#if 0
  /* Don't concern ourselves with reported thread deaths, only
     with actual thread deaths (via wait).  */

  /* Get address for thread death breakpoint.  */
  err = td_ta_event_addr (thread_agent, TD_DEATH, &notify);
  if (err != TD_OK)
    {
      warning ("Unable to get location for thread death breakpoint: %s",
               thread_db_err_str (err));
      return;
    }
  set_breakpoint_at ((CORE_ADDR) (unsigned long) notify.u.bptaddr,
                     thread_db_death_event);
#endif

  return 1;
}
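/* Attach to the thread described by TH_P and TI_P if we are not
   already debugging it.  The first thread needs special handling: its
   entry in the thread list is re-indexed by thread ID once
   libthread_db reports one.  */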
static void
maybe_attach_thread (const td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
{
  td_err_e err;
  struct thread_info *inferior;
  struct process_info *process;

  /* If we are attaching to our first thread, things are a little
     different.  */
  if (all_threads.head == all_threads.tail)
    {
      inferior = (struct thread_info *) all_threads.head;
      process = get_thread_process (inferior);

      if (process->thread_known == 0)
        {
          /* If the new thread ID is zero, a final thread ID will be
             available later.  Do not enable thread debugging yet.  */
          if (ti_p->ti_tid == 0)
            {
              err = td_thr_event_enable (th_p, 1);
              if (err != TD_OK)
                error ("Cannot enable thread event reporting for %d: %s",
                       ti_p->ti_lid, thread_db_err_str (err));
              return;
            }

          if (process->lwpid != ti_p->ti_lid)
            fatal ("PID mismatch!  Expected %ld, got %ld",
                   (long) process->lwpid, (long) ti_p->ti_lid);

          /* Switch to indexing the threads list by TID.  */
          change_inferior_id (&all_threads, ti_p->ti_tid);
          goto found;
        }
    }

  inferior = (struct thread_info *) find_inferior_id (&all_threads,
                                                      ti_p->ti_tid);
  if (inferior != NULL)
    return;

  if (debug_threads)
    fprintf (stderr, "Attaching to thread %ld (LWP %d)\n",
             ti_p->ti_tid, ti_p->ti_lid);
  linux_attach_lwp (ti_p->ti_lid, ti_p->ti_tid);
  inferior = (struct thread_info *) find_inferior_id (&all_threads,
                                                      ti_p->ti_tid);
  if (inferior == NULL)
    {
      warning ("Could not attach to thread %ld (LWP %d)\n",
               ti_p->ti_tid, ti_p->ti_lid);
      return;
    }

  process = inferior_target_data (inferior);

found:
  new_thread_notify (ti_p->ti_tid);

  process->tid = ti_p->ti_tid;
  process->lwpid = ti_p->ti_lid;

  process->thread_known = 1;
  process->th = *th_p;
  err = td_thr_event_enable (th_p, 1);
  if (err != TD_OK)
    error ("Cannot enable thread event reporting for %d: %s",
           ti_p->ti_lid, thread_db_err_str (err));
}
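/* Callback for td_ta_thr_iter: attach to any live thread we do not
   yet know about, skipping zombie and unknown threads.  */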
static int
find_new_threads_callback (const td_thrhandle_t *th_p, void *data)
{
  td_thrinfo_t ti;
  td_err_e err;

  err = td_thr_get_info (th_p, &ti);
  if (err != TD_OK)
    error ("Cannot get thread info: %s", thread_db_err_str (err));

  /* Check for zombies.  */
  if (ti.ti_state == TD_THR_UNKNOWN || ti.ti_state == TD_THR_ZOMBIE)
    return 0;

  maybe_attach_thread (th_p, &ti);

  return 0;
}
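/* Walk the inferior's thread list with libthread_db and attach to any
   threads we have not seen before.  */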
static void
thread_db_find_new_threads (void)
{
  td_err_e err;

  /* Iterate over all user-space threads to discover new threads.  */
  err = td_ta_thr_iter (thread_agent, find_new_threads_callback, NULL,
                        TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
                        TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
  if (err != TD_OK)
    error ("Cannot find new threads: %s", thread_db_err_str (err));
}

/* Cache all future symbols that thread_db might request.  We cannot
   request symbols at arbitrary times in the remote protocol, only
   when the client tells us that new symbols are available.  So when
   we load the thread library, make sure to check the entire list.  */

static void
thread_db_look_up_symbols (void)
{
  const char **sym_list;
  CORE_ADDR unused;

  for (sym_list = td_symbol_list (); *sym_list; sym_list++)
    look_up_one_symbol (*sym_list, &unused);
}
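/* Return in *ADDRESS the thread-local storage address for THREAD of
   the variable at OFFSET within the module loaded at LOAD_MODULE.
   Return 0 on success, a td_err_e error code on failure, or -1 if
   libthread_db does not provide td_thr_tls_get_addr.  */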
int
thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
                           CORE_ADDR load_module, CORE_ADDR *address)
{
#if HAVE_TD_THR_TLS_GET_ADDR
  psaddr_t addr;
  td_err_e err;
  struct process_info *process;

  process = get_thread_process (thread);
  if (!process->thread_known)
    thread_db_find_new_threads ();
  if (!process->thread_known)
    return TD_NOTHR;

  /* Note the cast through uintptr_t: this interface only works if
     a target address fits in a psaddr_t, which is a host pointer.
     So a 32-bit debugger can not access 64-bit TLS through this.  */
  err = td_thr_tls_get_addr (&process->th, (psaddr_t) (uintptr_t) load_module,
                             offset, &addr);
  if (err == TD_OK)
    {
      *address = (CORE_ADDR) (uintptr_t) addr;
      return 0;
    }
  else
    return err;
#else
  return -1;
#endif
}
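/* Activate libthread_db support for the current process.  Return 1 if
   a thread library was detected and thread event reporting enabled,
   0 otherwise.  */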
int
thread_db_init (void)
{
  int err;

  /* FIXME drow/2004-10-16: This is the "overall process ID", which
     GNU/Linux calls tgid, "thread group ID".  When we support
     attaching to threads, the original thread may not be the correct
     thread.  We would have to get the process ID from /proc for NPTL.
     For LinuxThreads we could do something similar: follow the chain
     of parent processes until we find the highest one we're attached
     to, and use its tgid.

     This isn't the only place in gdbserver that assumes that the first
     process in the list is the thread group leader.  */
  proc_handle.pid = ((struct inferior_list_entry *) current_inferior)->id;

  /* Allow new symbol lookups.  */
  all_symbols_looked_up = 0;

  err = td_ta_new (&proc_handle, &thread_agent);
  switch (err)
    {
    case TD_NOLIBTHREAD:
      /* No thread library was detected.  */
      return 0;

    case TD_OK:
      /* The thread library was detected.  */

      if (thread_db_enable_reporting () == 0)
        return 0;
      thread_db_find_new_threads ();
      thread_db_look_up_symbols ();
      all_symbols_looked_up = 1;
      return 1;

    default:
      warning ("error initializing thread_db library: %s",
               thread_db_err_str (err));
    }

  return 0;
}