gdb/ravenscar-thread.c
/* Ada Ravenscar thread support.

   Copyright (C) 2004-2019 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "gdbcore.h"
#include "gdbthread.h"
#include "ada-lang.h"
#include "target.h"
#include "inferior.h"
#include "command.h"
#include "ravenscar-thread.h"
#include "observable.h"
#include "gdbcmd.h"
#include "top.h"
#include "regcache.h"
#include "objfiles.h"

/* This module provides support for "Ravenscar" tasks (Ada) when
   debugging on bare-metal targets.

   The typical situation is when debugging a bare-metal target over
   the remote protocol.  In that situation, the system does not know
   about high-level concepts such as threads, only about some code
   running on one or more CPUs.  And since the remote protocol does not
   provide any handling for CPUs, the de facto standard for handling
   them is to have one thread per CPU, where the thread's ptid has
   its lwp field set to the CPU number (e.g. 1 for the first CPU,
   2 for the second one, etc.).  This module will make that assumption.

   This module then creates and maintains the list of threads based
   on the list of Ada tasks, with one thread per Ada task.  The convention
   is that threads corresponding to the CPUs (see assumption above)
   have a ptid_t of the form (PID, LWP, 0), while threads corresponding
   to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
   is the Ada task's ID as extracted from Ada runtime information.

   Switching to a given Ada task (or its underlying thread) is performed
   by fetching the registers of that task from the memory area where
   the registers were saved.  Any other operation is performed by
   first finding the CPU on which the task is running, switching to
   its corresponding ptid, and then performing the operation on that
   ptid using the target beneath us.  */

/* If nonzero, ravenscar task support is enabled.  */
static int ravenscar_task_support = 1;

/* PTID of the last thread that received an event.
   This can be useful to determine the associated task that received
   the event, to make it the current task.  */
static ptid_t base_ptid;

static const char running_thread_name[] = "__gnat_running_thread_table";

static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
static const char first_task_name[] = "system__tasking__debug__first_task";

static const char ravenscar_runtime_initializer[]
  = "system__bb__threads__initialize";

static const target_info ravenscar_target_info = {
  "ravenscar",
  N_("Ravenscar tasks."),
  N_("Ravenscar tasks support.")
};

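/* The ravenscar thread target.  It sits at the thread stratum, just
   above the process stratum target, and presents the Ada tasks of the
   Ravenscar runtime as threads, delegating to the target beneath for
   the underlying CPU threads.  */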
struct ravenscar_thread_target final : public target_ops
{
  const target_info &info () const override
  { return ravenscar_target_info; }

  strata stratum () const override { return thread_stratum; }

  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
  void resume (ptid_t, int, enum gdb_signal) override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  void prepare_to_store (struct regcache *) override;

  bool stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool stopped_by_watchpoint () override;

  bool stopped_data_address (CORE_ADDR *) override;

  bool thread_alive (ptid_t ptid) override;

  int core_of_thread (ptid_t ptid) override;

  void update_thread_list () override;

  const char *extra_thread_info (struct thread_info *) override;

  const char *pid_to_str (ptid_t) override;

  ptid_t get_ada_task_ptid (long lwp, long thread) override;

  void mourn_inferior () override;
};

/* This module's target-specific operations.  */
static ravenscar_thread_target ravenscar_ops;

static ptid_t ravenscar_active_task (int cpu);
static void ravenscar_update_inferior_ptid (void);
static int has_ravenscar_runtime (void);
static int ravenscar_runtime_initialized (void);
static void ravenscar_inferior_created (struct target_ops *target,
                                        int from_tty);

/* Return nonzero iff PTID corresponds to a ravenscar task.  */

static int
is_ravenscar_task (ptid_t ptid)
{
  /* By construction, ravenscar tasks have their LWP set to zero.
     Also make sure that the TID is nonzero, as some remotes, when
     asked for the list of threads, will return the first thread
     as having its TID set to zero.  For instance, TSIM version
     2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
     query, which the remote protocol layer then treats as a thread
     whose TID is 0.  This is obviously not a ravenscar task.  */
  return ptid.lwp () == 0 && ptid.tid () != 0;
}

/* Given PTID, which can be either a ravenscar task or a CPU thread,
   return which CPU that ptid is running on.

   This assumes that PTID is a valid ptid_t.  Otherwise, a gdb_assert
   will be triggered.  */

static int
ravenscar_get_thread_base_cpu (ptid_t ptid)
{
  int base_cpu;

  if (is_ravenscar_task (ptid))
    {
      struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);

      gdb_assert (task_info != NULL);
      base_cpu = task_info->base_cpu;
    }
  else
    {
      /* We assume that the LWP of the PTID is equal to the CPU number.  */
      base_cpu = ptid.lwp ();
    }

  return base_cpu;
}

/* Given a ravenscar task (identified by its ptid_t PTID), return nonzero
   if this task is the currently active task on the CPU that the task is
   running on.

   In other words, this function determines which CPU this task is
   currently running on, and then returns nonzero if the CPU in question
   is executing the code for that task.  If that's the case, then
   that task's registers are in the CPU bank.  Otherwise, the task
   is currently suspended, and its registers have been saved in memory.  */

static int
ravenscar_task_is_currently_active (ptid_t ptid)
{
  ptid_t active_task_ptid
    = ravenscar_active_task (ravenscar_get_thread_base_cpu (ptid));

  return ptid == active_task_ptid;
}

/* Return the CPU thread (as a ptid_t) on which the given ravenscar
   task is running.

   This is the thread that corresponds to the CPU on which the task
   is running.  */

static ptid_t
get_base_thread_from_ravenscar_task (ptid_t ptid)
{
  int base_cpu;

  if (!is_ravenscar_task (ptid))
    return ptid;

  base_cpu = ravenscar_get_thread_base_cpu (ptid);
  return ptid_t (ptid.pid (), base_cpu, 0);
}

/* Fetch the ravenscar running thread from target memory and
   update inferior_ptid accordingly.  */

static void
ravenscar_update_inferior_ptid (void)
{
  int base_cpu;

  base_ptid = inferior_ptid;

  gdb_assert (!is_ravenscar_task (inferior_ptid));
  base_cpu = ravenscar_get_thread_base_cpu (base_ptid);

  /* If the runtime has not been initialized yet, the inferior_ptid is
     the only ptid that there is.  */
  if (!ravenscar_runtime_initialized ())
    return;

  /* Make sure we set base_ptid before calling ravenscar_active_task
     as the latter relies on it.  */
  inferior_ptid = ravenscar_active_task (base_cpu);
  gdb_assert (inferior_ptid != null_ptid);

  /* The running thread may not have been added to
     system.tasking.debug's list yet; so update_thread_list
     may not always add it to the thread list.  Add it here.  */
  if (!find_thread_ptid (inferior_ptid))
    add_thread (inferior_ptid);
}

/* The Ravenscar Runtime exports a symbol which contains the ID of
   the thread that is currently running.  Try to locate that symbol
   and return its associated minimal symbol.
   Return NULL if not found.  */

static struct bound_minimal_symbol
get_running_thread_msymbol (void)
{
  struct bound_minimal_symbol msym;

  msym = lookup_minimal_symbol (running_thread_name, NULL, NULL);
  if (!msym.minsym)
    /* Older versions of the GNAT runtime were using a different
       (less ideal) name for the symbol where the active thread ID
       is stored.  If we couldn't find the symbol using the latest
       name, then try the old one.  */
    msym = lookup_minimal_symbol ("running_thread", NULL, NULL);

  return msym;
}

/* Return True if the Ada Ravenscar run-time can be found in the
   application.  */

static int
has_ravenscar_runtime (void)
{
  struct bound_minimal_symbol msym_ravenscar_runtime_initializer
    = lookup_minimal_symbol (ravenscar_runtime_initializer, NULL, NULL);
  struct bound_minimal_symbol msym_known_tasks
    = lookup_minimal_symbol (known_tasks_name, NULL, NULL);
  struct bound_minimal_symbol msym_first_task
    = lookup_minimal_symbol (first_task_name, NULL, NULL);
  struct bound_minimal_symbol msym_running_thread
    = get_running_thread_msymbol ();

  return (msym_ravenscar_runtime_initializer.minsym
          && (msym_known_tasks.minsym || msym_first_task.minsym)
          && msym_running_thread.minsym);
}

/* Return True if the Ada Ravenscar run-time can be found in the
   application, and if it has been initialized on target.  */

static int
ravenscar_runtime_initialized (void)
{
  return (!(ravenscar_active_task (1) == null_ptid));
}

/* Return the ID of the thread that is currently running on CPU.
   Return 0 if the ID could not be determined.  */

static CORE_ADDR
get_running_thread_id (int cpu)
{
  struct bound_minimal_symbol object_msym = get_running_thread_msymbol ();
  int object_size;
  int buf_size;
  gdb_byte *buf;
  CORE_ADDR object_addr;
  struct type *builtin_type_void_data_ptr
    = builtin_type (target_gdbarch ())->builtin_data_ptr;

  if (!object_msym.minsym)
    return 0;

  object_size = TYPE_LENGTH (builtin_type_void_data_ptr);
  object_addr = (BMSYMBOL_VALUE_ADDRESS (object_msym)
                 + (cpu - 1) * object_size);
  buf_size = object_size;
  buf = (gdb_byte *) alloca (buf_size);
  read_memory (object_addr, buf, buf_size);
  return extract_typed_address (buf, builtin_type_void_data_ptr);
}

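/* Implement the to_resume target_ops "method".  Resuming is always
   done on the base CPU thread, so the request is forwarded to the
   target beneath using base_ptid.  */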
void
ravenscar_thread_target::resume (ptid_t ptid, int step,
                                 enum gdb_signal siggnal)
{
  inferior_ptid = base_ptid;
  beneath ()->resume (base_ptid, step, siggnal);
}

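/* Implement the to_wait target_ops "method".  The wait itself is
   delegated to the target beneath using base_ptid; once an event
   arrives and the program is still alive, the thread list is
   refreshed and inferior_ptid is set to the active ravenscar task.  */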
ptid_t
ravenscar_thread_target::wait (ptid_t ptid,
                               struct target_waitstatus *status,
                               int options)
{
  ptid_t event_ptid;

  inferior_ptid = base_ptid;
  event_ptid = beneath ()->wait (base_ptid, status, 0);
  /* Find any new threads that might have been created, and update
     inferior_ptid to the active thread.

     Only do it if the program is still alive, though.  Otherwise,
     this causes problems when debugging through the remote protocol,
     because we might try switching threads (and thus sending packets)
     after the remote has disconnected.  */
  if (status->kind != TARGET_WAITKIND_EXITED
      && status->kind != TARGET_WAITKIND_SIGNALLED)
    {
      inferior_ptid = event_ptid;
      this->update_thread_list ();
      ravenscar_update_inferior_ptid ();
    }
  return inferior_ptid;
}

/* Add the thread associated with the given TASK to the thread list
   (if the thread has already been added, this is a no-op).  */

static void
ravenscar_add_thread (struct ada_task_info *task)
{
  if (find_thread_ptid (task->ptid) == NULL)
    add_thread (task->ptid);
}

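/* Implement the to_update_thread_list target_ops "method".  */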
void
ravenscar_thread_target::update_thread_list ()
{
  ada_build_task_list ();

  /* Do not clear the thread list before adding the Ada tasks, in
     order to keep the threads that the process stratum has added to
     it (base_ptid) as well as the running thread, which may not have
     been added to system.tasking.debug's list yet.  */

  iterate_over_live_ada_tasks (ravenscar_add_thread);
}

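/* Return the ptid of the task currently running on CPU, or null_ptid
   if the running thread for that CPU could not be determined.  */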
static ptid_t
ravenscar_active_task (int cpu)
{
  CORE_ADDR tid = get_running_thread_id (cpu);

  if (tid == 0)
    return null_ptid;
  else
    return ptid_t (base_ptid.pid (), 0, tid);
}

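/* Implement the to_extra_thread_info target_ops "method".  */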
const char *
ravenscar_thread_target::extra_thread_info (thread_info *tp)
{
  return "Ravenscar task";
}

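/* Implement the to_thread_alive target_ops "method".  */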
bool
ravenscar_thread_target::thread_alive (ptid_t ptid)
{
  /* Ravenscar tasks are non-terminating.  */
  return true;
}

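/* Implement the to_pid_to_str target_ops "method".  */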
const char *
ravenscar_thread_target::pid_to_str (ptid_t ptid)
{
  static char buf[30];

  snprintf (buf, sizeof (buf), "Thread %#x", (int) ptid.tid ());
  return buf;
}

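/* Implement the to_fetch_registers target_ops "method".  For a
   ravenscar task that is not the one currently running on its CPU,
   the registers are fetched from the memory area where the runtime
   saved them; otherwise the request is passed to the target
   beneath.  */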
void
ravenscar_thread_target::fetch_registers (struct regcache *regcache, int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (ravenscar_runtime_initialized ()
      && is_ravenscar_task (ptid)
      && !ravenscar_task_is_currently_active (ptid))
    {
      struct gdbarch *gdbarch = regcache->arch ();
      struct ravenscar_arch_ops *arch_ops
        = gdbarch_ravenscar_ops (gdbarch);

      arch_ops->fetch_registers (regcache, regnum);
    }
  else
    beneath ()->fetch_registers (regcache, regnum);
}

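/* Implement the to_store_registers target_ops "method".  The mirror
   image of fetch_registers above: registers of a suspended ravenscar
   task are written to its save area in memory, while any other
   request goes to the target beneath.  */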
void
ravenscar_thread_target::store_registers (struct regcache *regcache,
                                          int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (ravenscar_runtime_initialized ()
      && is_ravenscar_task (ptid)
      && !ravenscar_task_is_currently_active (ptid))
    {
      struct gdbarch *gdbarch = regcache->arch ();
      struct ravenscar_arch_ops *arch_ops
        = gdbarch_ravenscar_ops (gdbarch);

      arch_ops->store_registers (regcache, regnum);
    }
  else
    beneath ()->store_registers (regcache, regnum);
}

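/* Implement the to_prepare_to_store target_ops "method".  Nothing
   needs to be done for a suspended ravenscar task, whose registers
   are stored directly to memory.  */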
void
ravenscar_thread_target::prepare_to_store (struct regcache *regcache)
{
  ptid_t ptid = regcache->ptid ();

  if (ravenscar_runtime_initialized ()
      && is_ravenscar_task (ptid)
      && !ravenscar_task_is_currently_active (ptid))
    {
      /* Nothing.  */
    }
  else
    beneath ()->prepare_to_store (regcache);
}

/* Implement the to_stopped_by_sw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_sw_breakpoint ()
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_sw_breakpoint ();
}

/* Implement the to_stopped_by_hw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_hw_breakpoint ()
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_hw_breakpoint ();
}

/* Implement the to_stopped_by_watchpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_watchpoint ()
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_watchpoint ();
}

/* Implement the to_stopped_data_address target_ops "method".  */

bool
ravenscar_thread_target::stopped_data_address (CORE_ADDR *addr_p)
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_data_address (addr_p);
}

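/* Implement the to_mourn_inferior target_ops "method".  */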
void
ravenscar_thread_target::mourn_inferior ()
{
  base_ptid = null_ptid;
  beneath ()->mourn_inferior ();
  unpush_target (&ravenscar_ops);
}

/* Implement the to_core_of_thread target_ops "method".  */

int
ravenscar_thread_target::core_of_thread (ptid_t ptid)
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->core_of_thread (inferior_ptid);
}

/* Observer on inferior_created: push ravenscar thread stratum if needed.  */

static void
ravenscar_inferior_created (struct target_ops *target, int from_tty)
{
  const char *err_msg;

  if (!ravenscar_task_support
      || gdbarch_ravenscar_ops (target_gdbarch ()) == NULL
      || !has_ravenscar_runtime ())
    return;

  err_msg = ada_get_tcb_types_info ();
  if (err_msg != NULL)
    {
      warning (_("%s.  Task/thread support disabled."), err_msg);
      return;
    }

  ravenscar_update_inferior_ptid ();
  push_target (&ravenscar_ops);
}

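/* Implement the to_get_ada_task_ptid target_ops "method".  */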
ptid_t
ravenscar_thread_target::get_ada_task_ptid (long lwp, long thread)
{
  return ptid_t (base_ptid.pid (), 0, thread);
}

/* Command-list for the "set/show ravenscar" prefix command.  */
static struct cmd_list_element *set_ravenscar_list;
static struct cmd_list_element *show_ravenscar_list;

/* Implement the "set ravenscar" prefix command.  */

static void
set_ravenscar_command (const char *arg, int from_tty)
{
  printf_unfiltered (_(\
"\"set ravenscar\" must be followed by the name of a setting.\n"));
  help_list (set_ravenscar_list, "set ravenscar ", all_commands, gdb_stdout);
}

/* Implement the "show ravenscar" prefix command.  */

static void
show_ravenscar_command (const char *args, int from_tty)
{
  cmd_show_list (show_ravenscar_list, from_tty, "");
}

/* Implement the "show ravenscar task-switching" command.  */

static void
show_ravenscar_task_switching_command (struct ui_file *file, int from_tty,
                                       struct cmd_list_element *c,
                                       const char *value)
{
  if (ravenscar_task_support)
    fprintf_filtered (file, _("\
Support for Ravenscar task/thread switching is enabled\n"));
  else
    fprintf_filtered (file, _("\
Support for Ravenscar task/thread switching is disabled\n"));
}

/* Module startup initialization function, automagically called by
   init.c.  */

void
_initialize_ravenscar (void)
{
  base_ptid = null_ptid;

  /* Notice when the inferior is created in order to push the
     ravenscar ops if needed.  */
  gdb::observers::inferior_created.attach (ravenscar_inferior_created);

  add_prefix_cmd ("ravenscar", no_class, set_ravenscar_command,
                  _("Prefix command for changing Ravenscar-specific settings"),
                  &set_ravenscar_list, "set ravenscar ", 0, &setlist);

  add_prefix_cmd ("ravenscar", no_class, show_ravenscar_command,
                  _("Prefix command for showing Ravenscar-specific settings"),
                  &show_ravenscar_list, "show ravenscar ", 0, &showlist);

  add_setshow_boolean_cmd ("task-switching", class_obscure,
                           &ravenscar_task_support, _("\
Enable or disable support for GNAT Ravenscar tasks"), _("\
Show whether support for GNAT Ravenscar tasks is enabled"),
                           _("\
Enable or disable support for task/thread switching with the GNAT\n\
Ravenscar run-time library for bareboard configuration."),
                           NULL, show_ravenscar_task_switching_command,
                           &set_ravenscar_list, &show_ravenscar_list);
}