X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gdbserver%2Flinux-low.cc;h=bde6c767e87e4e8b1d7f4f702a21610429e52df4;hb=6fde587ff78a54b9e3bd70259de60cc5d7d8ced7;hp=546ca731d8f2271329191579382dc8e3a3c8f071;hpb=aa8d21c9bb43baaa35f456a3d371942a26cdce4e;p=deliverable%2Fbinutils-gdb.git

diff --git a/gdbserver/linux-low.cc b/gdbserver/linux-low.cc
index 546ca731d8..bde6c767e8 100644
--- a/gdbserver/linux-low.cc
+++ b/gdbserver/linux-low.cc
@@ -86,11 +86,6 @@
 #define PT_TEXT_ADDR 49*4
 #define PT_DATA_ADDR 50*4
 #define PT_TEXT_END_ADDR 51*4
-/* BFIN already defines these since at least 2.6.32 kernels.  */
-#elif defined(BFIN)
-#define PT_TEXT_ADDR 220
-#define PT_TEXT_END_ADDR 224
-#define PT_DATA_ADDR 228
 /* These are still undefined in 3.10 kernels.  */
 #elif defined(__TMS320C6X__)
 #define PT_TEXT_ADDR (0x10000*4)
@@ -267,60 +262,47 @@ int using_threads = 1;
    jump pads).  */
 static int stabilizing_threads;

-static void linux_resume_one_lwp (struct lwp_info *lwp,
-				  int step, int signal, siginfo_t *info);
 static void unsuspend_all_lwps (struct lwp_info *except);
-static struct lwp_info *add_lwp (ptid_t ptid);
 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
 static int lwp_is_marked_dead (struct lwp_info *lwp);
-static int finish_step_over (struct lwp_info *lwp);
 static int kill_lwp (unsigned long lwpid, int signo);
 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
 static int linux_low_ptrace_options (int attached);
 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
-static void proceed_one_lwp (thread_info *thread, lwp_info *except);

 /* When the event-loop is doing a step-over, this points at the thread
    being stepped.  */
 ptid_t step_over_bkpt;

-/* True if the low target can hardware single-step.  */
-
-static int
-can_hardware_single_step (void)
+bool
+linux_process_target::low_supports_breakpoints ()
 {
-  if (the_low_target.supports_hardware_single_step != NULL)
-    return the_low_target.supports_hardware_single_step ();
-  else
-    return 0;
+  return false;
 }

-/* True if the low target can software single-step.  Such targets
-   implement the GET_NEXT_PCS callback.  */
-
-static int
-can_software_single_step (void)
+CORE_ADDR
+linux_process_target::low_get_pc (regcache *regcache)
 {
-  return (the_low_target.get_next_pcs != NULL);
+  return 0;
 }

-/* True if the low target supports memory breakpoints.  If so, we'll
-   have a GET_PC implementation.  */
-
-static int
-supports_breakpoints (void)
+void
+linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
 {
-  return (the_low_target.get_pc != NULL);
+  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
 }

-/* Returns true if this target can support fast tracepoints.  This
-   does not mean that the in-process agent has been loaded in the
-   inferior.  */
+std::vector<CORE_ADDR>
+linux_process_target::low_get_next_pcs (regcache *regcache)
+{
+  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
+			  "implemented");
+}

-static int
-supports_fast_tracepoints (void)
+int
+linux_process_target::low_decr_pc_after_break ()
 {
-  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
+  return 0;
 }

 /* True if LWP is stopped in its stepping range.
*/ @@ -403,8 +385,8 @@ linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine) return elf_64_file_p (file, machine); } -static void -delete_lwp (struct lwp_info *lwp) +void +linux_process_target::delete_lwp (lwp_info *lwp) { struct thread_info *thr = get_lwp_thread (lwp); @@ -413,32 +395,51 @@ delete_lwp (struct lwp_info *lwp) remove_thread (thr); - if (the_low_target.delete_thread != NULL) - the_low_target.delete_thread (lwp->arch_private); - else - gdb_assert (lwp->arch_private == NULL); + low_delete_thread (lwp->arch_private); free (lwp); } -/* Add a process to the common process list, and set its private - data. */ +void +linux_process_target::low_delete_thread (arch_lwp_info *info) +{ + /* Default implementation should be overridden if architecture-specific + info is being used. */ + gdb_assert (info == nullptr); +} -static struct process_info * -linux_add_process (int pid, int attached) +process_info * +linux_process_target::add_linux_process (int pid, int attached) { struct process_info *proc; proc = add_process (pid, attached); proc->priv = XCNEW (struct process_info_private); - if (the_low_target.new_process != NULL) - proc->priv->arch_private = the_low_target.new_process (); + proc->priv->arch_private = low_new_process (); return proc; } -static CORE_ADDR get_pc (struct lwp_info *lwp); +arch_process_info * +linux_process_target::low_new_process () +{ + return nullptr; +} + +void +linux_process_target::low_delete_process (arch_process_info *info) +{ + /* Default implementation must be overridden if architecture-specific + info exists. */ + gdb_assert (info == nullptr); +} + +void +linux_process_target::low_new_fork (process_info *parent, process_info *child) +{ + /* Nop. */ +} void linux_process_target::arch_setup_thread (thread_info *thread) @@ -520,7 +521,7 @@ linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp, will be detached, since we will need the process object and the breakpoints to remove any breakpoints from memory when we detach, and the client side will access registers. */ - child_proc = linux_add_process (new_pid, 0); + child_proc = add_linux_process (new_pid, 0); gdb_assert (child_proc != NULL); child_lwp = add_lwp (ptid); gdb_assert (child_lwp != NULL); @@ -547,7 +548,7 @@ linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp, child_proc->attached = parent_proc->attached; if (event_lwp->bp_reinsert != 0 - && can_software_single_step () + && supports_software_single_step () && event == PTRACE_EVENT_VFORK) { /* If we leave single-step breakpoints there, child will @@ -564,8 +565,7 @@ linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp, child_proc->tdesc = tdesc; /* Clone arch-specific process data. */ - if (the_low_target.new_fork != NULL) - the_low_target.new_fork (parent_proc, child_proc); + low_new_fork (parent_proc, child_proc); /* Save fork info in the parent thread. */ if (event == PTRACE_EVENT_FORK) @@ -592,7 +592,7 @@ linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp, In case of vfork, we'll reinsert them back once vforked child is done. */ if (event_lwp->bp_reinsert != 0 - && can_software_single_step ()) + && supports_software_single_step ()) { /* The child process is forked and stopped, so it is safe to access its memory without stopping all other threads @@ -616,9 +616,9 @@ linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp, new_lwp = add_lwp (ptid); /* Either we're going to immediately resume the new thread - or leave it stopped. 
linux_resume_one_lwp is a nop if it + or leave it stopped. resume_one_lwp is a nop if it thinks the thread is currently running, so set this first - before calling linux_resume_one_lwp. */ + before calling resume_one_lwp. */ new_lwp->stopped = 1; /* If we're suspending all threads, leave this one suspended @@ -656,7 +656,7 @@ linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp, { event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE; - if (event_lwp->bp_reinsert != 0 && can_software_single_step ()) + if (event_lwp->bp_reinsert != 0 && supports_software_single_step ()) { reinsert_single_step_breakpoints (event_thr); @@ -692,7 +692,7 @@ linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp, current_thread = NULL; /* Create a new process/lwp/thread. */ - proc = linux_add_process (event_pid, 0); + proc = add_linux_process (event_pid, 0); event_lwp = add_lwp (event_ptid); event_thr = get_lwp_thread (event_lwp); gdb_assert (current_thread == event_thr); @@ -726,24 +726,21 @@ linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp, internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event); } -/* Return the PC as read from the regcache of LWP, without any - adjustment. */ - -static CORE_ADDR -get_pc (struct lwp_info *lwp) +CORE_ADDR +linux_process_target::get_pc (lwp_info *lwp) { struct thread_info *saved_thread; struct regcache *regcache; CORE_ADDR pc; - if (the_low_target.get_pc == NULL) + if (!low_supports_breakpoints ()) return 0; saved_thread = current_thread; current_thread = get_lwp_thread (lwp); regcache = get_thread_regcache (current_thread, 1); - pc = (*the_low_target.get_pc) (regcache); + pc = low_get_pc (regcache); if (debug_threads) debug_printf ("pc is 0x%lx\n", (long) pc); @@ -752,28 +749,17 @@ get_pc (struct lwp_info *lwp) return pc; } -/* This function should only be called if LWP got a SYSCALL_SIGTRAP. - Fill *SYSNO with the syscall nr trapped. */ - -static void -get_syscall_trapinfo (struct lwp_info *lwp, int *sysno) +void +linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno) { struct thread_info *saved_thread; struct regcache *regcache; - if (the_low_target.get_syscall_trapinfo == NULL) - { - /* If we cannot get the syscall trapinfo, report an unknown - system call number. */ - *sysno = UNKNOWN_SYSCALL; - return; - } - saved_thread = current_thread; current_thread = get_lwp_thread (lwp); regcache = get_thread_regcache (current_thread, 1); - (*the_low_target.get_syscall_trapinfo) (regcache, sysno); + low_get_syscall_trapinfo (regcache, sysno); if (debug_threads) debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno); @@ -781,16 +767,15 @@ get_syscall_trapinfo (struct lwp_info *lwp, int *sysno) current_thread = saved_thread; } -static int check_stopped_by_watchpoint (struct lwp_info *child); - -/* Called when the LWP stopped for a signal/trap. If it stopped for a - trap check what caused it (breakpoint, watchpoint, trace, etc.), - and save the result in the LWP's stop_reason field. If it stopped - for a breakpoint, decrement the PC if necessary on the lwp's - architecture. Returns true if we now have the LWP's stop PC. */ +void +linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno) +{ + /* By default, report an unknown system call number. 
*/ + *sysno = UNKNOWN_SYSCALL; +} -static int -save_stop_reason (struct lwp_info *lwp) +bool +linux_process_target::save_stop_reason (lwp_info *lwp) { CORE_ADDR pc; CORE_ADDR sw_breakpoint_pc; @@ -799,11 +784,11 @@ save_stop_reason (struct lwp_info *lwp) siginfo_t siginfo; #endif - if (the_low_target.get_pc == NULL) - return 0; + if (!low_supports_breakpoints ()) + return false; pc = get_pc (lwp); - sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break; + sw_breakpoint_pc = pc - low_decr_pc_after_break (); /* breakpoint_at reads from the current thread. */ saved_thread = current_thread; @@ -857,7 +842,7 @@ save_stop_reason (struct lwp_info *lwp) then the user inserts a breakpoint inside the range. In that case we need to report the breakpoint PC. */ if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc) - && (*the_low_target.breakpoint_at) (sw_breakpoint_pc)) + && low_breakpoint_at (sw_breakpoint_pc)) lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT; if (hardware_breakpoint_inserted_here (pc)) @@ -882,7 +867,7 @@ save_stop_reason (struct lwp_info *lwp) { struct regcache *regcache = get_thread_regcache (current_thread, 1); - (*the_low_target.set_pc) (regcache, sw_breakpoint_pc); + low_set_pc (regcache, sw_breakpoint_pc); } /* Update this so we record the correct stop PC below. */ @@ -921,11 +906,11 @@ save_stop_reason (struct lwp_info *lwp) lwp->stop_pc = pc; current_thread = saved_thread; - return 1; + return true; } -static struct lwp_info * -add_lwp (ptid_t ptid) +lwp_info * +linux_process_target::add_lwp (ptid_t ptid) { struct lwp_info *lwp; @@ -935,12 +920,17 @@ add_lwp (ptid_t ptid) lwp->thread = add_thread (ptid, lwp); - if (the_low_target.new_thread != NULL) - the_low_target.new_thread (lwp); + low_new_thread (lwp); return lwp; } +void +linux_process_target::low_new_thread (lwp_info *info) +{ + /* Nop. */ +} + /* Callback to be used when calling fork_inferior, responsible for actually initiating the tracing of the inferior. */ @@ -989,7 +979,7 @@ linux_process_target::create_inferior (const char *program, { maybe_disable_address_space_randomization restore_personality (cs.disable_randomization); - std::string str_program_args = stringify_argv (program_args); + std::string str_program_args = construct_inferior_arguments (program_args); pid = fork_inferior (program, str_program_args.c_str (), @@ -997,7 +987,7 @@ linux_process_target::create_inferior (const char *program, NULL, NULL, NULL, NULL); } - linux_add_process (pid, 0); + add_linux_process (pid, 0); ptid = ptid_t (pid, pid, 0); new_lwp = add_lwp (ptid); @@ -1027,11 +1017,8 @@ linux_process_target::post_create_inferior () } } -/* Attach to an inferior process. Returns 0 on success, ERRNO on - error. */ - int -linux_attach_lwp (ptid_t ptid) +linux_process_target::attach_lwp (ptid_t ptid) { struct lwp_info *new_lwp; int lwpid = ptid.lwp (); @@ -1128,7 +1115,7 @@ attach_proc_task_lwp_callback (ptid_t ptid) if (debug_threads) debug_printf ("Found new lwp %d\n", lwpid); - err = linux_attach_lwp (ptid); + err = the_linux_target->attach_lwp (ptid); /* Be quiet if we simply raced with the thread exiting. EPERM is returned if the thread's task still exists, and is marked @@ -1170,11 +1157,11 @@ linux_process_target::attach (unsigned long pid) ptid_t ptid = ptid_t (pid, pid, 0); int err; - proc = linux_add_process (pid, 1); + proc = add_linux_process (pid, 1); /* Attach to PID. We will check for other threads soon. 
*/ - err = linux_attach_lwp (ptid); + err = attach_lwp (ptid); if (err != 0) { remove_process (proc); @@ -1482,10 +1469,8 @@ get_detach_signal (struct thread_info *thread) } } -/* Detach from LWP. */ - -static void -linux_detach_one_lwp (struct lwp_info *lwp) +void +linux_process_target::detach_one_lwp (lwp_info *lwp) { struct thread_info *thread = get_lwp_thread (lwp); int sig; @@ -1514,8 +1499,7 @@ linux_detach_one_lwp (struct lwp_info *lwp) regcache_invalidate_thread (thread); /* Finally, let it resume. */ - if (the_low_target.prepare_to_resume != NULL) - the_low_target.prepare_to_resume (lwp); + low_prepare_to_resume (lwp); } catch (const gdb_exception_error &ex) { @@ -1567,22 +1551,6 @@ linux_detach_one_lwp (struct lwp_info *lwp) delete_lwp (lwp); } -/* Callback for for_each_thread. Detaches from non-leader threads of a - given process. */ - -static void -linux_detach_lwp_callback (thread_info *thread) -{ - /* We don't actually detach from the thread group leader just yet. - If the thread group exits, we must reap the zombie clone lwps - before we're able to reap the leader. */ - if (thread->id.pid () == thread->id.lwp ()) - return; - - lwp_info *lwp = get_thread_lwp (thread); - linux_detach_one_lwp (lwp); -} - int linux_process_target::detach (process_info *process) { @@ -1609,10 +1577,20 @@ linux_process_target::detach (process_info *process) /* Detach from the clone lwps first. If the thread group exits just while we're detaching, we must reap the clone lwps before we're able to reap the leader. */ - for_each_thread (process->pid, linux_detach_lwp_callback); + for_each_thread (process->pid, [this] (thread_info *thread) + { + /* We don't actually detach from the thread group leader just yet. + If the thread group exits, we must reap the zombie clone lwps + before we're able to reap the leader. */ + if (thread->id.pid () == thread->id.lwp ()) + return; + + lwp_info *lwp = get_thread_lwp (thread); + detach_one_lwp (lwp); + }); main_lwp = find_lwp_pid (ptid_t (process->pid)); - linux_detach_one_lwp (main_lwp); + detach_one_lwp (main_lwp); mourn (process); @@ -1633,17 +1611,14 @@ linux_process_target::mourn (process_info *process) thread_db_mourn (process); #endif - for_each_thread (process->pid, [] (thread_info *thread) + for_each_thread (process->pid, [this] (thread_info *thread) { delete_lwp (get_thread_lwp (thread)); }); /* Freeing all private data. */ priv = process->priv; - if (the_low_target.delete_process != NULL) - the_low_target.delete_process (priv->arch_private); - else - gdb_assert (priv->arch_private == NULL); + low_delete_process (priv->arch_private); free (priv); process->priv = NULL; @@ -1678,12 +1653,8 @@ linux_process_target::thread_alive (ptid_t ptid) return 0; } -/* Return 1 if this lwp still has an interesting status pending. If - not (e.g., it had stopped for a breakpoint that is gone), return - false. */ - -static int -thread_still_has_status_pending_p (struct thread_info *thread) +bool +linux_process_target::thread_still_has_status_pending (thread_info *thread) { struct lwp_info *lp = get_thread_lwp (thread); @@ -1715,7 +1686,7 @@ thread_still_has_status_pending_p (struct thread_info *thread) #if !USE_SIGTRAP_SIGINFO else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT - && !(*the_low_target.breakpoint_at) (pc)) + && !low_breakpoint_at (pc)) { if (debug_threads) debug_printf ("previous SW breakpoint of %ld gone\n", @@ -1766,9 +1737,9 @@ lwp_resumed (struct lwp_info *lwp) return 0; } -/* Return true if this lwp has an interesting status pending. 
*/ -static bool -status_pending_p_callback (thread_info *thread, ptid_t ptid) +bool +linux_process_target::status_pending_p_callback (thread_info *thread, + ptid_t ptid) { struct lwp_info *lp = get_thread_lwp (thread); @@ -1781,9 +1752,9 @@ status_pending_p_callback (thread_info *thread, ptid_t ptid) return 0; if (lp->status_pending_p - && !thread_still_has_status_pending_p (thread)) + && !thread_still_has_status_pending (thread)) { - linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL); + resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL); return 0; } @@ -1839,13 +1810,10 @@ iterate_over_lwps (ptid_t filter, return get_thread_lwp (thread); } -/* Detect zombie thread group leaders, and "exit" them. We can't reap - their exits until all other threads in the group have exited. */ - -static void -check_zombie_leaders (void) +void +linux_process_target::check_zombie_leaders () { - for_each_process ([] (process_info *proc) { + for_each_process ([this] (process_info *proc) { pid_t leader_pid = pid_of (proc); struct lwp_info *leader_lp; @@ -1998,29 +1966,29 @@ handle_tracepoints (struct lwp_info *lwp) return 0; } -/* Convenience wrapper. Returns information about LWP's fast tracepoint - collection status. */ - -static fast_tpoint_collect_result -linux_fast_tracepoint_collecting (struct lwp_info *lwp, - struct fast_tpoint_collect_status *status) +fast_tpoint_collect_result +linux_process_target::linux_fast_tracepoint_collecting + (lwp_info *lwp, fast_tpoint_collect_status *status) { CORE_ADDR thread_area; struct thread_info *thread = get_lwp_thread (lwp); - if (the_low_target.get_thread_area == NULL) - return fast_tpoint_collect_result::not_collecting; - /* Get the thread area address. This is used to recognize which thread is which when tracing with the in-process agent library. We don't read anything from the address, and treat it as opaque; it's the address itself that we assume is unique per-thread. */ - if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1) + if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1) return fast_tpoint_collect_result::not_collecting; return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status); } +int +linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp) +{ + return -1; +} + bool linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat) { @@ -2110,7 +2078,7 @@ linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat) } regcache = get_thread_regcache (current_thread, 1); - (*the_low_target.set_pc) (regcache, status.tpoint_addr); + low_set_pc (regcache, status.tpoint_addr); lwp->stop_pc = status.tpoint_addr; /* Cancel any fast tracepoint lock this thread was @@ -2250,46 +2218,33 @@ dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat) return 0; } -/* Fetch the possibly triggered data watchpoint info and store it in - CHILD. - - On some archs, like x86, that use debug registers to set - watchpoints, it's possible that the way to know which watched - address trapped, is to check the register that is used to select - which address to watch. Problem is, between setting the watchpoint - and reading back which data address trapped, the user may change - the set of watchpoints, and, as a consequence, GDB changes the - debug registers in the inferior. To avoid reading back a stale - stopped-data-address when that happens, we cache in LP the fact - that a watchpoint trapped, and the corresponding data address, as - soon as we see CHILD stop with a SIGTRAP. 
If GDB changes the debug - registers meanwhile, we have the cached data we can rely on. */ - -static int -check_stopped_by_watchpoint (struct lwp_info *child) +bool +linux_process_target::check_stopped_by_watchpoint (lwp_info *child) { - if (the_low_target.stopped_by_watchpoint != NULL) - { - struct thread_info *saved_thread; + struct thread_info *saved_thread = current_thread; + current_thread = get_lwp_thread (child); - saved_thread = current_thread; - current_thread = get_lwp_thread (child); + if (low_stopped_by_watchpoint ()) + { + child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT; + child->stopped_data_address = low_stopped_data_address (); + } - if (the_low_target.stopped_by_watchpoint ()) - { - child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT; + current_thread = saved_thread; - if (the_low_target.stopped_data_address != NULL) - child->stopped_data_address - = the_low_target.stopped_data_address (); - else - child->stopped_data_address = 0; - } + return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT; +} - current_thread = saved_thread; - } +bool +linux_process_target::low_stopped_by_watchpoint () +{ + return false; +} - return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT; +CORE_ADDR +linux_process_target::low_stopped_data_address () +{ + return 0; } /* Return the ptrace options that we want to try to enable. */ @@ -2517,7 +2472,7 @@ linux_process_target::filter_event (int lwpid, int wstat) child->stepping ? "step" : "continue", target_pid_to_str (ptid_of (thread))); - linux_resume_one_lwp (child, child->stepping, 0, NULL); + resume_one_lwp (child, child->stepping, 0, NULL); return NULL; } } @@ -2527,27 +2482,22 @@ linux_process_target::filter_event (int lwpid, int wstat) return child; } -/* Return true if THREAD is doing hardware single step. */ - -static int -maybe_hw_step (struct thread_info *thread) +bool +linux_process_target::maybe_hw_step (thread_info *thread) { - if (can_hardware_single_step ()) - return 1; + if (supports_hardware_single_step ()) + return true; else { /* GDBserver must insert single-step breakpoint for software single step. */ gdb_assert (has_single_step_breakpoints (thread)); - return 0; + return false; } } -/* Resume LWPs that are currently stopped without any pending status - to report, but are resumed from the core's perspective. */ - -static void -resume_stopped_resumed_lwps (thread_info *thread) +void +linux_process_target::resume_stopped_resumed_lwps (thread_info *thread) { struct lwp_info *lp = get_thread_lwp (thread); @@ -2567,7 +2517,7 @@ resume_stopped_resumed_lwps (thread_info *thread) paddress (lp->stop_pc), step); - linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL); + resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL); } } @@ -2614,7 +2564,7 @@ linux_process_target::wait_for_event_filtered (ptid_t wait_ptid, &requested_child->status_pending); requested_child->status_pending_p = 0; requested_child->status_pending = 0; - linux_resume_one_lwp (requested_child, 0, 0, NULL); + resume_one_lwp (requested_child, 0, 0, NULL); } if (requested_child->suspended @@ -2702,7 +2652,10 @@ linux_process_target::wait_for_event_filtered (ptid_t wait_ptid, /* Now that we've pulled all events out of the kernel, resume LWPs that don't have an interesting event to report. */ if (stopping_threads == NOT_STOPPING_THREADS) - for_each_thread (resume_stopped_resumed_lwps); + for_each_thread ([this] (thread_info *thread) + { + resume_stopped_resumed_lwps (thread); + }); /* ... and find an LWP with a status to report to the core, if any. 
*/ @@ -2848,7 +2801,6 @@ unsuspend_all_lwps (struct lwp_info *except) }); } -static bool stuck_in_jump_pad_callback (thread_info *thread); static bool lwp_running (thread_info *thread); /* Stabilize threads (move out of jump pads). @@ -2884,7 +2836,10 @@ static bool lwp_running (thread_info *thread); void linux_process_target::stabilize_threads () { - thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback); + thread_info *thread_stuck = find_thread ([this] (thread_info *thread) + { + return stuck_in_jump_pad (thread); + }); if (thread_stuck != NULL) { @@ -2940,7 +2895,10 @@ linux_process_target::stabilize_threads () if (debug_threads) { - thread_stuck = find_thread (stuck_in_jump_pad_callback); + thread_stuck = find_thread ([this] (thread_info *thread) + { + return stuck_in_jump_pad (thread); + }); if (thread_stuck != NULL) debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n", @@ -2963,14 +2921,9 @@ ignore_event (struct target_waitstatus *ourstatus) return null_ptid; } -/* Convenience function that is called when the kernel reports an exit - event. This decides whether to report the event to GDB as a - process exit event, a thread exit event, or to suppress the - event. */ - -static ptid_t -filter_exit_event (struct lwp_info *event_child, - struct target_waitstatus *ourstatus) +ptid_t +linux_process_target::filter_exit_event (lwp_info *event_child, + target_waitstatus *ourstatus) { client_state &cs = get_client_state (); struct thread_info *thread = get_lwp_thread (event_child); @@ -2999,29 +2952,26 @@ gdb_catching_syscalls_p (struct lwp_info *event_child) return !proc->syscalls_to_catch.empty (); } -/* Returns 1 if GDB is interested in the event_child syscall. - Only to be called when stopped reason is SYSCALL_SIGTRAP. */ - -static int -gdb_catch_this_syscall_p (struct lwp_info *event_child) +bool +linux_process_target::gdb_catch_this_syscall (lwp_info *event_child) { int sysno; struct thread_info *thread = get_lwp_thread (event_child); struct process_info *proc = get_thread_process (thread); if (proc->syscalls_to_catch.empty ()) - return 0; + return false; if (proc->syscalls_to_catch[0] == ANY_SYSCALL) - return 1; + return true; get_syscall_trapinfo (event_child, &sysno); for (int iter : proc->syscalls_to_catch) if (iter == sysno) - return 1; + return true; - return 0; + return false; } ptid_t @@ -3188,9 +3138,9 @@ linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus, = get_thread_regcache (current_thread, 1); event_child->stop_pc += increment_pc; - (*the_low_target.set_pc) (regcache, event_child->stop_pc); + low_set_pc (regcache, event_child->stop_pc); - if (!(*the_low_target.breakpoint_at) (event_child->stop_pc)) + if (!low_breakpoint_at (event_child->stop_pc)) event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON; } } @@ -3201,11 +3151,11 @@ linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus, not support internal breakpoints at all, we also report the SIGTRAP without further processing; it's of no concern to us. 
*/ maybe_internal_trap - = (supports_breakpoints () + = (low_supports_breakpoints () && (WSTOPSIG (w) == SIGTRAP || ((WSTOPSIG (w) == SIGILL || WSTOPSIG (w) == SIGSEGV) - && (*the_low_target.breakpoint_at) (event_child->stop_pc)))); + && low_breakpoint_at (event_child->stop_pc)))); if (maybe_internal_trap) { @@ -3272,7 +3222,7 @@ linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus, debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n", WSTOPSIG (w), lwpid_of (current_thread)); - linux_resume_one_lwp (event_child, 0, 0, NULL); + resume_one_lwp (event_child, 0, 0, NULL); if (debug_threads) debug_exit (); @@ -3364,7 +3314,7 @@ linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus, /* Check if GDB is interested in this syscall. */ if (WIFSTOPPED (w) && WSTOPSIG (w) == SYSCALL_SIGTRAP - && !gdb_catch_this_syscall_p (event_child)) + && !gdb_catch_this_syscall (event_child)) { if (debug_threads) { @@ -3372,8 +3322,7 @@ linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus, lwpid_of (current_thread)); } - linux_resume_one_lwp (event_child, event_child->stepping, - 0, NULL); + resume_one_lwp (event_child, event_child->stepping, 0, NULL); if (debug_threads) debug_exit (); @@ -3429,8 +3378,8 @@ linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus, } else { - linux_resume_one_lwp (event_child, event_child->stepping, - WSTOPSIG (w), info_p); + resume_one_lwp (event_child, event_child->stepping, + WSTOPSIG (w), info_p); } if (debug_threads) @@ -3497,11 +3446,11 @@ linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus, decr_pc_after_break adjustment to the inferior's regcache ourselves. */ - if (the_low_target.set_pc != NULL) + if (low_supports_breakpoints ()) { struct regcache *regcache = get_thread_regcache (current_thread, 1); - (*the_low_target.set_pc) (regcache, event_child->stop_pc); + low_set_pc (regcache, event_child->stop_pc); } if (step_over_finished) @@ -3518,7 +3467,7 @@ linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus, /* Remove the single-step breakpoints if any. Note that there isn't single-step breakpoint if we finished stepping over. */ - if (can_software_single_step () + if (supports_software_single_step () && has_single_step_breakpoints (current_thread)) { stop_all_lwps (0, event_child); @@ -3565,7 +3514,7 @@ linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus, /* Alright, we're going to report a stop. */ /* Remove single-step breakpoints. */ - if (can_software_single_step ()) + if (supports_software_single_step ()) { /* Remove single-step breakpoints or not. It it is true, stop all lwps, so that other threads won't hit the breakpoint in the @@ -3706,13 +3655,13 @@ linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus, if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT && !cs.swbreak_feature) { - int decr_pc = the_low_target.decr_pc_after_break; + int decr_pc = low_decr_pc_after_break (); if (decr_pc != 0) { struct regcache *regcache = get_thread_regcache (current_thread, 1); - (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc); + low_set_pc (regcache, event_child->stop_pc + decr_pc); } } @@ -3969,13 +3918,8 @@ linux_process_target::wait_for_sigstop () } } -/* Returns true if THREAD is stopped in a jump pad, and we can't - move it out, because we need to report the stop event to GDB. For - example, if the user puts a breakpoint in the jump pad, it's - because she wants to debug it. 
*/ - -static bool -stuck_in_jump_pad_callback (thread_info *thread) +bool +linux_process_target::stuck_in_jump_pad (thread_info *thread) { struct lwp_info *lwp = get_thread_lwp (thread); @@ -4039,7 +3983,7 @@ linux_process_target::move_out_of_jump_pad (thread_info *thread) WSTOPSIG (*wstat), lwpid_of (thread)); } - linux_resume_one_lwp (lwp, 0, 0, NULL); + resume_one_lwp (lwp, 0, 0, NULL); } else lwp_suspended_inc (lwp); @@ -4117,10 +4061,8 @@ enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info) lwp->pending_signals = p_sig; } -/* Install breakpoints for software single stepping. */ - -static void -install_software_single_step_breakpoints (struct lwp_info *lwp) +void +linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp) { struct thread_info *thread = get_lwp_thread (lwp); struct regcache *regcache = get_thread_regcache (thread, 1); @@ -4128,26 +4070,22 @@ install_software_single_step_breakpoints (struct lwp_info *lwp) scoped_restore save_current_thread = make_scoped_restore (¤t_thread); current_thread = thread; - std::vector next_pcs = the_low_target.get_next_pcs (regcache); + std::vector next_pcs = low_get_next_pcs (regcache); for (CORE_ADDR pc : next_pcs) set_single_step_breakpoint (pc, current_ptid); } -/* Single step via hardware or software single step. - Return 1 if hardware single stepping, 0 if software single stepping - or can't single step. */ - -static int -single_step (struct lwp_info* lwp) +int +linux_process_target::single_step (lwp_info* lwp) { int step = 0; - if (can_hardware_single_step ()) + if (supports_hardware_single_step ()) { step = 1; } - else if (can_software_single_step ()) + else if (supports_software_single_step ()) { install_software_single_step_breakpoints (lwp); step = 0; @@ -4174,12 +4112,9 @@ lwp_signal_can_be_delivered (struct lwp_info *lwp) == fast_tpoint_collect_result::not_collecting); } -/* Resume execution of LWP. If STEP is nonzero, single-step it. If - SIGNAL is nonzero, give it that signal. */ - -static void -linux_resume_one_lwp_throw (struct lwp_info *lwp, - int step, int signal, siginfo_t *info) +void +linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step, + int signal, siginfo_t *info) { struct thread_info *thread = get_lwp_thread (lwp); struct thread_info *saved_thread; @@ -4257,7 +4192,7 @@ linux_resume_one_lwp_throw (struct lwp_info *lwp, debug_printf (" pending reinsert at 0x%s\n", paddress (lwp->bp_reinsert)); - if (can_hardware_single_step ()) + if (supports_hardware_single_step ()) { if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting) { @@ -4286,7 +4221,7 @@ linux_resume_one_lwp_throw (struct lwp_info *lwp, " single-stepping\n", lwpid_of (thread)); - if (can_hardware_single_step ()) + if (supports_hardware_single_step ()) step = 1; else { @@ -4313,11 +4248,11 @@ linux_resume_one_lwp_throw (struct lwp_info *lwp, step = single_step (lwp); } - if (proc->tdesc != NULL && the_low_target.get_pc != NULL) + if (proc->tdesc != NULL && low_supports_breakpoints ()) { struct regcache *regcache = get_thread_regcache (current_thread, 1); - lwp->stop_pc = (*the_low_target.get_pc) (regcache); + lwp->stop_pc = low_get_pc (regcache); if (debug_threads) { @@ -4350,8 +4285,7 @@ linux_resume_one_lwp_throw (struct lwp_info *lwp, lwpid_of (thread), step ? "step" : "continue", signal, lwp->stop_expected ? 
"expected" : "not expected"); - if (the_low_target.prepare_to_resume != NULL) - the_low_target.prepare_to_resume (lwp); + low_prepare_to_resume (lwp); regcache_invalidate_thread (thread); errno = 0; @@ -4383,6 +4317,12 @@ linux_resume_one_lwp_throw (struct lwp_info *lwp, lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON; } +void +linux_process_target::low_prepare_to_resume (lwp_info *lwp) +{ + /* Nop. */ +} + /* Called when we try to resume a stopped LWP and that errors out. If the LWP is no longer in ptrace-stopped state (meaning it's zombie, or about to become), discard the error, clear any pending status @@ -4416,16 +4356,13 @@ check_ptrace_stopped_lwp_gone (struct lwp_info *lp) return 0; } -/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP - disappears while we try to resume it. */ - -static void -linux_resume_one_lwp (struct lwp_info *lwp, - int step, int signal, siginfo_t *info) +void +linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal, + siginfo_t *info) { try { - linux_resume_one_lwp_throw (lwp, step, signal, info); + resume_one_lwp_throw (lwp, step, signal, info); } catch (const gdb_exception_error &ex) { @@ -4546,11 +4483,8 @@ linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n) lwp->resume = NULL; } -/* find_thread callback for linux_resume. Return true if this lwp has an - interesting status pending. */ - -static bool -resume_status_pending_p (thread_info *thread) +bool +linux_process_target::resume_status_pending (thread_info *thread) { struct lwp_info *lwp = get_thread_lwp (thread); @@ -4559,16 +4493,11 @@ resume_status_pending_p (thread_info *thread) if (lwp->resume == NULL) return false; - return thread_still_has_status_pending_p (thread); + return thread_still_has_status_pending (thread); } -/* Return 1 if this lwp that GDB wants running is stopped at an - internal breakpoint that we need to step over. It assumes that any - required STOP_PC adjustment has already been propagated to the - inferior's regcache. */ - -static bool -need_step_over_p (thread_info *thread) +bool +linux_process_target::thread_needs_step_over (thread_info *thread) { struct lwp_info *lwp = get_thread_lwp (thread); struct thread_info *saved_thread; @@ -4640,7 +4569,7 @@ need_step_over_p (thread_info *thread) /* On software single step target, resume the inferior with signal rather than stepping over. */ - if (can_software_single_step () + if (supports_software_single_step () && lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp)) { @@ -4739,18 +4668,14 @@ linux_process_target::start_step_over (lwp_info *lwp) current_thread = saved_thread; - linux_resume_one_lwp (lwp, step, 0, NULL); + resume_one_lwp (lwp, step, 0, NULL); /* Require next event from this LWP. */ step_over_bkpt = thread->id; } -/* Finish a step-over. Reinsert the breakpoint we had uninserted in - start_step_over, if still there, and delete any single-step - breakpoints we've set, on non hardware single-step targets. */ - -static int -finish_step_over (struct lwp_info *lwp) +bool +linux_process_target::finish_step_over (lwp_info *lwp) { if (lwp->bp_reinsert != 0) { @@ -4773,7 +4698,7 @@ finish_step_over (struct lwp_info *lwp) and later not being able to explain it, because we were stepping over a breakpoint, and we hold all threads but LWP stopped while doing that. 
*/ - if (!can_hardware_single_step ()) + if (!supports_hardware_single_step ()) { gdb_assert (has_single_step_breakpoints (current_thread)); delete_single_step_breakpoints (current_thread); @@ -4781,10 +4706,10 @@ finish_step_over (struct lwp_info *lwp) step_over_bkpt = null_ptid; current_thread = saved_thread; - return 1; + return true; } else - return 0; + return false; } void @@ -4814,21 +4739,9 @@ linux_process_target::complete_ongoing_step_over () } } -/* This function is called once per thread. We check the thread's resume - request, which will tell us whether to resume, step, or leave the thread - stopped; and what signal, if any, it should be sent. - - For threads which we aren't explicitly told otherwise, we preserve - the stepping flag; this is used for stepping over gdbserver-placed - breakpoints. - - If pending_flags was set in any thread, we queue any needed - signals, since we won't actually resume. We already have a pending - event to report, so we don't need to preserve any step requests; - they should be re-issued if necessary. */ - -static void -linux_resume_one_thread (thread_info *thread, bool leave_all_stopped) +void +linux_process_target::resume_one_thread (thread_info *thread, + bool leave_all_stopped) { struct lwp_info *lwp = get_thread_lwp (thread); int leave_pending; @@ -4955,7 +4868,10 @@ linux_process_target::resume (thread_resume *resume_info, size_t n) before considering to start a step-over (in all-stop). */ bool any_pending = false; if (!non_stop) - any_pending = find_thread (resume_status_pending_p) != NULL; + any_pending = find_thread ([this] (thread_info *thread) + { + return resume_status_pending (thread); + }) != nullptr; /* If there is a thread which would otherwise be resumed, which is stopped at a breakpoint that needs stepping over, then don't @@ -4963,8 +4879,11 @@ linux_process_target::resume (thread_resume *resume_info, size_t n) other threads stopped, then resume all threads again. Make sure to queue any signals that would otherwise be delivered or queued. */ - if (!any_pending && supports_breakpoints ()) - need_step_over = find_thread (need_step_over_p); + if (!any_pending && low_supports_breakpoints ()) + need_step_over = find_thread ([this] (thread_info *thread) + { + return thread_needs_step_over (thread); + }); bool leave_all_stopped = (need_step_over != NULL || any_pending); @@ -4983,7 +4902,7 @@ linux_process_target::resume (thread_resume *resume_info, size_t n) otherwise deliver. */ for_each_thread ([&] (thread_info *thread) { - linux_resume_one_thread (thread, leave_all_stopped); + resume_one_thread (thread, leave_all_stopped); }); if (need_step_over) @@ -5001,17 +4920,8 @@ linux_process_target::resume (thread_resume *resume_info, size_t n) async_file_mark (); } -/* This function is called once per thread. We check the thread's - last resume request, which will tell us whether to resume, step, or - leave the thread stopped. Any signal the client requested to be - delivered has already been enqueued at this point. - - If any thread that GDB wants running is stopped at an internal - breakpoint that needs stepping over, we start a step-over operation - on that particular thread, and leave all others stopped. 
*/ - -static void -proceed_one_lwp (thread_info *thread, lwp_info *except) +void +linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except) { struct lwp_info *lwp = get_thread_lwp (thread); int step; @@ -5087,7 +4997,7 @@ proceed_one_lwp (thread_info *thread, lwp_info *except) /* If resume_step is requested by GDB, install single-step breakpoints when the thread is about to be actually resumed if the single-step breakpoints weren't removed. */ - if (can_software_single_step () + if (supports_software_single_step () && !has_single_step_breakpoints (thread)) install_software_single_step_breakpoints (lwp); @@ -5104,11 +5014,12 @@ proceed_one_lwp (thread_info *thread, lwp_info *except) else step = 0; - linux_resume_one_lwp (lwp, step, 0, NULL); + resume_one_lwp (lwp, step, 0, NULL); } -static void -unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except) +void +linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread, + lwp_info *except) { struct lwp_info *lwp = get_thread_lwp (thread); @@ -5130,9 +5041,12 @@ linux_process_target::proceed_all_lwps () resume any threads - have it step over the breakpoint with all other threads stopped, then resume all threads again. */ - if (supports_breakpoints ()) + if (low_supports_breakpoints ()) { - need_step_over = find_thread (need_step_over_p); + need_step_over = find_thread ([this] (thread_info *thread) + { + return thread_needs_step_over (thread); + }); if (need_step_over != NULL) { @@ -5149,7 +5063,7 @@ linux_process_target::proceed_all_lwps () if (debug_threads) debug_printf ("Proceeding, no step-over needed\n"); - for_each_thread ([] (thread_info *thread) + for_each_thread ([this] (thread_info *thread) { proceed_one_lwp (thread, NULL); }); @@ -5362,7 +5276,7 @@ regsets_store_inferior_registers (struct regsets_info *regsets_info, /* At this point, ESRCH should mean the process is already gone, in which case we simply ignore attempts to change its registers. See also the related - comment in linux_resume_one_lwp. */ + comment in resume_one_lwp. */ free (buf); return 0; } @@ -5418,10 +5332,10 @@ register_addr (const struct usrregs_info *usrregs, int regnum) return addr; } -/* Fetch one register. */ -static void -fetch_register (const struct usrregs_info *usrregs, - struct regcache *regcache, int regno) + +void +linux_process_target::fetch_register (const usrregs_info *usrregs, + regcache *regcache, int regno) { CORE_ADDR regaddr; int i, size; @@ -5430,7 +5344,7 @@ fetch_register (const struct usrregs_info *usrregs, if (regno >= usrregs->num_regs) return; - if ((*the_low_target.cannot_fetch_register) (regno)) + if (low_cannot_fetch_register (regno)) return; regaddr = register_addr (usrregs, regno); @@ -5460,16 +5374,12 @@ fetch_register (const struct usrregs_info *usrregs, } } - if (the_low_target.supply_ptrace_register) - the_low_target.supply_ptrace_register (regcache, regno, buf); - else - supply_register (regcache, regno, buf); + low_supply_ptrace_register (regcache, regno, buf); } -/* Store one register. 
*/ -static void -store_register (const struct usrregs_info *usrregs, - struct regcache *regcache, int regno) +void +linux_process_target::store_register (const usrregs_info *usrregs, + regcache *regcache, int regno) { CORE_ADDR regaddr; int i, size; @@ -5478,7 +5388,7 @@ store_register (const struct usrregs_info *usrregs, if (regno >= usrregs->num_regs) return; - if ((*the_low_target.cannot_store_register) (regno)) + if (low_cannot_store_register (regno)) return; regaddr = register_addr (usrregs, regno); @@ -5491,10 +5401,7 @@ store_register (const struct usrregs_info *usrregs, buf = (char *) alloca (size); memset (buf, 0, size); - if (the_low_target.collect_ptrace_register) - the_low_target.collect_ptrace_register (regcache, regno, buf); - else - collect_register (regcache, regno, buf); + low_collect_ptrace_register (regcache, regno, buf); pid = lwpid_of (current_thread); for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE)) @@ -5510,26 +5417,39 @@ store_register (const struct usrregs_info *usrregs, /* At this point, ESRCH should mean the process is already gone, in which case we simply ignore attempts to change its registers. See also the related - comment in linux_resume_one_lwp. */ + comment in resume_one_lwp. */ if (errno == ESRCH) return; - if ((*the_low_target.cannot_store_register) (regno) == 0) + + if (!low_cannot_store_register (regno)) error ("writing register %d: %s", regno, safe_strerror (errno)); } regaddr += sizeof (PTRACE_XFER_TYPE); } } +#endif /* HAVE_LINUX_USRREGS */ -/* Fetch all registers, or just one, from the child process. - If REGNO is -1, do this for all registers, skipping any that are - assumed to have been retrieved by regsets_fetch_inferior_registers, - unless ALL is non-zero. - Otherwise, REGNO specifies which register (so we can save time). */ -static void -usr_fetch_inferior_registers (const struct regs_info *regs_info, - struct regcache *regcache, int regno, int all) +void +linux_process_target::low_collect_ptrace_register (regcache *regcache, + int regno, char *buf) { + collect_register (regcache, regno, buf); +} + +void +linux_process_target::low_supply_ptrace_register (regcache *regcache, + int regno, const char *buf) +{ + supply_register (regcache, regno, buf); +} + +void +linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info, + regcache *regcache, + int regno, int all) +{ +#ifdef HAVE_LINUX_USRREGS struct usrregs_info *usr = regs_info->usrregs; if (regno == -1) @@ -5540,17 +5460,15 @@ usr_fetch_inferior_registers (const struct regs_info *regs_info, } else fetch_register (usr, regcache, regno); +#endif } -/* Store our register values back into the inferior. - If REGNO is -1, do this for all registers, skipping any that are - assumed to have been saved by regsets_store_inferior_registers, - unless ALL is non-zero. - Otherwise, REGNO specifies which register (so we can save time). 
*/ -static void -usr_store_inferior_registers (const struct regs_info *regs_info, - struct regcache *regcache, int regno, int all) +void +linux_process_target::usr_store_inferior_registers (const regs_info *regs_info, + regcache *regcache, + int regno, int all) { +#ifdef HAVE_LINUX_USRREGS struct usrregs_info *usr = regs_info->usrregs; if (regno == -1) @@ -5561,15 +5479,8 @@ usr_store_inferior_registers (const struct regs_info *regs_info, } else store_register (usr, regcache, regno); -} - -#else /* !HAVE_LINUX_USRREGS */ - -#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0) -#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0) - #endif - +} void linux_process_target::fetch_registers (regcache *regcache, int regno) @@ -5580,10 +5491,9 @@ linux_process_target::fetch_registers (regcache *regcache, int regno) if (regno == -1) { - if (the_low_target.fetch_register != NULL - && regs_info->usrregs != NULL) + if (regs_info->usrregs != NULL) for (regno = 0; regno < regs_info->usrregs->num_regs; regno++) - (*the_low_target.fetch_register) (regcache, regno); + low_fetch_register (regcache, regno); all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache); if (regs_info->usrregs != NULL) @@ -5591,8 +5501,7 @@ linux_process_target::fetch_registers (regcache *regcache, int regno) } else { - if (the_low_target.fetch_register != NULL - && (*the_low_target.fetch_register) (regcache, regno)) + if (low_fetch_register (regcache, regno)) return; use_regsets = linux_register_in_regsets (regs_info, regno); @@ -5629,6 +5538,11 @@ linux_process_target::store_registers (regcache *regcache, int regno) } } +bool +linux_process_target::low_fetch_register (regcache *regcache, int regno) +{ + return false; +} /* A wrapper for the read_memory target op. */ @@ -5874,28 +5788,22 @@ linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr, return n; } -/* These breakpoint and watchpoint related wrapper functions simply - pass on the function call if the target has registered a - corresponding function. */ - -bool -linux_process_target::supports_z_point_type (char z_type) -{ - return (the_low_target.supports_z_point_type != NULL - && the_low_target.supports_z_point_type (z_type)); -} - int linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr, int size, raw_breakpoint *bp) { if (type == raw_bkpt_type_sw) return insert_memory_breakpoint (bp); - else if (the_low_target.insert_point != NULL) - return the_low_target.insert_point (type, addr, size, bp); else - /* Unsupported (see target.h). */ - return 1; + return low_insert_point (type, addr, size, bp); +} + +int +linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr, + int size, raw_breakpoint *bp) +{ + /* Unsupported (see target.h). */ + return 1; } int @@ -5904,11 +5812,16 @@ linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr, { if (type == raw_bkpt_type_sw) return remove_memory_breakpoint (bp); - else if (the_low_target.remove_point != NULL) - return the_low_target.remove_point (type, addr, size, bp); else - /* Unsupported (see target.h). */ - return 1; + return low_remove_point (type, addr, size, bp); +} + +int +linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr, + int size, raw_breakpoint *bp) +{ + /* Unsupported (see target.h). 
*/ + return 1; } /* Implement the stopped_by_sw_breakpoint target_ops @@ -5956,13 +5869,7 @@ linux_process_target::supports_stopped_by_hw_breakpoint () bool linux_process_target::supports_hardware_single_step () { - return can_hardware_single_step (); -} - -bool -linux_process_target::supports_software_single_step () -{ - return can_software_single_step (); + return true; } bool @@ -6075,16 +5982,11 @@ linux_process_target::qxfer_osdata (const char *annex, return linux_common_xfer_osdata (annex, readbuf, offset, len); } -/* Convert a native/host siginfo object, into/from the siginfo in the - layout of the inferiors' architecture. */ - -static void -siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction) +void +linux_process_target::siginfo_fixup (siginfo_t *siginfo, + gdb_byte *inf_siginfo, int direction) { - int done = 0; - - if (the_low_target.siginfo_fixup != NULL) - done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction); + bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction); /* If there was no callback, or the callback didn't do anything, then just do a straight memcpy. */ @@ -6097,6 +5999,13 @@ siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction) } } +bool +linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf, + int direction) +{ + return false; +} + bool linux_process_target::supports_qxfer_siginfo () { @@ -6348,12 +6257,16 @@ linux_process_target::supports_agent () bool linux_process_target::supports_range_stepping () { - if (can_software_single_step ()) + if (supports_software_single_step ()) return true; - if (*the_low_target.supports_range_stepping == NULL) - return false; - return (*the_low_target.supports_range_stepping) (); + return low_supports_range_stepping (); +} + +bool +linux_process_target::low_supports_range_stepping () +{ + return false; } bool @@ -6476,53 +6389,34 @@ linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset, } #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */ -void -linux_process_target::process_qsupported (char **features, int count) -{ - if (the_low_target.process_qsupported != NULL) - the_low_target.process_qsupported (features, count); -} - bool linux_process_target::supports_catch_syscall () { - return (the_low_target.get_syscall_trapinfo != NULL + return (low_supports_catch_syscall () && linux_supports_tracesysgood ()); } -int -linux_process_target::get_ipa_tdesc_idx () -{ - if (the_low_target.get_ipa_tdesc_idx == NULL) - return 0; - - return (*the_low_target.get_ipa_tdesc_idx) (); -} - bool -linux_process_target::supports_tracepoints () +linux_process_target::low_supports_catch_syscall () { - if (*the_low_target.supports_tracepoints == NULL) - return false; - - return (*the_low_target.supports_tracepoints) (); + return false; } CORE_ADDR linux_process_target::read_pc (regcache *regcache) { - if (the_low_target.get_pc == NULL) + if (!low_supports_breakpoints ()) return 0; - return (*the_low_target.get_pc) (regcache); + return low_get_pc (regcache); } void linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc) { - gdb_assert (the_low_target.set_pc != NULL); + gdb_assert (low_supports_breakpoints ()); - (*the_low_target.set_pc) (regcache, pc); + low_set_pc (regcache, pc); } bool @@ -6573,44 +6467,6 @@ linux_process_target::done_accessing_memory () target_unpause_all (true); } -bool -linux_process_target::supports_fast_tracepoints () -{ - return the_low_target.install_fast_tracepoint_jump_pad != nullptr; -} - -int 
-linux_process_target::install_fast_tracepoint_jump_pad
-  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
-   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
-   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
-   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
-   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
-   char *err)
-{
-  return (*the_low_target.install_fast_tracepoint_jump_pad)
-    (tpoint, tpaddr, collector, lockaddr, orig_size,
-     jump_entry, trampoline, trampoline_size,
-     jjump_pad_insn, jjump_pad_insn_size,
-     adjusted_insn_addr, adjusted_insn_addr_end,
-     err);
-}
-
-emit_ops *
-linux_process_target::emit_ops ()
-{
-  if (the_low_target.emit_ops != NULL)
-    return (*the_low_target.emit_ops) ();
-  else
-    return NULL;
-}
-
-int
-linux_process_target::get_min_fast_tracepoint_insn_len ()
-{
-  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
-}
-
 /* Extract &phdr and num_phdr in the inferior.  Return 0 on success.  */

 static int
@@ -7279,39 +7135,6 @@ current_lwp_ptid (void)
   return ptid_of (current_thread);
 }

-/* Implementation of the target_ops method "breakpoint_kind_from_pc".  */
-
-int
-linux_process_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
-{
-  if (the_low_target.breakpoint_kind_from_pc != NULL)
-    return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
-  else
-    return process_stratum_target::breakpoint_kind_from_pc (pcptr);
-}
-
-/* Implementation of the target_ops method "sw_breakpoint_from_kind".  */
-
-const gdb_byte *
-linux_process_target::sw_breakpoint_from_kind (int kind, int *size)
-{
-  gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
-
-  return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
-}
-
-/* Implementation of the target_ops method
-   "breakpoint_kind_from_current_state".  */
-
-int
-linux_process_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
-{
-  if (the_low_target.breakpoint_kind_from_current_state != NULL)
-    return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
-  else
-    return breakpoint_kind_from_pc (pcptr);
-}
-
 const char *
 linux_process_target::thread_name (ptid_t thread)
 {
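
The hunks above replace entries of the old the_low_target callback table with virtual methods on linux_process_target that carry safe defaults (return false/0, assert, or do nothing), and architecture backends are expected to override only the hooks they actually implement.  The sketch below is not part of the patch; it only illustrates that usage pattern.  The class name example_target and the raw register name "pc" are made up for illustration, a 64-bit PC register is assumed, and only a few of the new low_* hooks are shown (the remaining pure-virtual ones are omitted).

/* Sketch only -- not from the patch.  A hypothetical architecture
   backend plugging into the low_* hooks introduced above.  */

#include "server.h"
#include "linux-low.h"

class example_target : public linux_process_target  /* hypothetical name */
{
protected:
  /* Advertise PC get/set support so the generic code calls the hooks
     below instead of the defaults added by the patch.  */
  bool low_supports_breakpoints () override
  { return true; }

  CORE_ADDR low_get_pc (regcache *regcache) override
  {
    /* "pc" stands in for whatever raw register name the port uses.  */
    uint64_t pc;
    collect_register_by_name (regcache, "pc", &pc);
    return pc;
  }

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override
  {
    uint64_t pc = newpc;
    supply_register_by_name (regcache, "pc", &pc);
  }

  /* How far to roll the PC back after a software breakpoint trap;
     0 suits architectures that stop on the breakpoint instruction.  */
  int low_decr_pc_after_break () override
  { return 0; }

  /* Ports with a software breakpoint instruction implement this;
     declaration only in this sketch.  */
  bool low_breakpoint_at (CORE_ADDR pc) override;
};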