X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gdb%2Fremote.c;h=c0f85c44947285e4772f1cff62f94d8622264f65;hb=112e8700a6fd2fed65ca70132c9cbed4132e8bd4;hp=1f0d67cfddf865bff04b12977638a527df570870;hpb=57809e5e5a506664eb54433ded81ab0785168a83;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/remote.c b/gdb/remote.c index 1f0d67cfdd..c0f85c4494 100644 --- a/gdb/remote.c +++ b/gdb/remote.c @@ -70,6 +70,8 @@ #include "ax-gdb.h" #include "agent.h" #include "btrace.h" +#include "record-btrace.h" +#include /* Temp hacks for tracepoint encoding migration. */ static char *target_buf; @@ -231,6 +233,8 @@ static int remote_can_run_breakpoint_commands (struct target_ops *self); static void remote_btrace_reset (void); +static void remote_btrace_maybe_reopen (void); + static int stop_reply_queue_length (void); static void readahead_cache_invalidate (void); @@ -448,6 +452,24 @@ struct private_thread_info /* This is set to the data address of the access causing the target to stop for a watchpoint. */ CORE_ADDR watch_data_address; + + /* Fields used by the vCont action coalescing implemented in + remote_resume / remote_commit_resume. remote_resume stores each + thread's last resume request in these fields, so that a later + remote_commit_resume knows which is the proper action for this + thread to include in the vCont packet. */ + + /* True if the last target_resume call for this thread was a step + request, false if a continue request. */ + int last_resume_step; + + /* The signal specified in the last target_resume call for this + thread. */ + enum gdb_signal last_resume_sig; + + /* Whether this thread was already vCont-resumed on the remote + side. */ + int vcont_resumed; }; static void @@ -1796,11 +1818,14 @@ remote_add_inferior (int fake_pid_p, int pid, int attached, /* If no main executable is currently open then attempt to open the file that was executed to create this inferior. */ if (try_open_exec && get_exec_file (0) == NULL) - exec_file_locate_attach (pid, 1); + exec_file_locate_attach (pid, 0, 1); return inf; } +static struct private_thread_info * + get_private_info_thread (struct thread_info *info); + /* Add thread PTID to GDB's thread list. Tag it as executing/running according to RUNNING. */ @@ -1808,6 +1833,7 @@ static void remote_add_thread (ptid_t ptid, int running, int executing) { struct remote_state *rs = get_remote_state (); + struct thread_info *thread; /* GDB historically didn't pull threads in the initial connection setup. If the remote target doesn't even have a concept of @@ -1816,10 +1842,11 @@ remote_add_thread (ptid_t ptid, int running, int executing) might be confusing to the user. Be silent then, preserving the age old behavior. */ if (rs->starting_up) - add_thread_silent (ptid); + thread = add_thread_silent (ptid); else - add_thread (ptid); + thread = add_thread (ptid); + get_private_info_thread (thread)->vcont_resumed = executing; set_executing (ptid, executing); set_running (ptid, running); } @@ -1914,25 +1941,40 @@ remote_notice_new_inferior (ptid_t currthread, int executing) } } -/* Return the private thread data, creating it if necessary. */ +/* Return THREAD's private thread data, creating it if necessary. 
*/ static struct private_thread_info * -demand_private_info (ptid_t ptid) +get_private_info_thread (struct thread_info *thread) { - struct thread_info *info = find_thread_ptid (ptid); + gdb_assert (thread != NULL); - gdb_assert (info); - - if (!info->priv) + if (thread->priv == NULL) { - info->priv = XNEW (struct private_thread_info); - info->private_dtor = free_private_thread_info; - info->priv->core = -1; - info->priv->extra = NULL; - info->priv->name = NULL; + struct private_thread_info *priv = XNEW (struct private_thread_info); + + thread->private_dtor = free_private_thread_info; + thread->priv = priv; + + priv->core = -1; + priv->extra = NULL; + priv->name = NULL; + priv->name = NULL; + priv->last_resume_step = 0; + priv->last_resume_sig = GDB_SIGNAL_0; + priv->vcont_resumed = 0; } - return info->priv; + return thread->priv; +} + +/* Return PTID's private thread data, creating it if necessary. */ + +static struct private_thread_info * +get_private_info_ptid (ptid_t ptid) +{ + struct thread_info *info = find_thread_ptid (ptid); + + return get_private_info_thread (info); } /* Call this function as a result of @@ -3276,7 +3318,7 @@ remote_update_thread_list (struct target_ops *ops) remote_notice_new_inferior (item->ptid, executing); - info = demand_private_info (item->ptid); + info = get_private_info_ptid (item->ptid); info->core = item->core; info->extra = item->extra; item->extra = NULL; @@ -3351,7 +3393,7 @@ remote_threads_extra_info (struct target_ops *self, struct thread_info *tp) getpkt (&rs->buf, &rs->buf_size, 0); if (rs->buf[0] != 0) { - n = min (strlen (rs->buf) / 2, sizeof (display_buf)); + n = std::min (strlen (rs->buf) / 2, sizeof (display_buf)); result = hex2bin (rs->buf, (gdb_byte *) display_buf, n); display_buf [result] = '\0'; return display_buf; @@ -3909,6 +3951,7 @@ process_initial_stop_replies (int from_tty) set_executing (event_ptid, 0); set_running (event_ptid, 0); + thread->priv->vcont_resumed = 0; } /* "Notice" the new inferiors before anything related to @@ -4298,6 +4341,10 @@ remote_start_remote (int from_tty, struct target_ops *target, int extended_p) merge_uploaded_tracepoints (&uploaded_tps); } + /* Possibly the target has been engaged in a btrace record started + previously; find out where things are at. */ + remote_btrace_maybe_reopen (); + /* The thread and inferior lists are now synchronized with the target, our symbols have been relocated, and we're merged the target's tracepoints with ours. We're done with basic start @@ -5134,15 +5181,7 @@ remote_detach_1 (const char *args, int from_tty) if (!target_has_execution) error (_("No process to detach from.")); - if (from_tty) - { - char *exec_file = get_exec_file (0); - if (exec_file == NULL) - exec_file = ""; - printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file, - target_pid_to_str (pid_to_ptid (pid))); - gdb_flush (gdb_stdout); - } + target_announce_detach (from_tty); /* Tell the remote target to detach. */ remote_detach_pid (pid); @@ -5159,7 +5198,7 @@ remote_detach_1 (const char *args, int from_tty) /* If doing detach-on-fork, we don't mourn, because that will delete breakpoints that should be available for the followed inferior. */ if (!is_fork_parent) - target_mourn_inferior (); + target_mourn_inferior (inferior_ptid); else { inferior_ptid = null_ptid; @@ -5556,6 +5595,58 @@ append_pending_thread_resumptions (char *p, char *endp, ptid_t ptid) return p; } +/* Set the target running, using the packets that use Hc + (c/s/C/S). 
*/ + +static void +remote_resume_with_hc (struct target_ops *ops, + ptid_t ptid, int step, enum gdb_signal siggnal) +{ + struct remote_state *rs = get_remote_state (); + struct thread_info *thread; + char *buf; + + rs->last_sent_signal = siggnal; + rs->last_sent_step = step; + + /* The c/s/C/S resume packets use Hc, so set the continue + thread. */ + if (ptid_equal (ptid, minus_one_ptid)) + set_continue_thread (any_thread_ptid); + else + set_continue_thread (ptid); + + ALL_NON_EXITED_THREADS (thread) + resume_clear_thread_private_info (thread); + + buf = rs->buf; + if (execution_direction == EXEC_REVERSE) + { + /* We don't pass signals to the target in reverse exec mode. */ + if (info_verbose && siggnal != GDB_SIGNAL_0) + warning (_(" - Can't pass signal %d to target in reverse: ignored."), + siggnal); + + if (step && packet_support (PACKET_bs) == PACKET_DISABLE) + error (_("Remote reverse-step not supported.")); + if (!step && packet_support (PACKET_bc) == PACKET_DISABLE) + error (_("Remote reverse-continue not supported.")); + + strcpy (buf, step ? "bs" : "bc"); + } + else if (siggnal != GDB_SIGNAL_0) + { + buf[0] = step ? 'S' : 'C'; + buf[1] = tohex (((int) siggnal >> 4) & 0xf); + buf[2] = tohex (((int) siggnal) & 0xf); + buf[3] = '\0'; + } + else + strcpy (buf, step ? "s" : "c"); + + putpkt (buf); +} + /* Resume the remote inferior by using a "vCont" packet. The thread to be resumed is PTID; STEP and SIGGNAL indicate whether the resumed thread should be single-stepped and/or signalled. If PTID @@ -5563,16 +5654,20 @@ append_pending_thread_resumptions (char *p, char *endp, ptid_t ptid) be stepped and/or signalled is given in the global INFERIOR_PTID. This function returns non-zero iff it resumes the inferior. - This function issues a strict subset of all possible vCont commands at the - moment. */ + This function issues a strict subset of all possible vCont commands + at the moment. */ static int -remote_vcont_resume (ptid_t ptid, int step, enum gdb_signal siggnal) +remote_resume_with_vcont (ptid_t ptid, int step, enum gdb_signal siggnal) { struct remote_state *rs = get_remote_state (); char *p; char *endp; + /* No reverse execution actions defined for vCont. */ + if (execution_direction == EXEC_REVERSE) + return 0; + if (packet_support (PACKET_vCont) == PACKET_SUPPORT_UNKNOWN) remote_vcont_probe (rs); @@ -5644,8 +5739,26 @@ remote_resume (struct target_ops *ops, ptid_t ptid, int step, enum gdb_signal siggnal) { struct remote_state *rs = get_remote_state (); - char *buf; - struct thread_info *thread; + + /* When connected in non-stop mode, the core resumes threads + individually. Resuming remote threads directly in target_resume + would thus result in sending one packet per thread. Instead, to + minimize roundtrip latency, here we just store the resume + request; the actual remote resumption will be done in + target_commit_resume / remote_commit_resume, where we'll be able + to do vCont action coalescing. 
*/ + if (target_is_non_stop_p () && execution_direction != EXEC_REVERSE) + { + struct private_thread_info *remote_thr; + + if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid)) + remote_thr = get_private_info_ptid (inferior_ptid); + else + remote_thr = get_private_info_ptid (ptid); + remote_thr->last_resume_step = step; + remote_thr->last_resume_sig = siggnal; + return; + } /* In all-stop, we can't mark REMOTE_ASYNC_GET_PENDING_EVENTS_TOKEN (explained in remote-notif.c:handle_notification) so @@ -5656,55 +5769,12 @@ remote_resume (struct target_ops *ops, if (!target_is_non_stop_p ()) remote_notif_process (rs->notif_state, ¬if_client_stop); - rs->last_sent_signal = siggnal; - rs->last_sent_step = step; - rs->last_resume_exec_dir = execution_direction; - /* The vCont packet doesn't need to specify threads via Hc. */ - /* No reverse support (yet) for vCont. */ - if (execution_direction != EXEC_REVERSE) - if (remote_vcont_resume (ptid, step, siggnal)) - goto done; - - /* All other supported resume packets do use Hc, so set the continue - thread. */ - if (ptid_equal (ptid, minus_one_ptid)) - set_continue_thread (any_thread_ptid); - else - set_continue_thread (ptid); - - ALL_NON_EXITED_THREADS (thread) - resume_clear_thread_private_info (thread); - - buf = rs->buf; - if (execution_direction == EXEC_REVERSE) - { - /* We don't pass signals to the target in reverse exec mode. */ - if (info_verbose && siggnal != GDB_SIGNAL_0) - warning (_(" - Can't pass signal %d to target in reverse: ignored."), - siggnal); - - if (step && packet_support (PACKET_bs) == PACKET_DISABLE) - error (_("Remote reverse-step not supported.")); - if (!step && packet_support (PACKET_bc) == PACKET_DISABLE) - error (_("Remote reverse-continue not supported.")); - - strcpy (buf, step ? "bs" : "bc"); - } - else if (siggnal != GDB_SIGNAL_0) - { - buf[0] = step ? 'S' : 'C'; - buf[1] = tohex (((int) siggnal >> 4) & 0xf); - buf[2] = tohex (((int) siggnal) & 0xf); - buf[3] = '\0'; - } - else - strcpy (buf, step ? "s" : "c"); - - putpkt (buf); + /* Prefer vCont, and fallback to s/c/S/C, which use Hc. */ + if (!remote_resume_with_vcont (ptid, step, siggnal)) + remote_resume_with_hc (ops, ptid, step, siggnal); - done: /* We are about to start executing the inferior, let's register it with the event loop. NOTE: this is the one place where all the execution commands end up. We could alternatively do this in each @@ -5725,6 +5795,283 @@ remote_resume (struct target_ops *ops, if (!target_is_non_stop_p ()) rs->waiting_for_stop_reply = 1; } + +static void check_pending_events_prevent_wildcard_vcont + (int *may_global_wildcard_vcont); +static int is_pending_fork_parent_thread (struct thread_info *thread); + +/* Private per-inferior info for target remote processes. */ + +struct private_inferior +{ + /* Whether we can send a wildcard vCont for this process. */ + int may_wildcard_vcont; +}; + +/* Structure used to track the construction of a vCont packet in the + outgoing packet buffer. This is used to send multiple vCont + packets if we have more actions than would fit a single packet. */ + +struct vcont_builder +{ + /* Pointer to the first action. P points here if no action has been + appended yet. */ + char *first_action; + + /* Where the next action will be appended. */ + char *p; + + /* The end of the buffer. Must never write past this. */ + char *endp; +}; + +/* Prepare the outgoing buffer for a new vCont packet. 
*/ + +static void +vcont_builder_restart (struct vcont_builder *builder) +{ + struct remote_state *rs = get_remote_state (); + + builder->p = rs->buf; + builder->endp = rs->buf + get_remote_packet_size (); + builder->p += xsnprintf (builder->p, builder->endp - builder->p, "vCont"); + builder->first_action = builder->p; +} + +/* If the vCont packet being built has any action, send it to the + remote end. */ + +static void +vcont_builder_flush (struct vcont_builder *builder) +{ + struct remote_state *rs; + + if (builder->p == builder->first_action) + return; + + rs = get_remote_state (); + putpkt (rs->buf); + getpkt (&rs->buf, &rs->buf_size, 0); + if (strcmp (rs->buf, "OK") != 0) + error (_("Unexpected vCont reply in non-stop mode: %s"), rs->buf); +} + +/* The largest action is range-stepping, with its two addresses. This + is more than sufficient. If a new, bigger action is created, it'll + quickly trigger a failed assertion in append_resumption (and we'll + just bump this). */ +#define MAX_ACTION_SIZE 200 + +/* Append a new vCont action in the outgoing packet being built. If + the action doesn't fit the packet along with previous actions, push + what we've got so far to the remote end and start over a new vCont + packet (with the new action). */ + +static void +vcont_builder_push_action (struct vcont_builder *builder, + ptid_t ptid, int step, enum gdb_signal siggnal) +{ + char buf[MAX_ACTION_SIZE + 1]; + char *endp; + size_t rsize; + + endp = append_resumption (buf, buf + sizeof (buf), + ptid, step, siggnal); + + /* Check whether this new action would fit in the vCont packet along + with previous actions. If not, send what we've got so far and + start a new vCont packet. */ + rsize = endp - buf; + if (rsize > builder->endp - builder->p) + { + vcont_builder_flush (builder); + vcont_builder_restart (builder); + + /* Should now fit. */ + gdb_assert (rsize <= builder->endp - builder->p); + } + + memcpy (builder->p, buf, rsize); + builder->p += rsize; + *builder->p = '\0'; +} + +/* to_commit_resume implementation. */ + +static void +remote_commit_resume (struct target_ops *ops) +{ + struct remote_state *rs = get_remote_state (); + struct inferior *inf; + struct thread_info *tp; + int any_process_wildcard; + int may_global_wildcard_vcont; + struct vcont_builder vcont_builder; + + /* If connected in all-stop mode, we'd send the remote resume + request directly from remote_resume. Likewise if + reverse-debugging, as there are no defined vCont actions for + reverse execution. */ + if (!target_is_non_stop_p () || execution_direction == EXEC_REVERSE) + return; + + /* Try to send wildcard actions ("vCont;c" or "vCont;c:pPID.-1") + instead of resuming all threads of each process individually. + However, if any thread of a process must remain halted, we can't + send wildcard resumes and must send one action per thread. + + Care must be taken to not resume threads/processes the server + side already told us are stopped, but the core doesn't know about + yet, because the events are still in the vStopped notification + queue. For example: + + #1 => vCont s:p1.1;c + #2 <= OK + #3 <= %Stopped T05 p1.1 + #4 => vStopped + #5 <= T05 p1.2 + #6 => vStopped + #7 <= OK + #8 (infrun handles the stop for p1.1 and continues stepping) + #9 => vCont s:p1.1;c + + The last vCont above would resume thread p1.2 by mistake, because + the server has no idea that the event for p1.2 had not been + handled yet. 
+ + The server side must similarly ignore resume actions for the + thread that has a pending %Stopped notification (and any other + threads with events pending), until GDB acks the notification + with vStopped. Otherwise, e.g., the following case is + mishandled: + + #1 => g (or any other packet) + #2 <= [registers] + #3 <= %Stopped T05 p1.2 + #4 => vCont s:p1.1;c + #5 <= OK + + Above, the server must not resume thread p1.2. GDB can't know + that p1.2 stopped until it acks the %Stopped notification, and + since from GDB's perspective all threads should be running, it + sends a "c" action. + + Finally, special care must also be given to handling fork/vfork + events. A (v)fork event actually tells us that two processes + stopped -- the parent and the child. Until we follow the fork, + we must not resume the child. Therefore, if we have a pending + fork follow, we must not send a global wildcard resume action + (vCont;c). We can still send process-wide wildcards though. */ + + /* Start by assuming a global wildcard (vCont;c) is possible. */ + may_global_wildcard_vcont = 1; + + /* And assume every process is individually wildcard-able too. */ + ALL_NON_EXITED_INFERIORS (inf) + { + if (inf->priv == NULL) + inf->priv = XNEW (struct private_inferior); + inf->priv->may_wildcard_vcont = 1; + } + + /* Check for any pending events (not reported or processed yet) and + disable process and global wildcard resumes appropriately. */ + check_pending_events_prevent_wildcard_vcont (&may_global_wildcard_vcont); + + ALL_NON_EXITED_THREADS (tp) + { + /* If a thread of a process is not meant to be resumed, then we + can't wildcard that process. */ + if (!tp->executing) + { + tp->inf->priv->may_wildcard_vcont = 0; + + /* And if we can't wildcard a process, we can't wildcard + everything either. */ + may_global_wildcard_vcont = 0; + continue; + } + + /* If a thread is the parent of an unfollowed fork, then we + can't do a global wildcard, as that would resume the fork + child. */ + if (is_pending_fork_parent_thread (tp)) + may_global_wildcard_vcont = 0; + } + + /* Now let's build the vCont packet(s). Actions must be appended + from narrower to wider scopes (thread -> process -> global). If + we end up with too many actions for a single packet vcont_builder + flushes the current vCont packet to the remote side and starts a + new one. */ + vcont_builder_restart (&vcont_builder); + + /* Threads first. */ + ALL_NON_EXITED_THREADS (tp) + { + struct private_thread_info *remote_thr = tp->priv; + + if (!tp->executing || remote_thr->vcont_resumed) + continue; + + gdb_assert (!thread_is_in_step_over_chain (tp)); + + if (!remote_thr->last_resume_step + && remote_thr->last_resume_sig == GDB_SIGNAL_0 + && tp->inf->priv->may_wildcard_vcont) + { + /* We'll send a wildcard resume instead. */ + remote_thr->vcont_resumed = 1; + continue; + } + + vcont_builder_push_action (&vcont_builder, tp->ptid, + remote_thr->last_resume_step, + remote_thr->last_resume_sig); + remote_thr->vcont_resumed = 1; + } + + /* Now check whether we can send any process-wide wildcard. This is + to avoid sending a global wildcard in the case nothing is + supposed to be resumed. */ + any_process_wildcard = 0; + + ALL_NON_EXITED_INFERIORS (inf) + { + if (inf->priv->may_wildcard_vcont) + { + any_process_wildcard = 1; + break; + } + } + + if (any_process_wildcard) + { + /* If all processes are wildcard-able, then send a single "c" + action, otherwise, send an "all (-1) threads of process" + continue action for each running process, if any. 
*/ + if (may_global_wildcard_vcont) + { + vcont_builder_push_action (&vcont_builder, minus_one_ptid, + 0, GDB_SIGNAL_0); + } + else + { + ALL_NON_EXITED_INFERIORS (inf) + { + if (inf->priv->may_wildcard_vcont) + { + vcont_builder_push_action (&vcont_builder, + pid_to_ptid (inf->pid), + 0, GDB_SIGNAL_0); + } + } + } + } + + vcont_builder_flush (&vcont_builder); +} + /* Non-stop version of target_stop. Uses `vCont;t' to stop a remote @@ -5924,7 +6271,6 @@ remote_terminal_inferior (struct target_ops *self) can go away. */ if (!remote_async_terminal_ours_p) return; - delete_file_handler (input_fd); remote_async_terminal_ours_p = 0; /* NOTE: At this point we could also register our selves as the recipient of all input. Any characters typed could then be @@ -5937,7 +6283,6 @@ remote_terminal_ours (struct target_ops *self) /* See FIXME in remote_terminal_inferior. */ if (remote_async_terminal_ours_p) return; - add_file_handler (input_fd, stdin_event_handler, 0); remote_async_terminal_ours_p = 1; } @@ -6032,7 +6377,7 @@ remote_notif_stop_ack (struct notif_client *self, char *buf, struct stop_reply *stop_reply = (struct stop_reply *) event; /* acknowledge */ - putpkt ((char *) self->ack_command); + putpkt (self->ack_command); if (stop_reply->ws.kind == TARGET_WAITKIND_IGNORE) /* We got an unknown stop reply. */ @@ -6093,7 +6438,7 @@ struct queue_iter_param struct stop_reply *output; }; -/* Determine if THREAD is a pending fork parent thread. ARG contains +/* Determine if THREAD_PTID is a pending fork parent thread. ARG contains the pid of the process that owns the threads we want to check, or -1 if we want to check all threads. */ @@ -6111,6 +6456,29 @@ is_pending_fork_parent (struct target_waitstatus *ws, int event_pid, return 0; } +/* Return the thread's pending status used to determine whether the + thread is a fork parent stopped at a fork event. */ + +static struct target_waitstatus * +thread_pending_fork_status (struct thread_info *thread) +{ + if (thread->suspend.waitstatus_pending_p) + return &thread->suspend.waitstatus; + else + return &thread->pending_follow; +} + +/* Determine if THREAD is a pending fork parent thread. */ + +static int +is_pending_fork_parent_thread (struct thread_info *thread) +{ + struct target_waitstatus *ws = thread_pending_fork_status (thread); + int pid = -1; + + return is_pending_fork_parent (ws, pid, thread->ptid); +} + /* Check whether EVENT is a fork event, and if it is, remove the fork child from the context list passed in DATA. */ @@ -6150,12 +6518,7 @@ remove_new_fork_children (struct threads_listing_context *context) fork child threads from the CONTEXT list. */ ALL_NON_EXITED_THREADS (thread) { - struct target_waitstatus *ws; - - if (thread->suspend.waitstatus_pending_p) - ws = &thread->suspend.waitstatus; - else - ws = &thread->pending_follow; + struct target_waitstatus *ws = thread_pending_fork_status (thread); if (is_pending_fork_parent (ws, pid, thread->ptid)) { @@ -6173,6 +6536,56 @@ remove_new_fork_children (struct threads_listing_context *context) remove_child_of_pending_fork, ¶m); } +/* Check whether EVENT would prevent a global or process wildcard + vCont action. 
*/ + +static int +check_pending_event_prevents_wildcard_vcont_callback + (QUEUE (stop_reply_p) *q, + QUEUE_ITER (stop_reply_p) *iter, + stop_reply_p event, + void *data) +{ + struct inferior *inf; + int *may_global_wildcard_vcont = (int *) data; + + if (event->ws.kind == TARGET_WAITKIND_NO_RESUMED + || event->ws.kind == TARGET_WAITKIND_NO_HISTORY) + return 1; + + if (event->ws.kind == TARGET_WAITKIND_FORKED + || event->ws.kind == TARGET_WAITKIND_VFORKED) + *may_global_wildcard_vcont = 0; + + inf = find_inferior_ptid (event->ptid); + + /* This may be the first time we heard about this process. + Regardless, we must not do a global wildcard resume, otherwise + we'd resume this process too. */ + *may_global_wildcard_vcont = 0; + if (inf != NULL) + inf->priv->may_wildcard_vcont = 0; + + return 1; +} + +/* Check whether any event pending in the vStopped queue would prevent + a global or process wildcard vCont action. Clear + *may_global_wildcard if we can't do a global wildcard (vCont;c), + and clear the event inferior's may_wildcard_vcont flag if we can't + do a process-wide wildcard resume (vCont;c:pPID.-1). */ + +static void +check_pending_events_prevent_wildcard_vcont (int *may_global_wildcard) +{ + struct notif_client *notif = ¬if_client_stop; + + remote_notif_get_pending_events (notif); + QUEUE_iterate (stop_reply_p, stop_reply_queue, + check_pending_event_prevents_wildcard_vcont_callback, + may_global_wildcard); +} + /* Remove stop replies in the queue if its pid is equal to the given inferior's pid. */ @@ -6803,10 +7216,11 @@ process_stop_reply (struct stop_reply *stop_reply, } remote_notice_new_inferior (ptid, 0); - remote_thr = demand_private_info (ptid); + remote_thr = get_private_info_ptid (ptid); remote_thr->core = stop_reply->core; remote_thr->stop_reason = stop_reply->stop_reason; remote_thr->watch_data_address = stop_reply->watch_data_address; + remote_thr->vcont_resumed = 0; } stop_reply_xfree (stop_reply); @@ -6982,8 +7396,8 @@ remote_wait_as (ptid_t ptid, struct target_waitstatus *status, int options) rs->last_sent_signal = GDB_SIGNAL_0; target_terminal_inferior (); - strcpy ((char *) buf, rs->last_sent_step ? "s" : "c"); - putpkt ((char *) buf); + strcpy (buf, rs->last_sent_step ? "s" : "c"); + putpkt (buf); break; } /* else fallthrough */ @@ -7165,18 +7579,31 @@ process_g_packet (struct regcache *regcache) the 'p' packet must be used. */ if (buf_len < 2 * rsa->sizeof_g_packet) { - rsa->sizeof_g_packet = buf_len / 2; + long sizeof_g_packet = buf_len / 2; for (i = 0; i < gdbarch_num_regs (gdbarch); i++) { + long offset = rsa->regs[i].offset; + long reg_size = register_size (gdbarch, i); + if (rsa->regs[i].pnum == -1) continue; - if (rsa->regs[i].offset >= rsa->sizeof_g_packet) + if (offset >= sizeof_g_packet) rsa->regs[i].in_g_packet = 0; + else if (offset + reg_size > sizeof_g_packet) + error (_("Truncated register %d in remote 'g' packet"), i); else rsa->regs[i].in_g_packet = 1; } + + /* Looks valid enough, we can assume this is the correct length + for a 'g' packet. It's important not to adjust + rsa->sizeof_g_packet if we have truncated registers otherwise + this "if" won't be run the next time the method is called + with a packet of the same size and one of the internal errors + below will trigger instead. 
*/ + rsa->sizeof_g_packet = sizeof_g_packet; } regs = (char *) alloca (rsa->sizeof_g_packet); @@ -7206,10 +7633,11 @@ process_g_packet (struct regcache *regcache) for (i = 0; i < gdbarch_num_regs (gdbarch); i++) { struct packet_reg *r = &rsa->regs[i]; + long reg_size = register_size (gdbarch, i); if (r->in_g_packet) { - if (r->offset * 2 >= strlen (rs->buf)) + if ((r->offset + reg_size) * 2 > strlen (rs->buf)) /* This shouldn't happen - we adjusted in_g_packet above. */ internal_error (__FILE__, __LINE__, _("unexpected end of 'g' packet reply")); @@ -7469,7 +7897,7 @@ hexnumlen (ULONGEST num) for (i = 0; num != 0; i++) num >>= 4; - return max (i, 1); + return std::max (i, 1); } /* Set BUF to the minimum number of hex digits representing NUM. */ @@ -7674,18 +8102,22 @@ remote_write_bytes_aux (const char *header, CORE_ADDR memaddr, if (packet_format == 'X') { /* Best guess at number of bytes that will fit. */ - todo_units = min (len_units, payload_capacity_bytes / unit_size); + todo_units = std::min (len_units, + (ULONGEST) payload_capacity_bytes / unit_size); if (use_length) payload_capacity_bytes -= hexnumlen (todo_units); - todo_units = min (todo_units, payload_capacity_bytes / unit_size); + todo_units = std::min (todo_units, payload_capacity_bytes / unit_size); } else { /* Number of bytes that will fit. */ - todo_units = min (len_units, (payload_capacity_bytes / unit_size) / 2); + todo_units + = std::min (len_units, + (ULONGEST) (payload_capacity_bytes / unit_size) / 2); if (use_length) payload_capacity_bytes -= hexnumlen (todo_units); - todo_units = min (todo_units, (payload_capacity_bytes / unit_size) / 2); + todo_units = std::min (todo_units, + (payload_capacity_bytes / unit_size) / 2); } if (todo_units <= 0) @@ -7844,7 +8276,8 @@ remote_read_bytes_1 (CORE_ADDR memaddr, gdb_byte *myaddr, ULONGEST len_units, get_memory_packet_size ensures this. */ /* Number of units that will fit. */ - todo_units = min (len_units, (buf_size_bytes / unit_size) / 2); + todo_units = std::min (len_units, + (ULONGEST) (buf_size_bytes / unit_size) / 2); /* Construct "m"","". */ memaddr = remote_address_masked (memaddr); @@ -8217,23 +8650,20 @@ remote_send (char **buf, error (_("Remote failure reply: %s"), *buf); } -/* Return a pointer to an xmalloc'ed string representing an escaped - version of BUF, of len N. E.g. \n is converted to \\n, \t to \\t, - etc. The caller is responsible for releasing the returned - memory. */ +/* Return a string representing an escaped version of BUF, of len N. + E.g. \n is converted to \\n, \t to \\t, etc. 
*/ -static char * +static std::string escape_buffer (const char *buf, int n) { struct cleanup *old_chain; struct ui_file *stb; - char *str; stb = mem_fileopen (); old_chain = make_cleanup_ui_file_delete (stb); fputstrn_unfiltered (buf, n, '\\', stb); - str = ui_file_xstrdup (stb, NULL); + std::string str = ui_file_as_string (stb); do_cleanups (old_chain); return str; } @@ -8317,15 +8747,12 @@ putpkt_binary (const char *buf, int cnt) if (remote_debug) { - struct cleanup *old_chain; - char *str; - *p = '\0'; - str = escape_buffer (buf2, p - buf2); - old_chain = make_cleanup (xfree, str); - fprintf_unfiltered (gdb_stdlog, "Sending packet: %s...", str); + + std::string str = escape_buffer (buf2, p - buf2); + + fprintf_unfiltered (gdb_stdlog, "Sending packet: %s...", str.c_str ()); gdb_flush (gdb_stdlog); - do_cleanups (old_chain); } remote_serial_write (buf2, p - buf2); @@ -8403,15 +8830,11 @@ putpkt_binary (const char *buf, int cnt) { if (remote_debug) { - struct cleanup *old_chain; - char *str; + std::string str = escape_buffer (rs->buf, val); - str = escape_buffer (rs->buf, val); - old_chain = make_cleanup (xfree, str); fprintf_unfiltered (gdb_stdlog, " Notification received: %s\n", - str); - do_cleanups (old_chain); + str.c_str ()); } handle_notification (rs->notif_state, rs->buf); /* We're in sync now, rewait for the ack. */ @@ -8577,16 +9000,12 @@ read_frame (char **buf_p, if (remote_debug) { - struct cleanup *old_chain; - char *str; + std::string str = escape_buffer (buf, bc); - str = escape_buffer (buf, bc); - old_chain = make_cleanup (xfree, str); fprintf_unfiltered (gdb_stdlog, "Bad checksum, sentsum=0x%x, " "csum=0x%x, buf=%s\n", - pktcsum, csum, str); - do_cleanups (old_chain); + pktcsum, csum, str.c_str ()); } /* Number of characters in buffer ignoring trailing NULL. */ @@ -8760,13 +9179,9 @@ getpkt_or_notif_sane_1 (char **buf, long *sizeof_buf, int forever, { if (remote_debug) { - struct cleanup *old_chain; - char *str; + std::string str = escape_buffer (*buf, val); - str = escape_buffer (*buf, val); - old_chain = make_cleanup (xfree, str); - fprintf_unfiltered (gdb_stdlog, "Packet received: %s\n", str); - do_cleanups (old_chain); + fprintf_unfiltered (gdb_stdlog, "Packet received: %s\n", str.c_str ()); } /* Skip the ack char if we're in no-ack mode. */ @@ -8785,15 +9200,11 @@ getpkt_or_notif_sane_1 (char **buf, long *sizeof_buf, int forever, if (remote_debug) { - struct cleanup *old_chain; - char *str; + std::string str = escape_buffer (*buf, val); - str = escape_buffer (*buf, val); - old_chain = make_cleanup (xfree, str); fprintf_unfiltered (gdb_stdlog, " Notification received: %s\n", - str); - do_cleanups (old_chain); + str.c_str ()); } if (is_notif != NULL) *is_notif = 1; @@ -8906,7 +9317,7 @@ remote_kill (struct target_ops *ops) res = remote_vkill (pid, rs); if (res == 0) { - target_mourn_inferior (); + target_mourn_inferior (inferior_ptid); return; } } @@ -8923,7 +9334,7 @@ remote_kill (struct target_ops *ops) not in extended mode, mourning the inferior also unpushes remote_ops from the target stack, which closes the remote connection. 
*/ - target_mourn_inferior (); + target_mourn_inferior (inferior_ptid); return; } @@ -9212,10 +9623,7 @@ remote_add_target_side_condition (struct gdbarch *gdbarch, struct bp_target_info *bp_tgt, char *buf, char *buf_end) { - struct agent_expr *aexpr = NULL; - int i, ix; - - if (VEC_empty (agent_expr_p, bp_tgt->conditions)) + if (bp_tgt->conditions.empty ()) return 0; buf += strlen (buf); @@ -9223,13 +9631,13 @@ remote_add_target_side_condition (struct gdbarch *gdbarch, buf++; /* Send conditions to the target and free the vector. */ - for (ix = 0; - VEC_iterate (agent_expr_p, bp_tgt->conditions, ix, aexpr); - ix++) + for (int ix = 0; ix < bp_tgt->conditions.size (); ix++) { + struct agent_expr *aexpr = bp_tgt->conditions[ix]; + xsnprintf (buf, buf_end - buf, "X%x,", aexpr->len); buf += strlen (buf); - for (i = 0; i < aexpr->len; ++i) + for (int i = 0; i < aexpr->len; ++i) buf = pack_hex_byte (buf, aexpr->buf[i]); *buf = '\0'; } @@ -9240,10 +9648,7 @@ static void remote_add_target_side_commands (struct gdbarch *gdbarch, struct bp_target_info *bp_tgt, char *buf) { - struct agent_expr *aexpr = NULL; - int i, ix; - - if (VEC_empty (agent_expr_p, bp_tgt->tcommands)) + if (bp_tgt->tcommands.empty ()) return; buf += strlen (buf); @@ -9253,13 +9658,13 @@ remote_add_target_side_commands (struct gdbarch *gdbarch, /* Concatenate all the agent expressions that are commands into the cmds parameter. */ - for (ix = 0; - VEC_iterate (agent_expr_p, bp_tgt->tcommands, ix, aexpr); - ix++) + for (int ix = 0; ix < bp_tgt->tcommands.size (); ix++) { + struct agent_expr *aexpr = bp_tgt->tcommands[ix]; + sprintf (buf, "X%x,", aexpr->len); buf += strlen (buf); - for (i = 0; i < aexpr->len; ++i) + for (int i = 0; i < aexpr->len; ++i) buf = pack_hex_byte (buf, aexpr->buf[i]); *buf = '\0'; } @@ -9291,8 +9696,6 @@ remote_insert_breakpoint (struct target_ops *ops, if (!gdbarch_has_global_breakpoints (target_gdbarch ())) set_general_process (); - gdbarch_remote_breakpoint_from_pc (gdbarch, &addr, &bpsize); - rs = get_remote_state (); p = rs->buf; endbuf = rs->buf + get_remote_packet_size (); @@ -9302,7 +9705,7 @@ remote_insert_breakpoint (struct target_ops *ops, *(p++) = ','; addr = (ULONGEST) remote_address_masked (addr); p += hexnumstr (p, addr); - xsnprintf (p, endbuf - p, ",%d", bpsize); + xsnprintf (p, endbuf - p, ",%d", bp_tgt->kind); if (remote_supports_cond_breakpoints (ops)) remote_add_target_side_condition (gdbarch, bp_tgt, p, endbuf); @@ -9318,8 +9721,6 @@ remote_insert_breakpoint (struct target_ops *ops, case PACKET_ERROR: return -1; case PACKET_OK: - bp_tgt->placed_address = addr; - bp_tgt->placed_size = bpsize; return 0; case PACKET_UNKNOWN: break; @@ -9328,7 +9729,7 @@ remote_insert_breakpoint (struct target_ops *ops, /* If this breakpoint has target-side commands but this stub doesn't support Z0 packets, throw error. 
*/ - if (!VEC_empty (agent_expr_p, bp_tgt->tcommands)) + if (!bp_tgt->tcommands.empty ()) throw_error (NOT_SUPPORTED_ERROR, _("\ Target doesn't support breakpoints that have target side commands.")); @@ -9338,7 +9739,8 @@ Target doesn't support breakpoints that have target side commands.")); static int remote_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch, - struct bp_target_info *bp_tgt) + struct bp_target_info *bp_tgt, + enum remove_bp_reason reason) { CORE_ADDR addr = bp_tgt->placed_address; struct remote_state *rs = get_remote_state (); @@ -9359,7 +9761,7 @@ remote_remove_breakpoint (struct target_ops *ops, addr = (ULONGEST) remote_address_masked (bp_tgt->placed_address); p += hexnumstr (p, addr); - xsnprintf (p, endbuf - p, ",%d", bp_tgt->placed_size); + xsnprintf (p, endbuf - p, ",%d", bp_tgt->kind); putpkt (rs->buf); getpkt (&rs->buf, &rs->buf_size, 0); @@ -9367,7 +9769,7 @@ remote_remove_breakpoint (struct target_ops *ops, return (rs->buf[0] == 'E'); } - return memory_remove_breakpoint (ops, gdbarch, bp_tgt); + return memory_remove_breakpoint (ops, gdbarch, bp_tgt, reason); } static enum Z_packet_type @@ -9595,12 +9997,6 @@ remote_insert_hw_breakpoint (struct target_ops *self, struct gdbarch *gdbarch, struct remote_state *rs; char *p, *endbuf; char *message; - int bpsize; - - /* The length field should be set to the size of a breakpoint - instruction, even though we aren't inserting one ourselves. */ - - gdbarch_remote_breakpoint_from_pc (gdbarch, &addr, &bpsize); if (packet_support (PACKET_Z1) == PACKET_DISABLE) return -1; @@ -9620,7 +10016,7 @@ remote_insert_hw_breakpoint (struct target_ops *self, struct gdbarch *gdbarch, addr = remote_address_masked (addr); p += hexnumstr (p, (ULONGEST) addr); - xsnprintf (p, endbuf - p, ",%x", bpsize); + xsnprintf (p, endbuf - p, ",%x", bp_tgt->kind); if (remote_supports_cond_breakpoints (self)) remote_add_target_side_condition (gdbarch, bp_tgt, p, endbuf); @@ -9644,8 +10040,6 @@ remote_insert_hw_breakpoint (struct target_ops *self, struct gdbarch *gdbarch, case PACKET_UNKNOWN: return -1; case PACKET_OK: - bp_tgt->placed_address = addr; - bp_tgt->placed_size = bpsize; return 0; } internal_error (__FILE__, __LINE__, @@ -9676,7 +10070,7 @@ remote_remove_hw_breakpoint (struct target_ops *self, struct gdbarch *gdbarch, addr = remote_address_masked (bp_tgt->placed_address); p += hexnumstr (p, (ULONGEST) addr); - xsnprintf (p, endbuf - p, ",%x", bp_tgt->placed_size); + xsnprintf (p, endbuf - p, ",%x", bp_tgt->kind); putpkt (rs->buf); getpkt (&rs->buf, &rs->buf_size, 0); @@ -9903,7 +10297,7 @@ remote_read_qxfer (struct target_ops *ops, const char *object_name, may not, since we don't know how much of it will need to be escaped; the target is free to respond with slightly less data. We subtract five to account for the response type and the protocol frame. */ - n = min (get_remote_packet_size () - 5, len); + n = std::min (get_remote_packet_size () - 5, len); snprintf (rs->buf, get_remote_packet_size () - 4, "qXfer:%s:read:%s:%s,%s", object_name, annex ? annex : "", phex_nz (offset, sizeof offset), @@ -10162,6 +10556,14 @@ remote_xfer_partial (struct target_ops *ops, enum target_object object, return TARGET_XFER_OK; } +/* Implementation of to_get_memory_xfer_limit. 
*/ + +static ULONGEST +remote_get_memory_xfer_limit (struct target_ops *ops) +{ + return get_memory_write_packet_size (); +} + static int remote_search_memory (struct target_ops* ops, CORE_ADDR start_addr, ULONGEST search_space_len, @@ -11852,8 +12254,6 @@ remote_download_tracepoint (struct target_ops *self, struct bp_location *loc) char **stepping_actions; int ndx; struct cleanup *old_chain = NULL; - struct agent_expr *aexpr; - struct cleanup *aexpr_chain = NULL; char *pkt; struct breakpoint *b = loc->owner; struct tracepoint *t = (struct tracepoint *) b; @@ -11924,15 +12324,13 @@ remote_download_tracepoint (struct target_ops *self, struct bp_location *loc) capabilities at definition time. */ if (remote_supports_cond_tracepoints ()) { - aexpr = gen_eval_for_expr (tpaddr, loc->cond); - aexpr_chain = make_cleanup_free_agent_expr (aexpr); + agent_expr_up aexpr = gen_eval_for_expr (tpaddr, loc->cond.get ()); xsnprintf (buf + strlen (buf), BUF_SIZE - strlen (buf), ":X%x,", aexpr->len); pkt = buf + strlen (buf); for (ndx = 0; ndx < aexpr->len; ++ndx) pkt = pack_hex_byte (pkt, aexpr->buf[ndx]); *pkt = '\0'; - do_cleanups (aexpr_chain); } else warning (_("Target does not support conditional tracepoints, " @@ -12776,6 +13174,60 @@ btrace_read_config (struct btrace_config *conf) } } +/* Maybe reopen target btrace. */ + +static void +remote_btrace_maybe_reopen (void) +{ + struct remote_state *rs = get_remote_state (); + struct cleanup *cleanup; + struct thread_info *tp; + int btrace_target_pushed = 0; + int warned = 0; + + cleanup = make_cleanup_restore_current_thread (); + ALL_NON_EXITED_THREADS (tp) + { + set_general_thread (tp->ptid); + + memset (&rs->btrace_config, 0x00, sizeof (struct btrace_config)); + btrace_read_config (&rs->btrace_config); + + if (rs->btrace_config.format == BTRACE_FORMAT_NONE) + continue; + +#if !defined (HAVE_LIBIPT) + if (rs->btrace_config.format == BTRACE_FORMAT_PT) + { + if (!warned) + { + warned = 1; + warning (_("GDB does not support Intel Processor Trace. " + "\"record\" will not work in this session.")); + } + + continue; + } +#endif /* !defined (HAVE_LIBIPT) */ + + /* Push target, once, but before anything else happens. This way our + changes to the threads will be cleaned up by unpushing the target + in case btrace_read_config () throws. */ + if (!btrace_target_pushed) + { + btrace_target_pushed = 1; + record_btrace_push_target (); + printf_filtered (_("Target is recording using %s.\n"), + btrace_format_string (rs->btrace_config.format)); + } + + tp->btrace.target = XCNEW (struct btrace_target_info); + tp->btrace.target->ptid = tp->ptid; + tp->btrace.target->conf = rs->btrace_config; + } + do_cleanups (cleanup); +} + /* Enable branch tracing. 
*/ static struct btrace_target_info * @@ -13037,6 +13489,7 @@ Specify the serial device it is connected to\n\ remote_ops.to_detach = remote_detach; remote_ops.to_disconnect = remote_disconnect; remote_ops.to_resume = remote_resume; + remote_ops.to_commit_resume = remote_commit_resume; remote_ops.to_wait = remote_wait; remote_ops.to_fetch_registers = remote_fetch_registers; remote_ops.to_store_registers = remote_store_registers; @@ -13075,6 +13528,7 @@ Specify the serial device it is connected to\n\ remote_ops.to_interrupt = remote_interrupt; remote_ops.to_pass_ctrlc = remote_pass_ctrlc; remote_ops.to_xfer_partial = remote_xfer_partial; + remote_ops.to_get_memory_xfer_limit = remote_get_memory_xfer_limit; remote_ops.to_rcmd = remote_rcmd; remote_ops.to_pid_to_exec_file = remote_pid_to_exec_file; remote_ops.to_log_command = serial_log_command; @@ -13331,10 +13785,10 @@ show_remote_cmd (char *args, int from_tty) struct cleanup *option_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "option"); - ui_out_field_string (uiout, "name", list->name); - ui_out_text (uiout, ": "); + uiout->field_string ("name", list->name); + uiout->text (": "); if (list->type == show_cmd) - do_show_command ((char *) NULL, from_tty, list); + do_show_command (NULL, from_tty, list); else cmd_func (list, NULL, from_tty); /* Close the tuple. */
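
The centerpiece of this patch is the deferred-resume scheme: in non-stop mode remote_resume only records each thread's step/signal request in its private_thread_info, and remote_commit_resume later folds all of those requests into as few vCont actions as possible (explicit per-thread actions first, then process-wide or global wildcards). The standalone sketch below is illustrative only -- it is not GDB code; the types and names are simplified placeholders, and the real implementation additionally handles per-process wildcarding, pending %Stopped notifications, and packet-size splitting via the vcont_builder helpers.

#include <stdio.h>

/* One recorded resume request, roughly as remote_resume would store it
   in private_thread_info (simplified, hypothetical type).  */

struct resume_request
{
  unsigned int pid;
  unsigned long tid;
  int step;		/* 1 = step, 0 = continue.  */
  int sig;		/* 0 = no signal.  */
  int already_resumed;	/* Already vCont-resumed on the remote side.  */
};

/* Build a single vCont packet covering REQS[0..N-1] in BUF (assumed
   large enough).  Default "continue, no signal" requests are folded
   into one trailing wildcard "c" action; everything else gets an
   explicit per-thread action.  */

static void
build_vcont (char *buf, size_t size, struct resume_request *reqs, int n)
{
  size_t len = snprintf (buf, size, "vCont");
  int need_wildcard = 0;

  for (int i = 0; i < n; i++)
    {
      struct resume_request *r = &reqs[i];

      if (r->already_resumed)
	continue;

      if (!r->step && r->sig == 0)
	{
	  /* Leave this thread to the wildcard action appended below.  */
	  need_wildcard = 1;
	  continue;
	}

      if (r->sig != 0)
	len += snprintf (buf + len, size - len, ";%c%02x:p%x.%lx",
			 r->step ? 'S' : 'C', r->sig, r->pid, r->tid);
      else
	len += snprintf (buf + len, size - len, ";s:p%x.%lx",
			 r->pid, r->tid);
    }

  if (need_wildcard)
    snprintf (buf + len, size - len, ";c");
}

int
main (void)
{
  struct resume_request reqs[] = {
    { 1, 1, 1, 0, 0 },		/* p1.1: step.  */
    { 1, 2, 0, 0, 0 },		/* p1.2: plain continue.  */
    { 1, 3, 0, 11, 0 },		/* p1.3: continue with signal 11.  */
  };
  char buf[256];

  build_vcont (buf, sizeof (buf), reqs, 3);
  puts (buf);			/* Prints: vCont;s:p1.1;C0b:p1.3;c */
  return 0;
}

Running the sketch prints "vCont;s:p1.1;C0b:p1.3;c": the stepping thread and the thread receiving a signal get explicit actions, while the plain continue is covered by the trailing wildcard -- the same kind of coalescing remote_commit_resume performs before sending the packet.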