+/* See dwarf2loc.h. */
+
+/* Nonzero enables "tailcall:" diagnostics about entry value and tail-call
+   frame resolving, printed to gdb_stdlog. */
+unsigned int entry_values_debug = 0;
+
+/* "show" callback for entry_values_debug; prints the current setting.
+   (It does not set the variable.) */
+
+static void
+show_entry_values_debug (struct ui_file *file, int from_tty,
+ struct cmd_list_element *c, const char *value)
+{
+ fprintf_filtered (file,
+ _("Entry values and tail call frames debugging is %s.\n"),
+ value);
+}
+
+/* Find DW_TAG_GNU_call_site's DW_AT_GNU_call_site_target address.
+ CALLER_FRAME (for registers) can be NULL if it is not known. This function
+ always returns valid address or it throws NO_ENTRY_VALUE_ERROR. */
+
+static CORE_ADDR
+call_site_to_target_addr (struct gdbarch *call_site_gdbarch,
+ struct call_site *call_site,
+ struct frame_info *caller_frame)
+{
+ switch (FIELD_LOC_KIND (call_site->target))
+ {
+ case FIELD_LOC_KIND_DWARF_BLOCK:
+ {
+ struct dwarf2_locexpr_baton *dwarf_block;
+ struct value *val;
+ struct type *caller_core_addr_type;
+ struct gdbarch *caller_arch;
+
+ dwarf_block = FIELD_DWARF_BLOCK (call_site->target);
+ if (dwarf_block == NULL)
+ {
+ struct bound_minimal_symbol msym;
+
+ /* PC - 1: CALL_SITE->PC presumably points just past the call
+ instruction; back up one byte so the symbol lookup lands inside
+ the caller for the error message -- TODO confirm. */
+ msym = lookup_minimal_symbol_by_pc (call_site->pc - 1);
+ throw_error (NO_ENTRY_VALUE_ERROR,
+ _("DW_AT_GNU_call_site_target is not specified "
+ "at %s in %s"),
+ paddress (call_site_gdbarch, call_site->pc),
+ (msym.minsym == NULL ? "???"
+ : MSYMBOL_PRINT_NAME (msym.minsym)));
+
+ }
+ if (caller_frame == NULL)
+ {
+ struct bound_minimal_symbol msym;
+
+ msym = lookup_minimal_symbol_by_pc (call_site->pc - 1);
+ throw_error (NO_ENTRY_VALUE_ERROR,
+ _("DW_AT_GNU_call_site_target DWARF block resolving "
+ "requires known frame which is currently not "
+ "available at %s in %s"),
+ paddress (call_site_gdbarch, call_site->pc),
+ (msym.minsym == NULL ? "???"
+ : MSYMBOL_PRINT_NAME (msym.minsym)));
+
+ }
+ caller_arch = get_frame_arch (caller_frame);
+ caller_core_addr_type = builtin_type (caller_arch)->builtin_func_ptr;
+ val = dwarf2_evaluate_loc_desc (caller_core_addr_type, caller_frame,
+ dwarf_block->data, dwarf_block->size,
+ dwarf_block->per_cu);
+ /* DW_AT_GNU_call_site_target is a DWARF expression, not a DWARF
+ location. */
+ if (VALUE_LVAL (val) == lval_memory)
+ return value_address (val);
+ else
+ return value_as_address (val);
+ }
+
+ case FIELD_LOC_KIND_PHYSNAME:
+ {
+ const char *physname;
+ struct bound_minimal_symbol msym;
+
+ physname = FIELD_STATIC_PHYSNAME (call_site->target);
+
+ /* Handle both the mangled and demangled PHYSNAME. */
+ msym = lookup_minimal_symbol (physname, NULL, NULL);
+ if (msym.minsym == NULL)
+ {
+ msym = lookup_minimal_symbol_by_pc (call_site->pc - 1);
+ throw_error (NO_ENTRY_VALUE_ERROR,
+ _("Cannot find function \"%s\" for a call site target "
+ "at %s in %s"),
+ physname, paddress (call_site_gdbarch, call_site->pc),
+ (msym.minsym == NULL ? "???"
+ : MSYMBOL_PRINT_NAME (msym.minsym)));
+
+ }
+ return BMSYMBOL_VALUE_ADDRESS (msym);
+ }
+
+ case FIELD_LOC_KIND_PHYSADDR:
+ /* The target address was recorded directly in the debug info. */
+ return FIELD_STATIC_PHYSADDR (call_site->target);
+
+ default:
+ internal_error (__FILE__, __LINE__, _("invalid call site target kind"));
+ }
+}
+
+/* Convert function entry point exact address ADDR to the function which is
+ compliant with TAIL_CALL_LIST_COMPLETE condition. Throw
+ NO_ENTRY_VALUE_ERROR otherwise. */
+
+static struct symbol *
+func_addr_to_tail_call_list (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ struct symbol *sym = find_pc_function (addr);
+ struct type *type;
+
+ /* ADDR must be exactly the function's entry PC, not merely any address
+ inside the function. */
+ if (sym == NULL || BLOCK_START (SYMBOL_BLOCK_VALUE (sym)) != addr)
+ throw_error (NO_ENTRY_VALUE_ERROR,
+ _("DW_TAG_GNU_call_site resolving failed to find function "
+ "name for address %s"),
+ paddress (gdbarch, addr));
+
+ type = SYMBOL_TYPE (sym);
+ gdb_assert (TYPE_CODE (type) == TYPE_CODE_FUNC);
+ gdb_assert (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_FUNC);
+
+ return sym;
+}
+
+/* Verify function with entry point exact address ADDR can never call itself
+ via its tail calls (incl. transitively). Throw NO_ENTRY_VALUE_ERROR if it
+ can call itself via tail calls.
+
+ If a function can tail call itself its entry value based parameters are
+ unreliable. There is no verification whether the value of some/all
+ parameters is unchanged through the self tail call, we expect if there is
+ a self tail call all the parameters can be modified. */
+
+static void
+func_verify_no_selftailcall (struct gdbarch *gdbarch, CORE_ADDR verify_addr)
+{
+ struct obstack addr_obstack;
+ struct cleanup *old_chain;
+ CORE_ADDR addr;
+
+ /* Track here CORE_ADDRs which were already visited. */
+ htab_t addr_hash;
+
+ /* The verification is completely unordered. Track here function addresses
+ which still need to be iterated. */
+ VEC (CORE_ADDR) *todo = NULL;
+
+ obstack_init (&addr_obstack);
+ old_chain = make_cleanup_obstack_free (&addr_obstack);
+ addr_hash = htab_create_alloc_ex (64, core_addr_hash, core_addr_eq, NULL,
+ &addr_obstack, hashtab_obstack_allocate,
+ NULL);
+ make_cleanup_htab_delete (addr_hash);
+
+ make_cleanup (VEC_cleanup (CORE_ADDR), &todo);
+
+ VEC_safe_push (CORE_ADDR, todo, verify_addr);
+ while (!VEC_empty (CORE_ADDR, todo))
+ {
+ struct symbol *func_sym;
+ struct call_site *call_site;
+
+ addr = VEC_pop (CORE_ADDR, todo);
+
+ func_sym = func_addr_to_tail_call_list (gdbarch, addr);
+
+ /* Walk every tail call site recorded for this function. */
+ for (call_site = TYPE_TAIL_CALL_LIST (SYMBOL_TYPE (func_sym));
+ call_site; call_site = call_site->tail_call_next)
+ {
+ CORE_ADDR target_addr;
+ void **slot;
+
+ /* CALLER_FRAME with registers is not available for tail-call jumped
+ frames. */
+ target_addr = call_site_to_target_addr (gdbarch, call_site, NULL);
+
+ if (target_addr == verify_addr)
+ {
+ struct bound_minimal_symbol msym;
+
+ /* Reaching VERIFY_ADDR again means the function can tail-call
+ itself, possibly transitively. */
+ msym = lookup_minimal_symbol_by_pc (verify_addr);
+ throw_error (NO_ENTRY_VALUE_ERROR,
+ _("DW_OP_GNU_entry_value resolving has found "
+ "function \"%s\" at %s can call itself via tail "
+ "calls"),
+ (msym.minsym == NULL ? "???"
+ : MSYMBOL_PRINT_NAME (msym.minsym)),
+ paddress (gdbarch, verify_addr));
+ }
+
+ slot = htab_find_slot (addr_hash, &target_addr, INSERT);
+ if (*slot == NULL)
+ {
+ /* TARGET_ADDR was not visited yet -- remember it and queue it
+ for iteration. */
+ *slot = obstack_copy (&addr_obstack, &target_addr,
+ sizeof (target_addr));
+ VEC_safe_push (CORE_ADDR, todo, target_addr);
+ }
+ }
+ }
+
+ do_cleanups (old_chain);
+}
+
+/* Dump CALL_SITE->PC in a user readable form ("address(function)") to
+ gdb_stdlog. Used only for ENTRY_VALUES_DEBUG. */
+
+static void
+tailcall_dump (struct gdbarch *gdbarch, const struct call_site *call_site)
+{
+ CORE_ADDR pc = call_site->pc;
+ struct bound_minimal_symbol msym = lookup_minimal_symbol_by_pc (pc - 1);
+ const char *name = (msym.minsym == NULL
+ ? "???" : MSYMBOL_PRINT_NAME (msym.minsym));
+
+ fprintf_unfiltered (gdb_stdlog, " %s(%s)", paddress (gdbarch, pc), name);
+}
+
+/* vec.h requires a single-word type name, so typedef the pointer type. */
+typedef struct call_site *call_sitep;
+
+/* Define VEC (call_sitep) functions. */
+DEF_VEC_P (call_sitep);
+
+/* Intersect RESULTP with CHAIN to keep RESULTP unambiguous, keep in RESULTP
+ only top callers and bottom callees which are present in both. GDBARCH is
+ used only for ENTRY_VALUES_DEBUG. RESULTP is NULL after return if there are
+ no remaining possibilities to provide unambiguous non-trivial result.
+ RESULTP should point to NULL on the first (initialization) call. Caller is
+ responsible for xfree of any RESULTP data. */
+
+static void
+chain_candidate (struct gdbarch *gdbarch, struct call_site_chain **resultp,
+ VEC (call_sitep) *chain)
+{
+ struct call_site_chain *result = *resultp;
+ long length = VEC_length (call_sitep, chain);
+ int callers, callees, idx;
+
+ if (result == NULL)
+ {
+ /* Create the initial chain containing all the passed PCs. */
+
+ /* LENGTH - 1: struct call_site_chain presumably already embeds one
+ call_site[] element -- TODO confirm against its definition. */
+ result = xmalloc (sizeof (*result) + sizeof (*result->call_site)
+ * (length - 1));
+ result->length = length;
+ result->callers = result->callees = length;
+ if (!VEC_empty (call_sitep, chain))
+ memcpy (result->call_site, VEC_address (call_sitep, chain),
+ sizeof (*result->call_site) * length);
+ *resultp = result;
+
+ if (entry_values_debug)
+ {
+ fprintf_unfiltered (gdb_stdlog, "tailcall: initial:");
+ for (idx = 0; idx < length; idx++)
+ tailcall_dump (gdbarch, result->call_site[idx]);
+ fputc_unfiltered ('\n', gdb_stdlog);
+ }
+
+ return;
+ }
+
+ if (entry_values_debug)
+ {
+ fprintf_unfiltered (gdb_stdlog, "tailcall: compare:");
+ for (idx = 0; idx < length; idx++)
+ tailcall_dump (gdbarch, VEC_index (call_sitep, chain, idx));
+ fputc_unfiltered ('\n', gdb_stdlog);
+ }
+
+ /* Intersect callers: keep only the longest common prefix of RESULT and
+ CHAIN, truncating RESULT->CALLERS at the first mismatch. */
+
+ callers = min (result->callers, length);
+ for (idx = 0; idx < callers; idx++)
+ if (result->call_site[idx] != VEC_index (call_sitep, chain, idx))
+ {
+ result->callers = idx;
+ break;
+ }
+
+ /* Intersect callees: likewise, but for the longest common suffix. */
+
+ callees = min (result->callees, length);
+ for (idx = 0; idx < callees; idx++)
+ if (result->call_site[result->length - 1 - idx]
+ != VEC_index (call_sitep, chain, length - 1 - idx))
+ {
+ result->callees = idx;
+ break;
+ }
+
+ if (entry_values_debug)
+ {
+ fprintf_unfiltered (gdb_stdlog, "tailcall: reduced:");
+ for (idx = 0; idx < result->callers; idx++)
+ tailcall_dump (gdbarch, result->call_site[idx]);
+ fputs_unfiltered (" |", gdb_stdlog);
+ for (idx = 0; idx < result->callees; idx++)
+ tailcall_dump (gdbarch, result->call_site[result->length
+ - result->callees + idx]);
+ fputc_unfiltered ('\n', gdb_stdlog);
+ }
+
+ if (result->callers == 0 && result->callees == 0)
+ {
+ /* There are no common callers or callees. It could be also a direct
+ call (which has length 0) with ambiguous possibility of an indirect
+ call - CALLERS == CALLEES == 0 is valid during the first allocation
+ but any subsequence processing of such entry means ambiguity. */
+ xfree (result);
+ *resultp = NULL;
+ return;
+ }
+
+ /* See call_site_find_chain_1 why there is no way to reach the bottom callee
+ PC again. In such case there must be two different code paths to reach
+ it, therefore some of the former determined intermediate PCs must differ
+ and the unambiguous chain gets shortened. */
+ gdb_assert (result->callers + result->callees < result->length);
+}
+
+/* Create and return call_site_chain for CALLER_PC and CALLEE_PC. All the
+ assumed frames between them use GDBARCH. Use depth first search so we can
+ keep single CHAIN of call_site's back to CALLER_PC. Function recursion
+ would have needless GDB stack overhead. Caller is responsible for xfree of
+ the returned result. Any unreliability results in thrown
+ NO_ENTRY_VALUE_ERROR. */
+
+static struct call_site_chain *
+call_site_find_chain_1 (struct gdbarch *gdbarch, CORE_ADDR caller_pc,
+ CORE_ADDR callee_pc)
+{
+ CORE_ADDR save_callee_pc = callee_pc;
+ struct obstack addr_obstack;
+ struct cleanup *back_to_retval, *back_to_workdata;
+ struct call_site_chain *retval = NULL;
+ struct call_site *call_site;
+
+ /* Mark CALL_SITEs so we do not visit the same ones twice. */
+ htab_t addr_hash;
+
+ /* CHAIN contains only the intermediate CALL_SITEs. Neither CALLER_PC's
+ call_site nor any possible call_site at CALLEE_PC's function is there.
+ Any CALL_SITE in CHAIN will be iterated to its siblings - via
+ TAIL_CALL_NEXT. This is inappropriate for CALLER_PC's call_site. */
+ VEC (call_sitep) *chain = NULL;
+
+ /* We are not interested in the specific PC inside the callee function. */
+ callee_pc = get_pc_function_start (callee_pc);
+ if (callee_pc == 0)
+ throw_error (NO_ENTRY_VALUE_ERROR, _("Unable to find function for PC %s"),
+ paddress (gdbarch, save_callee_pc));
+
+ back_to_retval = make_cleanup (free_current_contents, &retval);
+
+ obstack_init (&addr_obstack);
+ back_to_workdata = make_cleanup_obstack_free (&addr_obstack);
+ addr_hash = htab_create_alloc_ex (64, core_addr_hash, core_addr_eq, NULL,
+ &addr_obstack, hashtab_obstack_allocate,
+ NULL);
+ make_cleanup_htab_delete (addr_hash);
+
+ make_cleanup (VEC_cleanup (call_sitep), &chain);
+
+ /* Do not push CALL_SITE to CHAIN. Push there only the first tail call site
+ at the target's function. All the possible tail call sites in the
+ target's function will get iterated as already pushed into CHAIN via their
+ TAIL_CALL_NEXT. */
+ call_site = call_site_for_pc (gdbarch, caller_pc);
+
+ /* Iterative depth-first search over the tail-call graph. CALL_SITE is the
+ site whose target is explored next; CHAIN holds the current path. */
+ while (call_site)
+ {
+ CORE_ADDR target_func_addr;
+ struct call_site *target_call_site;
+
+ /* CALLER_FRAME with registers is not available for tail-call jumped
+ frames. */
+ target_func_addr = call_site_to_target_addr (gdbarch, call_site, NULL);
+
+ if (target_func_addr == callee_pc)
+ {
+ /* Found one complete path -- intersect it with any previously
+ found paths to keep only the unambiguous part. */
+ chain_candidate (gdbarch, &retval, chain);
+ if (retval == NULL)
+ break;
+
+ /* There is no way to reach CALLEE_PC again as we would prevent
+ entering it twice as being already marked in ADDR_HASH. */
+ target_call_site = NULL;
+ }
+ else
+ {
+ struct symbol *target_func;
+
+ target_func = func_addr_to_tail_call_list (gdbarch, target_func_addr);
+ target_call_site = TYPE_TAIL_CALL_LIST (SYMBOL_TYPE (target_func));
+ }
+
+ do
+ {
+ /* Attempt to visit TARGET_CALL_SITE. */
+
+ if (target_call_site)
+ {
+ void **slot;
+
+ slot = htab_find_slot (addr_hash, &target_call_site->pc, INSERT);
+ if (*slot == NULL)
+ {
+ /* Successfully entered TARGET_CALL_SITE. */
+
+ *slot = &target_call_site->pc;
+ VEC_safe_push (call_sitep, chain, target_call_site);
+ break;
+ }
+ }
+
+ /* Backtrack (without revisiting the originating call_site). Try the
+ caller's sibling; if there isn't any try the caller's caller's
+ sibling etc. */
+
+ target_call_site = NULL;
+ while (!VEC_empty (call_sitep, chain))
+ {
+ call_site = VEC_pop (call_sitep, chain);
+
+ gdb_assert (htab_find_slot (addr_hash, &call_site->pc,
+ NO_INSERT) != NULL);
+ htab_remove_elt (addr_hash, &call_site->pc);
+
+ target_call_site = call_site->tail_call_next;
+ if (target_call_site)
+ break;
+ }
+ }
+ while (target_call_site);
+
+ if (VEC_empty (call_sitep, chain))
+ call_site = NULL;
+ else
+ call_site = VEC_last (call_sitep, chain);
+ }
+
+ if (retval == NULL)
+ {
+ struct bound_minimal_symbol msym_caller, msym_callee;
+
+ msym_caller = lookup_minimal_symbol_by_pc (caller_pc);
+ msym_callee = lookup_minimal_symbol_by_pc (callee_pc);
+ throw_error (NO_ENTRY_VALUE_ERROR,
+ _("There are no unambiguously determinable intermediate "
+ "callers or callees between caller function \"%s\" at %s "
+ "and callee function \"%s\" at %s"),
+ (msym_caller.minsym == NULL
+ ? "???" : MSYMBOL_PRINT_NAME (msym_caller.minsym)),
+ paddress (gdbarch, caller_pc),
+ (msym_callee.minsym == NULL
+ ? "???" : MSYMBOL_PRINT_NAME (msym_callee.minsym)),
+ paddress (gdbarch, callee_pc));
+ }
+
+ do_cleanups (back_to_workdata);
+ discard_cleanups (back_to_retval);
+ return retval;
+}
+
+/* Create and return call_site_chain for CALLER_PC and CALLEE_PC. All the
+ assumed frames between them use GDBARCH. If valid call_site_chain cannot be
+ constructed return NULL. Caller is responsible for xfree of the returned
+ result. */
+
+struct call_site_chain *
+call_site_find_chain (struct gdbarch *gdbarch, CORE_ADDR caller_pc,
+ CORE_ADDR callee_pc)
+{
+ struct call_site_chain *retval = NULL;
+
+ TRY
+ {
+ retval = call_site_find_chain_1 (gdbarch, caller_pc, callee_pc);
+ }
+ CATCH (e, RETURN_MASK_ERROR)
+ {
+ /* NO_ENTRY_VALUE_ERROR is an expected failure mode here -- swallow it
+ and return NULL; any other error is re-thrown to the caller. */
+ if (e.error == NO_ENTRY_VALUE_ERROR)
+ {
+ if (entry_values_debug)
+ exception_print (gdb_stdout, e);
+
+ return NULL;
+ }
+ else
+ throw_exception (e);
+ }
+ END_CATCH
+
+ return retval;
+}
+
+/* Return 1 if PARAMETER's kind and value match KIND and KIND_U, else
+ return 0. */
+
+static int
+call_site_parameter_matches (struct call_site_parameter *parameter,
+ enum call_site_parameter_kind kind,
+ union call_site_parameter_u kind_u)
+{
+ if (kind != parameter->kind)
+ return 0;
+
+ switch (kind)
+ {
+ case CALL_SITE_PARAMETER_DWARF_REG:
+ return kind_u.dwarf_reg == parameter->u.dwarf_reg;
+ case CALL_SITE_PARAMETER_FB_OFFSET:
+ return kind_u.fb_offset == parameter->u.fb_offset;
+ case CALL_SITE_PARAMETER_PARAM_OFFSET:
+ return kind_u.param_offset.cu_off == parameter->u.param_offset.cu_off;
+ }
+ return 0;
+}
+
+/* Fetch call_site_parameter from caller matching KIND and KIND_U.
+ FRAME is for callee.
+
+ Function always returns non-NULL, it throws NO_ENTRY_VALUE_ERROR
+ otherwise. */
+
+static struct call_site_parameter *
+dwarf_expr_reg_to_entry_parameter (struct frame_info *frame,
+ enum call_site_parameter_kind kind,
+ union call_site_parameter_u kind_u,
+ struct dwarf2_per_cu_data **per_cu_return)
+{
+ CORE_ADDR func_addr, caller_pc;
+ struct gdbarch *gdbarch;
+ struct frame_info *caller_frame;
+ struct call_site *call_site;
+ int iparams;
+ /* Initialize it just to avoid a GCC false warning. */
+ struct call_site_parameter *parameter = NULL;
+ CORE_ADDR target_addr;
+
+ /* Step out of any inline frames to reach the enclosing function's
+ frame. */
+ while (get_frame_type (frame) == INLINE_FRAME)
+ {
+ frame = get_prev_frame (frame);
+ gdb_assert (frame != NULL);
+ }
+
+ func_addr = get_frame_func (frame);
+ gdbarch = get_frame_arch (frame);
+ caller_frame = get_prev_frame (frame);
+ /* Cross-gdbarch unwinding is not supported for entry values. */
+ if (gdbarch != frame_unwind_arch (frame))
+ {
+ struct bound_minimal_symbol msym
+ = lookup_minimal_symbol_by_pc (func_addr);
+ struct gdbarch *caller_gdbarch = frame_unwind_arch (frame);
+
+ throw_error (NO_ENTRY_VALUE_ERROR,
+ _("DW_OP_GNU_entry_value resolving callee gdbarch %s "
+ "(of %s (%s)) does not match caller gdbarch %s"),
+ gdbarch_bfd_arch_info (gdbarch)->printable_name,
+ paddress (gdbarch, func_addr),
+ (msym.minsym == NULL ? "???"
+ : MSYMBOL_PRINT_NAME (msym.minsym)),
+ gdbarch_bfd_arch_info (caller_gdbarch)->printable_name);
+ }
+
+ if (caller_frame == NULL)
+ {
+ struct bound_minimal_symbol msym
+ = lookup_minimal_symbol_by_pc (func_addr);
+
+ throw_error (NO_ENTRY_VALUE_ERROR, _("DW_OP_GNU_entry_value resolving "
+ "requires caller of %s (%s)"),
+ paddress (gdbarch, func_addr),
+ (msym.minsym == NULL ? "???"
+ : MSYMBOL_PRINT_NAME (msym.minsym)));
+ }
+ caller_pc = get_frame_pc (caller_frame);
+ call_site = call_site_for_pc (gdbarch, caller_pc);
+
+ /* Verify the call site in the caller really targets the function FRAME
+ is executing. */
+ target_addr = call_site_to_target_addr (gdbarch, call_site, caller_frame);
+ if (target_addr != func_addr)
+ {
+ struct minimal_symbol *target_msym, *func_msym;
+
+ target_msym = lookup_minimal_symbol_by_pc (target_addr).minsym;
+ func_msym = lookup_minimal_symbol_by_pc (func_addr).minsym;
+ throw_error (NO_ENTRY_VALUE_ERROR,
+ _("DW_OP_GNU_entry_value resolving expects callee %s at %s "
+ "but the called frame is for %s at %s"),
+ (target_msym == NULL ? "???"
+ : MSYMBOL_PRINT_NAME (target_msym)),
+ paddress (gdbarch, target_addr),
+ func_msym == NULL ? "???" : MSYMBOL_PRINT_NAME (func_msym),
+ paddress (gdbarch, func_addr));
+ }
+
+ /* No entry value based parameters would be reliable if this function can
+ call itself via tail calls. */
+ func_verify_no_selftailcall (gdbarch, func_addr);
+
+ /* Linear scan for the call site parameter matching KIND/KIND_U. */
+ for (iparams = 0; iparams < call_site->parameter_count; iparams++)
+ {
+ parameter = &call_site->parameter[iparams];
+ if (call_site_parameter_matches (parameter, kind, kind_u))
+ break;
+ }
+ if (iparams == call_site->parameter_count)
+ {
+ struct minimal_symbol *msym
+ = lookup_minimal_symbol_by_pc (caller_pc).minsym;
+
+ /* DW_TAG_GNU_call_site_parameter will be missing just if GCC could not
+ determine its value. */
+ throw_error (NO_ENTRY_VALUE_ERROR, _("Cannot find matching parameter "
+ "at DW_TAG_GNU_call_site %s at %s"),
+ paddress (gdbarch, caller_pc),
+ msym == NULL ? "???" : MSYMBOL_PRINT_NAME (msym));
+ }
+
+ *per_cu_return = call_site->per_cu;
+ return parameter;
+}
+
+/* Return value for PARAMETER matching DEREF_SIZE. If DEREF_SIZE is -1, return
+ the normal DW_AT_GNU_call_site_value block. Otherwise return the
+ DW_AT_GNU_call_site_data_value (dereferenced) block.
+
+ TYPE and CALLER_FRAME specify how to evaluate the DWARF block into returned
+ struct value.
+
+ Function always returns non-NULL, non-optimized out value. It throws
+ NO_ENTRY_VALUE_ERROR if it cannot resolve the value for any reason. */
+
+static struct value *
+dwarf_entry_parameter_to_value (struct call_site_parameter *parameter,
+ CORE_ADDR deref_size, struct type *type,
+ struct frame_info *caller_frame,
+ struct dwarf2_per_cu_data *per_cu)
+{
+ const gdb_byte *data_src;
+ gdb_byte *data;
+ size_t size;
+
+ data_src = deref_size == -1 ? parameter->value : parameter->data_value;
+ size = deref_size == -1 ? parameter->value_size : parameter->data_value_size;
+
+ /* DEREF_SIZE size is not verified here. */
+ if (data_src == NULL)
+ throw_error (NO_ENTRY_VALUE_ERROR,
+ _("Cannot resolve DW_AT_GNU_call_site_data_value"));
+
+ /* DW_AT_GNU_call_site_value is a DWARF expression, not a DWARF
+ location. Postprocessing of DWARF_VALUE_MEMORY would lose the type from
+ DWARF block. Append DW_OP_stack_value (hence SIZE + 1) so the result
+ is treated as the expression's value, not as a memory location. */
+ data = alloca (size + 1);
+ memcpy (data, data_src, size);
+ data[size] = DW_OP_stack_value;
+
+ return dwarf2_evaluate_loc_desc (type, caller_frame, data, size + 1, per_cu);
+}
+
+/* Execute DWARF block of call_site_parameter which matches KIND and KIND_U.
+ Choose DEREF_SIZE value of that parameter. Search caller of the CTX's
+ frame. CTX must be of dwarf_expr_ctx_funcs kind.
+
+ The CTX caller can be from a different CU - per_cu_dwarf_call implementation
+ can be more simple as it does not support cross-CU DWARF executions. */
+
+static void
+dwarf_expr_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
+ enum call_site_parameter_kind kind,
+ union call_site_parameter_u kind_u,
+ int deref_size)
+{
+ struct dwarf_expr_baton *debaton;
+ struct frame_info *frame, *caller_frame;
+ struct dwarf2_per_cu_data *caller_per_cu;
+ struct dwarf_expr_baton baton_local;
+ struct dwarf_expr_context saved_ctx;
+ struct call_site_parameter *parameter;
+ const gdb_byte *data_src;
+ size_t size;
+
+ gdb_assert (ctx->funcs == &dwarf_expr_ctx_funcs);
+ debaton = ctx->baton;
+ frame = debaton->frame;
+ caller_frame = get_prev_frame (frame);
+
+ parameter = dwarf_expr_reg_to_entry_parameter (frame, kind, kind_u,
+ &caller_per_cu);
+ data_src = deref_size == -1 ? parameter->value : parameter->data_value;
+ size = deref_size == -1 ? parameter->value_size : parameter->data_value_size;
+
+ /* DEREF_SIZE size is not verified here. */
+ if (data_src == NULL)
+ throw_error (NO_ENTRY_VALUE_ERROR,
+ _("Cannot resolve DW_AT_GNU_call_site_data_value"));
+
+ baton_local.frame = caller_frame;
+ baton_local.per_cu = caller_per_cu;
+ baton_local.obj_address = 0;
+
+ /* Temporarily switch CTX to the caller's CU so the DWARF block is
+ evaluated with the caller's gdbarch, address size and text offset. */
+ saved_ctx.gdbarch = ctx->gdbarch;
+ saved_ctx.addr_size = ctx->addr_size;
+ saved_ctx.offset = ctx->offset;
+ saved_ctx.baton = ctx->baton;
+ ctx->gdbarch = get_objfile_arch (dwarf2_per_cu_objfile (baton_local.per_cu));
+ ctx->addr_size = dwarf2_per_cu_addr_size (baton_local.per_cu);
+ ctx->offset = dwarf2_per_cu_text_offset (baton_local.per_cu);
+ ctx->baton = &baton_local;
+
+ dwarf_expr_eval (ctx, data_src, size);
+
+ /* Restore the callee's evaluation context. */
+ ctx->gdbarch = saved_ctx.gdbarch;
+ ctx->addr_size = saved_ctx.addr_size;
+ ctx->offset = saved_ctx.offset;
+ ctx->baton = saved_ctx.baton;
+}
+
+/* dwarf2_evaluate_loc_desc callback: return the address that
+ DW_OP_GNU_addr_index entry INDEX refers to for BATON's CU. */
+
+static CORE_ADDR
+dwarf_expr_get_addr_index (void *baton, unsigned int index)
+{
+ struct dwarf_expr_baton *expr_baton = (struct dwarf_expr_baton *) baton;
+
+ return dwarf2_read_addr_index (expr_baton->per_cu, index);
+}
+
+/* get_object_address callback: return the address of the VLA object,
+ erroring out if it has not been recorded in the baton. */
+
+static CORE_ADDR
+dwarf_expr_get_obj_addr (void *baton)
+{
+ struct dwarf_expr_baton *expr_baton = (struct dwarf_expr_baton *) baton;
+
+ gdb_assert (expr_baton != NULL);
+
+ if (expr_baton->obj_address == 0)
+ error (_("Location address is not set."));
+
+ return expr_baton->obj_address;
+}
+
+/* VALUE must be of type lval_computed with entry_data_value_funcs. Perform
+ the indirect method on it, that is use its stored target value, the sole
+ purpose of entry_data_value_funcs. */
+
+static struct value *
+entry_data_value_coerce_ref (const struct value *value)
+{
+ struct type *checked_type = check_typedef (value_type (value));
+ struct value *target_val;
+
+ /* Only references carry a stored target value to hand out. */
+ if (TYPE_CODE (checked_type) != TYPE_CODE_REF)
+ return NULL;
+
+ /* The closure holds the target value; give the caller its own
+ reference. */
+ target_val = value_computed_closure (value);
+ value_incref (target_val);
+ return target_val;
+}
+
+/* Implement copy_closure. The closure is the stored target value, so a
+ copy just acquires another reference to it. */
+
+static void *
+entry_data_value_copy_closure (const struct value *v)
+{
+ struct value *target = value_computed_closure (v);
+
+ value_incref (target);
+ return target;
+}
+
+/* Implement free_closure. Release the reference held on the stored target
+ value. */
+
+static void
+entry_data_value_free_closure (struct value *v)
+{
+ struct value *target_val = value_computed_closure (v);
+
+ value_free (target_val);
+}
+
+/* Vector for methods for an entry value reference where the referenced value
+ is stored in the caller. On the first dereference use
+ DW_AT_GNU_call_site_data_value in the caller. */
+
+static const struct lval_funcs entry_data_value_funcs =
+{
+ NULL, /* read */
+ NULL, /* write */
+ NULL, /* indirect */
+ entry_data_value_coerce_ref, /* coerce_ref */
+ NULL, /* check_synthetic_pointer */
+ entry_data_value_copy_closure, /* copy_closure */
+ entry_data_value_free_closure /* free_closure */
+};
+
+/* Read parameter of TYPE at (callee) FRAME's function entry. KIND and KIND_U
+ are used to match DW_AT_location at the caller's
+ DW_TAG_GNU_call_site_parameter.
+
+ Function always returns non-NULL value. It throws NO_ENTRY_VALUE_ERROR if it
+ cannot resolve the parameter for any reason. */
+
+static struct value *
+value_of_dwarf_reg_entry (struct type *type, struct frame_info *frame,
+ enum call_site_parameter_kind kind,
+ union call_site_parameter_u kind_u)
+{
+ struct type *checked_type = check_typedef (type);
+ struct type *target_type = TYPE_TARGET_TYPE (checked_type);
+ struct frame_info *caller_frame = get_prev_frame (frame);
+ struct value *outer_val, *target_val, *val;
+ struct call_site_parameter *parameter;
+ struct dwarf2_per_cu_data *caller_per_cu;
+
+ parameter = dwarf_expr_reg_to_entry_parameter (frame, kind, kind_u,
+ &caller_per_cu);
+
+ outer_val = dwarf_entry_parameter_to_value (parameter, -1 /* deref_size */,
+ type, caller_frame,
+ caller_per_cu);
+
+ /* Check if DW_AT_GNU_call_site_data_value cannot be used. If it should be
+ used and it is not available do not fall back to OUTER_VAL - dereferencing
+ TYPE_CODE_REF with non-entry data value would give current value - not the
+ entry value. */
+
+ if (TYPE_CODE (checked_type) != TYPE_CODE_REF
+ || TYPE_TARGET_TYPE (checked_type) == NULL)
+ return outer_val;
+
+ target_val = dwarf_entry_parameter_to_value (parameter,
+ TYPE_LENGTH (target_type),
+ target_type, caller_frame,
+ caller_per_cu);
+
+ /* TARGET_VAL becomes the closure of VAL and is released later via
+ entry_data_value_free_closure. */
+ release_value (target_val);
+ val = allocate_computed_value (type, &entry_data_value_funcs,
+ target_val /* closure */);
+
+ /* Copy the referencing pointer to the new computed value. */
+ memcpy (value_contents_raw (val), value_contents_raw (outer_val),
+ TYPE_LENGTH (checked_type));
+ set_value_lazy (val, 0);
+
+ return val;
+}
+
+/* Read parameter of TYPE at (callee) FRAME's function entry. DATA and
+ SIZE are DWARF block used to match DW_AT_location at the caller's
+ DW_TAG_GNU_call_site_parameter.
+
+ Function always returns non-NULL value. It throws NO_ENTRY_VALUE_ERROR if it
+ cannot resolve the parameter for any reason. */
+
+static struct value *
+value_of_dwarf_block_entry (struct type *type, struct frame_info *frame,
+ const gdb_byte *block, size_t block_len)
+{
+ union call_site_parameter_u kind_u;
+
+ /* dwarf_block_to_dwarf_reg presumably returns -1 when BLOCK is not a
+ single DW_OP_reg* -- TODO confirm; then try DW_OP_fbreg below. */
+ kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (block, block + block_len);
+ if (kind_u.dwarf_reg != -1)
+ return value_of_dwarf_reg_entry (type, frame, CALL_SITE_PARAMETER_DWARF_REG,
+ kind_u);
+
+ if (dwarf_block_to_fb_offset (block, block + block_len, &kind_u.fb_offset))
+ return value_of_dwarf_reg_entry (type, frame, CALL_SITE_PARAMETER_FB_OFFSET,
+ kind_u);
+
+ /* This can normally happen - throw NO_ENTRY_VALUE_ERROR to get the message
+ suppressed during normal operation. The expression can be arbitrary if
+ there is no caller-callee entry value binding expected. */
+ throw_error (NO_ENTRY_VALUE_ERROR,
+ _("DWARF-2 expression error: DW_OP_GNU_entry_value is supported "
+ "only for single DW_OP_reg* or for DW_OP_fbreg(*)"));
+}
+