+ offsets[op_ptr - base] = expr->len;
+ ++op_ptr;
+
+ /* Our basic approach to code generation is to map DWARF
+ operations directly to AX operations. However, there are
+ some differences.
+
+ First, DWARF works on address-sized units, but AX always uses
+ LONGEST. For most operations we simply ignore this
+ difference; instead we generate sign extensions as needed
+ before division and comparison operations. It would be nice
+ to omit the sign extensions, but there is no way to determine
+ the size of the target's LONGEST. (This code uses the size
+ of the host LONGEST in some cases -- that is a bug but it is
+ difficult to fix.)
+
+ Second, some DWARF operations cannot be translated to AX.
+ For these we simply fail. See
+ http://sourceware.org/bugzilla/show_bug.cgi?id=11662. */
+ switch (op)
+ {
+ case DW_OP_lit0:
+ case DW_OP_lit1:
+ case DW_OP_lit2:
+ case DW_OP_lit3:
+ case DW_OP_lit4:
+ case DW_OP_lit5:
+ case DW_OP_lit6:
+ case DW_OP_lit7:
+ case DW_OP_lit8:
+ case DW_OP_lit9:
+ case DW_OP_lit10:
+ case DW_OP_lit11:
+ case DW_OP_lit12:
+ case DW_OP_lit13:
+ case DW_OP_lit14:
+ case DW_OP_lit15:
+ case DW_OP_lit16:
+ case DW_OP_lit17:
+ case DW_OP_lit18:
+ case DW_OP_lit19:
+ case DW_OP_lit20:
+ case DW_OP_lit21:
+ case DW_OP_lit22:
+ case DW_OP_lit23:
+ case DW_OP_lit24:
+ case DW_OP_lit25:
+ case DW_OP_lit26:
+ case DW_OP_lit27:
+ case DW_OP_lit28:
+ case DW_OP_lit29:
+ case DW_OP_lit30:
+ case DW_OP_lit31:
+ ax_const_l (expr, op - DW_OP_lit0);
+ break;
+
+ case DW_OP_addr:
+ uoffset = extract_unsigned_integer (op_ptr, addr_size, byte_order);
+ op_ptr += addr_size;
+ /* Some versions of GCC emit DW_OP_addr before
+ DW_OP_GNU_push_tls_address. In this case the value is an
+ index, not an address. We don't support things like
+ branching between the address and the TLS op. */
+ if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
+ uoffset += dwarf2_per_cu_text_offset (per_cu);
+ ax_const_l (expr, uoffset);
+ break;
+
+ case DW_OP_const1u:
+ ax_const_l (expr, extract_unsigned_integer (op_ptr, 1, byte_order));
+ op_ptr += 1;
+ break;
+ case DW_OP_const1s:
+ ax_const_l (expr, extract_signed_integer (op_ptr, 1, byte_order));
+ op_ptr += 1;
+ break;
+ case DW_OP_const2u:
+ ax_const_l (expr, extract_unsigned_integer (op_ptr, 2, byte_order));
+ op_ptr += 2;
+ break;
+ case DW_OP_const2s:
+ ax_const_l (expr, extract_signed_integer (op_ptr, 2, byte_order));
+ op_ptr += 2;
+ break;
+ case DW_OP_const4u:
+ ax_const_l (expr, extract_unsigned_integer (op_ptr, 4, byte_order));
+ op_ptr += 4;
+ break;
+ case DW_OP_const4s:
+ ax_const_l (expr, extract_signed_integer (op_ptr, 4, byte_order));
+ op_ptr += 4;
+ break;
+ case DW_OP_const8u:
+ ax_const_l (expr, extract_unsigned_integer (op_ptr, 8, byte_order));
+ op_ptr += 8;
+ break;
+ case DW_OP_const8s:
+ ax_const_l (expr, extract_signed_integer (op_ptr, 8, byte_order));
+ op_ptr += 8;
+ break;
+ case DW_OP_constu:
+ op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
+ ax_const_l (expr, uoffset);
+ break;
+ case DW_OP_consts:
+ op_ptr = read_sleb128 (op_ptr, op_end, &offset);
+ ax_const_l (expr, offset);
+ break;
+
+ case DW_OP_reg0:
+ case DW_OP_reg1:
+ case DW_OP_reg2:
+ case DW_OP_reg3:
+ case DW_OP_reg4:
+ case DW_OP_reg5:
+ case DW_OP_reg6:
+ case DW_OP_reg7:
+ case DW_OP_reg8:
+ case DW_OP_reg9:
+ case DW_OP_reg10:
+ case DW_OP_reg11:
+ case DW_OP_reg12:
+ case DW_OP_reg13:
+ case DW_OP_reg14:
+ case DW_OP_reg15:
+ case DW_OP_reg16:
+ case DW_OP_reg17:
+ case DW_OP_reg18:
+ case DW_OP_reg19:
+ case DW_OP_reg20:
+ case DW_OP_reg21:
+ case DW_OP_reg22:
+ case DW_OP_reg23:
+ case DW_OP_reg24:
+ case DW_OP_reg25:
+ case DW_OP_reg26:
+ case DW_OP_reg27:
+ case DW_OP_reg28:
+ case DW_OP_reg29:
+ case DW_OP_reg30:
+ case DW_OP_reg31:
+ dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
+ loc->u.reg = translate_register (arch, op - DW_OP_reg0);
+ loc->kind = axs_lvalue_register;
+ break;
+
+ case DW_OP_regx:
+ op_ptr = read_uleb128 (op_ptr, op_end, &reg);
+ dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
+ loc->u.reg = translate_register (arch, reg);
+ loc->kind = axs_lvalue_register;
+ break;
+
+ case DW_OP_implicit_value:
+ {
+ ULONGEST len;
+
+ op_ptr = read_uleb128 (op_ptr, op_end, &len);
+ if (op_ptr + len > op_end)
+ error (_("DW_OP_implicit_value: too few bytes available."));
+ if (len > sizeof (ULONGEST))
+ error (_("Cannot translate DW_OP_implicit_value of %d bytes"),
+ (int) len);
+
+ ax_const_l (expr, extract_unsigned_integer (op_ptr, len,
+ byte_order));
+ op_ptr += len;
+ dwarf_expr_require_composition (op_ptr, op_end,
+ "DW_OP_implicit_value");
+
+ loc->kind = axs_rvalue;
+ }
+ break;
+
+ case DW_OP_stack_value:
+ dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
+ loc->kind = axs_rvalue;
+ break;
+
+ case DW_OP_breg0:
+ case DW_OP_breg1:
+ case DW_OP_breg2:
+ case DW_OP_breg3:
+ case DW_OP_breg4:
+ case DW_OP_breg5:
+ case DW_OP_breg6:
+ case DW_OP_breg7:
+ case DW_OP_breg8:
+ case DW_OP_breg9:
+ case DW_OP_breg10:
+ case DW_OP_breg11:
+ case DW_OP_breg12:
+ case DW_OP_breg13:
+ case DW_OP_breg14:
+ case DW_OP_breg15:
+ case DW_OP_breg16:
+ case DW_OP_breg17:
+ case DW_OP_breg18:
+ case DW_OP_breg19:
+ case DW_OP_breg20:
+ case DW_OP_breg21:
+ case DW_OP_breg22:
+ case DW_OP_breg23:
+ case DW_OP_breg24:
+ case DW_OP_breg25:
+ case DW_OP_breg26:
+ case DW_OP_breg27:
+ case DW_OP_breg28:
+ case DW_OP_breg29:
+ case DW_OP_breg30:
+ case DW_OP_breg31:
+ op_ptr = read_sleb128 (op_ptr, op_end, &offset);
+ i = translate_register (arch, op - DW_OP_breg0);
+ ax_reg (expr, i);
+ if (offset != 0)
+ {
+ ax_const_l (expr, offset);
+ ax_simple (expr, aop_add);
+ }
+ break;
+ case DW_OP_bregx:
+ {
+ op_ptr = read_uleb128 (op_ptr, op_end, &reg);
+ op_ptr = read_sleb128 (op_ptr, op_end, &offset);
+ i = translate_register (arch, reg);
+ ax_reg (expr, i);
+ if (offset != 0)
+ {
+ ax_const_l (expr, offset);
+ ax_simple (expr, aop_add);
+ }
+ }
+ break;
+ case DW_OP_fbreg:
+ {
+ const gdb_byte *datastart;
+ size_t datalen;
+ unsigned int before_stack_len;
+ struct block *b;
+ struct symbol *framefunc;
+ LONGEST base_offset = 0;
+
+ b = block_for_pc (expr->scope);
+
+ if (!b)
+ error (_("No block found for address"));
+
+ framefunc = block_linkage_function (b);
+
+ if (!framefunc)
+ error (_("No function found for block"));
+
+ dwarf_expr_frame_base_1 (framefunc, expr->scope,
+ &datastart, &datalen);
+
+ op_ptr = read_sleb128 (op_ptr, op_end, &offset);
+ compile_dwarf_to_ax (expr, loc, arch, addr_size, datastart,
+ datastart + datalen, per_cu);
+
+ if (offset != 0)
+ {
+ ax_const_l (expr, offset);
+ ax_simple (expr, aop_add);
+ }
+
+ loc->kind = axs_lvalue_memory;
+ }
+ break;
+
+ case DW_OP_dup:
+ ax_simple (expr, aop_dup);
+ break;
+
+ case DW_OP_drop:
+ ax_simple (expr, aop_pop);
+ break;
+
+ case DW_OP_pick:
+ offset = *op_ptr++;
+ unimplemented (op);
+ break;
+
+ case DW_OP_swap:
+ ax_simple (expr, aop_swap);
+ break;
+
+ case DW_OP_over:
+ /* We can't directly support DW_OP_over, but GCC emits it as
+ part of a sequence to implement signed modulus. As a
+ hack, we recognize this sequence. Note that if GCC ever
+ generates a branch to the middle of this sequence, then
+ we will die somehow. */
+ if (op_end - op_ptr >= 4
+ && op_ptr[0] == DW_OP_over
+ && op_ptr[1] == DW_OP_div
+ && op_ptr[2] == DW_OP_mul
+ && op_ptr[3] == DW_OP_minus)
+ {
+ /* Sign extend the operands. */
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ ax_simple (expr, aop_rem_signed);
+ op_ptr += 4;
+ }
+ else
+ unimplemented (op);
+ break;
+
+ case DW_OP_rot:
+ unimplemented (op);
+ break;
+
+ case DW_OP_deref:
+ case DW_OP_deref_size:
+ {
+ int size;
+
+ if (op == DW_OP_deref_size)
+ size = *op_ptr++;
+ else
+ size = addr_size;
+
+ switch (size)
+ {
+ case 8:
+ ax_simple (expr, aop_ref8);
+ break;
+ case 16:
+ ax_simple (expr, aop_ref16);
+ break;
+ case 32:
+ ax_simple (expr, aop_ref32);
+ break;
+ case 64:
+ ax_simple (expr, aop_ref64);
+ break;
+ default:
+ error (_("Unsupported size %d in %s"),
+ size, dwarf_stack_op_name (op, 1));
+ }
+ }
+ break;
+
+ case DW_OP_abs:
+ /* Sign extend the operand. */
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_dup);
+ ax_const_l (expr, 0);
+ ax_simple (expr, aop_less_signed);
+ ax_simple (expr, aop_log_not);
+ i = ax_goto (expr, aop_if_goto);
+ /* We have to emit 0 - X. */
+ ax_const_l (expr, 0);
+ ax_simple (expr, aop_swap);
+ ax_simple (expr, aop_sub);
+ ax_label (expr, i, expr->len);
+ break;
+
+ case DW_OP_neg:
+ /* No need to sign extend here. */
+ ax_const_l (expr, 0);
+ ax_simple (expr, aop_swap);
+ ax_simple (expr, aop_sub);
+ break;
+
+ case DW_OP_not:
+ /* Sign extend the operand. */
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_bit_not);
+ break;
+
+ case DW_OP_plus_uconst:
+ op_ptr = read_uleb128 (op_ptr, op_end, &reg);
+ /* It would be really weird to emit `DW_OP_plus_uconst 0',
+ but we micro-optimize anyhow. */
+ if (reg != 0)
+ {
+ ax_const_l (expr, reg);
+ ax_simple (expr, aop_add);
+ }
+ break;
+
+ case DW_OP_and:
+ ax_simple (expr, aop_bit_and);
+ break;
+
+ case DW_OP_div:
+ /* Sign extend the operands. */
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ ax_simple (expr, aop_div_signed);
+ break;
+
+ case DW_OP_minus:
+ ax_simple (expr, aop_sub);
+ break;
+
+ case DW_OP_mod:
+ ax_simple (expr, aop_rem_unsigned);
+ break;
+
+ case DW_OP_mul:
+ ax_simple (expr, aop_mul);
+ break;
+
+ case DW_OP_or:
+ ax_simple (expr, aop_bit_or);
+ break;
+
+ case DW_OP_plus:
+ ax_simple (expr, aop_add);
+ break;
+
+ case DW_OP_shl:
+ ax_simple (expr, aop_lsh);
+ break;
+
+ case DW_OP_shr:
+ ax_simple (expr, aop_rsh_unsigned);
+ break;
+
+ case DW_OP_shra:
+ ax_simple (expr, aop_rsh_signed);
+ break;
+
+ case DW_OP_xor:
+ ax_simple (expr, aop_bit_xor);
+ break;
+
+ case DW_OP_le:
+ /* Sign extend the operands. */
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ ax_ext (expr, addr_size_bits);
+ /* Note no swap here: A <= B is !(B < A). */
+ ax_simple (expr, aop_less_signed);
+ ax_simple (expr, aop_log_not);
+ break;
+
+ case DW_OP_ge:
+ /* Sign extend the operands. */
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ /* A >= B is !(A < B). */
+ ax_simple (expr, aop_less_signed);
+ ax_simple (expr, aop_log_not);
+ break;
+
+ case DW_OP_eq:
+ /* Sign extend the operands. */
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ ax_ext (expr, addr_size_bits);
+ /* No need for a second swap here. */
+ ax_simple (expr, aop_equal);
+ break;
+
+ case DW_OP_lt:
+ /* Sign extend the operands. */
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ ax_simple (expr, aop_less_signed);
+ break;
+
+ case DW_OP_gt:
+ /* Sign extend the operands. */
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ ax_ext (expr, addr_size_bits);
+ /* Note no swap here: A > B is B < A. */
+ ax_simple (expr, aop_less_signed);
+ break;
+
+ case DW_OP_ne:
+ /* Sign extend the operands. */
+ ax_ext (expr, addr_size_bits);
+ ax_simple (expr, aop_swap);
+ ax_ext (expr, addr_size_bits);
+ /* No need for a swap here. */
+ ax_simple (expr, aop_equal);
+ ax_simple (expr, aop_log_not);
+ break;
+
+ case DW_OP_call_frame_cfa:
+ unimplemented (op);
+ break;
+
+ case DW_OP_GNU_push_tls_address:
+ unimplemented (op);
+ break;
+
+ case DW_OP_skip:
+ offset = extract_signed_integer (op_ptr, 2, byte_order);
+ op_ptr += 2;
+ i = ax_goto (expr, aop_goto);
+ VEC_safe_push (int, dw_labels, op_ptr + offset - base);
+ VEC_safe_push (int, patches, i);
+ break;
+
+ case DW_OP_bra:
+ offset = extract_signed_integer (op_ptr, 2, byte_order);
+ op_ptr += 2;
+ /* Zero extend the operand. */
+ ax_zero_ext (expr, addr_size_bits);
+ i = ax_goto (expr, aop_if_goto);
+ VEC_safe_push (int, dw_labels, op_ptr + offset - base);
+ VEC_safe_push (int, patches, i);
+ break;
+
+ case DW_OP_nop:
+ break;
+
+ case DW_OP_piece:
+ case DW_OP_bit_piece:
+ {
+ ULONGEST size, offset;
+
+ if (op_ptr - 1 == previous_piece)
+ error (_("Cannot translate empty pieces to agent expressions"));
+ previous_piece = op_ptr - 1;
+
+ op_ptr = read_uleb128 (op_ptr, op_end, &size);
+ if (op == DW_OP_piece)
+ {
+ size *= 8;
+ offset = 0;
+ }
+ else
+ op_ptr = read_uleb128 (op_ptr, op_end, &offset);
+
+ if (bits_collected + size > 8 * sizeof (LONGEST))
+ error (_("Expression pieces exceed word size"));
+
+ /* Access the bits. */
+ switch (loc->kind)
+ {
+ case axs_lvalue_register:
+ ax_reg (expr, loc->u.reg);
+ break;
+
+ case axs_lvalue_memory:
+ /* Offset the pointer, if needed. */
+ if (offset > 8)
+ {
+ ax_const_l (expr, offset / 8);
+ ax_simple (expr, aop_add);
+ offset %= 8;
+ }
+ access_memory (arch, expr, size);
+ break;
+ }
+
+ /* For a bits-big-endian target, shift up what we already
+ have. For a bits-little-endian target, shift up the
+ new data. Note that there is a potential bug here if
+ the DWARF expression leaves multiple values on the
+ stack. */
+ if (bits_collected > 0)
+ {
+ if (bits_big_endian)
+ {
+ ax_simple (expr, aop_swap);
+ ax_const_l (expr, size);
+ ax_simple (expr, aop_lsh);
+ /* We don't need a second swap here, because
+ aop_bit_or is symmetric. */
+ }
+ else
+ {
+ ax_const_l (expr, size);
+ ax_simple (expr, aop_lsh);
+ }
+ ax_simple (expr, aop_bit_or);
+ }
+
+ bits_collected += size;
+ loc->kind = axs_rvalue;
+ }
+ break;
+
+ case DW_OP_GNU_uninit:
+ unimplemented (op);
+
+ case DW_OP_call2:
+ case DW_OP_call4:
+ {
+ struct dwarf2_locexpr_baton block;
+ int size = (op == DW_OP_call2 ? 2 : 4);
+
+ uoffset = extract_unsigned_integer (op_ptr, size, byte_order);
+ op_ptr += size;
+
+ block = dwarf2_fetch_die_location_block (uoffset, per_cu);
+
+ /* DW_OP_call_ref is currently not supported. */
+ gdb_assert (block.per_cu == per_cu);
+
+ compile_dwarf_to_ax (expr, loc, arch, addr_size,
+ block.data, block.data + block.size,
+ per_cu);
+ }
+ break;
+
+ case DW_OP_call_ref:
+ unimplemented (op);
+
+ default:
+ error (_("Unhandled dwarf expression opcode 0x%x"), op);
+ }
+ }