Diffstat (limited to 'tcg/tci')
-rw-r--r--  tcg/tci/README                 120
-rw-r--r--  tcg/tci/tcg-target-con-set.h    21
-rw-r--r--  tcg/tci/tcg-target-con-str.h    11
-rw-r--r--  tcg/tci/tcg-target.c.inc       862
-rw-r--r--  tcg/tci/tcg-target.h           180
5 files changed, 1194 insertions, 0 deletions
diff --git a/tcg/tci/README b/tcg/tci/README
new file mode 100644
index 000000000..f72a40a39
--- /dev/null
+++ b/tcg/tci/README
@@ -0,0 +1,120 @@
+TCG Interpreter (TCI) - Copyright (c) 2011 Stefan Weil.
+
+This file is released under the BSD license.
+
+1) Introduction
+
+TCG (Tiny Code Generator) is a code generator which translates
+code fragments ("basic blocks") from target code (any of the
+targets supported by QEMU) to a code representation which
+can be run on a host.
+
+QEMU can create native code for some hosts (arm, i386, ia64, ppc, ppc64,
+s390, sparc, x86_64). For others, unofficial host support was written.
+
+By adding a code generator for a virtual machine and using an
+interpreter for the generated bytecode, it is possible to
+support (almost) any host.
+
+This is what TCI (Tiny Code Interpreter) does.
+
+2) Implementation
+
+Like every other TCG host frontend, TCI implements the code generator in
+tcg-target.c.inc and tcg-target.h. Both files are in the directory tcg/tci.
+
+The additional file tcg/tci.c adds the interpreter and disassembler.
+
+The bytecode consists of opcodes (which, with only a few exceptions,
+have the same numeric values and semantics as used by TCG) and up to
+six arguments packed into a 32-bit integer. See the comments in tci.c
+for details on the encoding.
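+
+For example, a three-register operation such as add_i32 is emitted by
+tcg_out_op_rrr() in tcg-target.c.inc as a single word with the opcode
+in bits 0-7 and the three register operands in consecutive 4-bit fields
+starting at bit 8.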
+
+3) Usage
+
+For hosts without native TCG, the interpreter TCI must be enabled by
+
+ configure --enable-tcg-interpreter
+
+If configure is called without --enable-tcg-interpreter, it will
+suggest using this option. Enabling it automatically would need
+additional code in configure which would have to be updated whenever
+new native TCG implementations are added.
+
+For hosts with native TCG, the interpreter TCI can be enabled by
+
+ configure --enable-tcg-interpreter
+
+The only difference between running QEMU with TCI and running it without
+TCI should be speed. Especially during development of TCI, it was very
+useful to compare runs with and without TCI. Create /tmp/qemu.log by
+
+ qemu-system-i386 -d in_asm,op_opt,cpu -D /tmp/qemu.log -singlestep
+
+once with the interpreter and once without it, and compare the resulting
+qemu.log files. This is also useful for seeing the effects of additional
+registers or additional opcodes (it is easy to modify the virtual machine).
+It can also be used to verify native TCG backends.
+
+Hosts with native TCG can also enable TCI by claiming to be unsupported:
+
+ configure --cpu=unknown --enable-tcg-interpreter
+
+configure then no longer uses the native linker script (*.ld) for
+user mode emulation.
+
+
+4) Status
+
+TCI needs a special implementation for 32 and 64 bit hosts, 32 and 64 bit
+targets, and for hosts and targets with the same or different endianness.
+
+            | host (le)          host (be)
+            | 32       64        32       64
+------------+---------------------------------------------
+target (le) | s0, u0   s1, u1    s?, u?   s?, u?
+32 bit      |
+            |
+target (le) | sc, uc   s1, u1    s?, u?   s?, u?
+64 bit      |
+            |
+target (be) | sc, u0   sc, uc    s?, u?   s?, u?
+32 bit      |
+            |
+target (be) | sc, uc   sc, uc    s?, u?   s?, u?
+64 bit      |
+            |
+
+System emulation
+s? = untested
+sc = compiles
+s0 = bios works
+s1 = grub works
+s2 = Linux boots
+
+Linux user mode emulation
+u? = untested
+uc = compiles
+u0 = static hello works
+u1 = linux-user-test works
+
+5) Todo list
+
+* TCI is not widely tested. It was written and tested on an x86_64 host
+  running i386 and x86_64 system emulation and Linux user mode.
+ A cross compiled QEMU for i386 host also works with the same basic tests.
+ A cross compiled QEMU for mipsel host works, too. It is terribly slow
+ because I run it in a mips malta emulation, so it is an interpreted
+ emulation in an emulation.
+ A cross compiled QEMU for arm host works (tested with pc bios).
+ A cross compiled QEMU for ppc host works at least partially:
+ i386-linux-user/qemu-i386 can run a simple hello-world program
+ (tested in a ppc emulation).
+
+* Some TCG opcodes are missing in the code generator and/or in the
+  interpreter. These opcodes raise a runtime exception, so it is
+  possible to see where code must be added.
+
+* It might be useful to have a runtime option which selects either the
+  native TCG or TCI; QEMU would then have to include both code generators.
+  Today, selecting TCI is a configure option, so you need two builds of QEMU.
diff --git a/tcg/tci/tcg-target-con-set.h b/tcg/tci/tcg-target-con-set.h
new file mode 100644
index 000000000..ae2dc3b84
--- /dev/null
+++ b/tcg/tci/tcg-target-con-set.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * TCI target-specific constraint sets.
+ * Copyright (c) 2021 Linaro
+ */
+
+/*
+ * C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
+ * Each operand should be a sequence of constraint letters as defined by
+ * tcg-target-con-str.h; the constraint combination is inclusive or.
+ */
+C_O0_I1(r)
+C_O0_I2(r, r)
+C_O0_I3(r, r, r)
+C_O0_I4(r, r, r, r)
+C_O1_I1(r, r)
+C_O1_I2(r, r, r)
+C_O1_I4(r, r, r, r, r)
+C_O2_I1(r, r, r)
+C_O2_I2(r, r, r, r)
+C_O2_I4(r, r, r, r, r, r)
diff --git a/tcg/tci/tcg-target-con-str.h b/tcg/tci/tcg-target-con-str.h
new file mode 100644
index 000000000..87c0f19e9
--- /dev/null
+++ b/tcg/tci/tcg-target-con-str.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define TCI target-specific operand constraints.
+ * Copyright (c) 2021 Linaro
+ */
+
+/*
+ * Define constraint letters for register sets:
+ * REGS(letter, register_mask)
+ */
+REGS('r', MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS))
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
new file mode 100644
index 000000000..0cb16aaa8
--- /dev/null
+++ b/tcg/tci/tcg-target.c.inc
@@ -0,0 +1,862 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2009, 2011 Stefan Weil
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "../tcg-pool.c.inc"
+
+static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
+{
+ switch (op) {
+ case INDEX_op_goto_ptr:
+ return C_O0_I1(r);
+
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld8u_i64:
+ case INDEX_op_ld8s_i64:
+ case INDEX_op_ld16u_i64:
+ case INDEX_op_ld16s_i64:
+ case INDEX_op_ld32u_i64:
+ case INDEX_op_ld32s_i64:
+ case INDEX_op_ld_i64:
+ case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
+ case INDEX_op_neg_i32:
+ case INDEX_op_neg_i64:
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext16u_i64:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ case INDEX_op_extract_i32:
+ case INDEX_op_extract_i64:
+ case INDEX_op_sextract_i32:
+ case INDEX_op_sextract_i64:
+ case INDEX_op_ctpop_i32:
+ case INDEX_op_ctpop_i64:
+ return C_O1_I1(r, r);
+
+ case INDEX_op_st8_i32:
+ case INDEX_op_st16_i32:
+ case INDEX_op_st_i32:
+ case INDEX_op_st8_i64:
+ case INDEX_op_st16_i64:
+ case INDEX_op_st32_i64:
+ case INDEX_op_st_i64:
+ return C_O0_I2(r, r);
+
+ case INDEX_op_div_i32:
+ case INDEX_op_div_i64:
+ case INDEX_op_divu_i32:
+ case INDEX_op_divu_i64:
+ case INDEX_op_rem_i32:
+ case INDEX_op_rem_i64:
+ case INDEX_op_remu_i32:
+ case INDEX_op_remu_i64:
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ case INDEX_op_mul_i32:
+ case INDEX_op_mul_i64:
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ case INDEX_op_andc_i32:
+ case INDEX_op_andc_i64:
+ case INDEX_op_eqv_i32:
+ case INDEX_op_eqv_i64:
+ case INDEX_op_nand_i32:
+ case INDEX_op_nand_i64:
+ case INDEX_op_nor_i32:
+ case INDEX_op_nor_i64:
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
+ case INDEX_op_orc_i32:
+ case INDEX_op_orc_i64:
+ case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
+ case INDEX_op_shl_i32:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i32:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i32:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i32:
+ case INDEX_op_rotr_i64:
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+ case INDEX_op_clz_i32:
+ case INDEX_op_clz_i64:
+ case INDEX_op_ctz_i32:
+ case INDEX_op_ctz_i64:
+ return C_O1_I2(r, r, r);
+
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ return C_O0_I2(r, r);
+
+ case INDEX_op_add2_i32:
+ case INDEX_op_add2_i64:
+ case INDEX_op_sub2_i32:
+ case INDEX_op_sub2_i64:
+ return C_O2_I4(r, r, r, r, r, r);
+
+#if TCG_TARGET_REG_BITS == 32
+ case INDEX_op_brcond2_i32:
+ return C_O0_I4(r, r, r, r);
+#endif
+
+ case INDEX_op_mulu2_i32:
+ case INDEX_op_mulu2_i64:
+ case INDEX_op_muls2_i32:
+ case INDEX_op_muls2_i64:
+ return C_O2_I2(r, r, r, r);
+
+ case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
+ case INDEX_op_setcond2_i32:
+ return C_O1_I4(r, r, r, r, r);
+
+ case INDEX_op_qemu_ld_i32:
+ return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
+ ? C_O1_I1(r, r)
+ : C_O1_I2(r, r, r));
+ case INDEX_op_qemu_ld_i64:
+ return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r)
+ : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, r)
+ : C_O2_I2(r, r, r, r));
+ case INDEX_op_qemu_st_i32:
+ return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
+ ? C_O0_I2(r, r)
+ : C_O0_I3(r, r, r));
+ case INDEX_op_qemu_st_i64:
+ return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r)
+ : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(r, r, r)
+ : C_O0_I4(r, r, r, r));
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R1,
+ TCG_REG_R0,
+};
+
+#if MAX_OPC_PARAM_IARGS != 6
+# error Fix needed, number of supported input arguments changed!
+#endif
+
+/* No call arguments via registers. All will be stored on the "stack". */
+static const int tcg_target_call_iarg_regs[] = { };
+
+static const int tcg_target_call_oarg_regs[] = {
+ TCG_REG_R0,
+#if TCG_TARGET_REG_BITS == 32
+ TCG_REG_R1
+#endif
+};
+
+#ifdef CONFIG_DEBUG_TCG
+static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "r00",
+ "r01",
+ "r02",
+ "r03",
+ "r04",
+ "r05",
+ "r06",
+ "r07",
+ "r08",
+ "r09",
+ "r10",
+ "r11",
+ "r12",
+ "r13",
+ "r14",
+ "r15",
+};
+#endif
+
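+/*
+ * The only relocation type is a 20-bit pc-relative displacement,
+ * measured from the end of the current instruction word and stored in
+ * the high 20 bits of that word.  It is used for branch targets and
+ * constant-pool references.
+ */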
+static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
+ intptr_t value, intptr_t addend)
+{
+ intptr_t diff = value - (intptr_t)(code_ptr + 1);
+
+ tcg_debug_assert(addend == 0);
+ tcg_debug_assert(type == 20);
+
+ if (diff == sextract32(diff, 0, type)) {
+ tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
+ return true;
+ }
+ return false;
+}
+
+static void stack_bounds_check(TCGReg base, target_long offset)
+{
+ if (base == TCG_REG_CALL_STACK) {
+ tcg_debug_assert(offset >= 0);
+ tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
+ TCG_STATIC_FRAME_SIZE));
+ }
+}
+
+static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
+{
+ tcg_insn_unit insn = 0;
+
+ tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
+ insn = deposit32(insn, 0, 8, op);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
+{
+ tcg_insn_unit insn = 0;
+ intptr_t diff;
+
+ /* Special case for exit_tb: map null -> 0. */
+ if (p0 == NULL) {
+ diff = 0;
+ } else {
+ diff = p0 - (void *)(s->code_ptr + 1);
+ tcg_debug_assert(diff != 0);
+ if (diff != sextract32(diff, 0, 20)) {
+ tcg_raise_tb_overflow(s);
+ }
+ }
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 12, 20, diff);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
+{
+ tcg_insn_unit insn = 0;
+
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
+{
+ tcg_out32(s, (uint8_t)op);
+}
+
+static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
+{
+ tcg_insn_unit insn = 0;
+
+ tcg_debug_assert(i1 == sextract32(i1, 0, 20));
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 20, i1);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
+{
+ tcg_insn_unit insn = 0;
+
+ tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
+{
+ tcg_insn_unit insn = 0;
+
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGArg m2)
+{
+ tcg_insn_unit insn = 0;
+
+ tcg_debug_assert(m2 == extract32(m2, 0, 12));
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ insn = deposit32(insn, 20, 12, m2);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2)
+{
+ tcg_insn_unit insn = 0;
+
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ insn = deposit32(insn, 16, 4, r2);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, intptr_t i2)
+{
+ tcg_insn_unit insn = 0;
+
+ tcg_debug_assert(i2 == sextract32(i2, 0, 16));
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ insn = deposit32(insn, 16, 16, i2);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
+ TCGReg r1, uint8_t b2, uint8_t b3)
+{
+ tcg_insn_unit insn = 0;
+
+ tcg_debug_assert(b2 == extract32(b2, 0, 6));
+ tcg_debug_assert(b3 == extract32(b3, 0, 6));
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ insn = deposit32(insn, 16, 6, b2);
+ insn = deposit32(insn, 22, 6, b3);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
+{
+ tcg_insn_unit insn = 0;
+
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ insn = deposit32(insn, 16, 4, r2);
+ insn = deposit32(insn, 20, 4, c3);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rrrm(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2, TCGArg m3)
+{
+ tcg_insn_unit insn = 0;
+
+ tcg_debug_assert(m3 == extract32(m3, 0, 12));
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ insn = deposit32(insn, 16, 4, r2);
+ insn = deposit32(insn, 20, 12, m3);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
+ TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
+{
+ tcg_insn_unit insn = 0;
+
+ tcg_debug_assert(b3 == extract32(b3, 0, 6));
+ tcg_debug_assert(b4 == extract32(b4, 0, 6));
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ insn = deposit32(insn, 16, 4, r2);
+ insn = deposit32(insn, 20, 6, b3);
+ insn = deposit32(insn, 26, 6, b4);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rrrrr(TCGContext *s, TCGOpcode op, TCGReg r0,
+ TCGReg r1, TCGReg r2, TCGReg r3, TCGReg r4)
+{
+ tcg_insn_unit insn = 0;
+
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ insn = deposit32(insn, 16, 4, r2);
+ insn = deposit32(insn, 20, 4, r3);
+ insn = deposit32(insn, 24, 4, r4);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
+{
+ tcg_insn_unit insn = 0;
+
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ insn = deposit32(insn, 16, 4, r2);
+ insn = deposit32(insn, 20, 4, r3);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2,
+ TCGReg r3, TCGReg r4, TCGCond c5)
+{
+ tcg_insn_unit insn = 0;
+
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ insn = deposit32(insn, 16, 4, r2);
+ insn = deposit32(insn, 20, 4, r3);
+ insn = deposit32(insn, 24, 4, r4);
+ insn = deposit32(insn, 28, 4, c5);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
+ TCGReg r0, TCGReg r1, TCGReg r2,
+ TCGReg r3, TCGReg r4, TCGReg r5)
+{
+ tcg_insn_unit insn = 0;
+
+ insn = deposit32(insn, 0, 8, op);
+ insn = deposit32(insn, 8, 4, r0);
+ insn = deposit32(insn, 12, 4, r1);
+ insn = deposit32(insn, 16, 4, r2);
+ insn = deposit32(insn, 20, 4, r3);
+ insn = deposit32(insn, 24, 4, r4);
+ insn = deposit32(insn, 28, 4, r5);
+ tcg_out32(s, insn);
+}
+
+static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
+ TCGReg base, intptr_t offset)
+{
+ stack_bounds_check(base, offset);
+ if (offset != sextract32(offset, 0, 16)) {
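+        /*
+         * The offset does not fit the 16-bit displacement field:
+         * compute the full address into TCG_REG_TMP instead.
+         */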
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
+ tcg_out_op_rrr(s, (TCG_TARGET_REG_BITS == 32
+ ? INDEX_op_add_i32 : INDEX_op_add_i64),
+ TCG_REG_TMP, TCG_REG_TMP, base);
+ base = TCG_REG_TMP;
+ offset = 0;
+ }
+ tcg_out_op_rrs(s, op, val, base, offset);
+}
+
+static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
+ intptr_t offset)
+{
+ switch (type) {
+ case TCG_TYPE_I32:
+ tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
+ break;
+#if TCG_TARGET_REG_BITS == 64
+ case TCG_TYPE_I64:
+ tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
+ break;
+#endif
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+{
+ switch (type) {
+ case TCG_TYPE_I32:
+ tcg_out_op_rr(s, INDEX_op_mov_i32, ret, arg);
+ break;
+#if TCG_TARGET_REG_BITS == 64
+ case TCG_TYPE_I64:
+ tcg_out_op_rr(s, INDEX_op_mov_i64, ret, arg);
+ break;
+#endif
+ default:
+ g_assert_not_reached();
+ }
+ return true;
+}
+
+static void tcg_out_movi(TCGContext *s, TCGType type,
+ TCGReg ret, tcg_target_long arg)
+{
+ switch (type) {
+ case TCG_TYPE_I32:
+#if TCG_TARGET_REG_BITS == 64
+ arg = (int32_t)arg;
+ /* fall through */
+ case TCG_TYPE_I64:
+#endif
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (arg == sextract32(arg, 0, 20)) {
+ tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
+ } else {
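+        /*
+         * The value does not fit into the 20-bit immediate field: put
+         * it into the constant pool and emit tci_movl; the 20-bit
+         * pc-relative reference to the pool entry is resolved later by
+         * patch_reloc().
+         */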
+ tcg_insn_unit insn = 0;
+
+ new_pool_label(s, arg, 20, s->code_ptr, 0);
+ insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
+ insn = deposit32(insn, 8, 4, ret);
+ tcg_out32(s, insn);
+ }
+}
+
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
+ ffi_cif *cif)
+{
+ tcg_insn_unit insn = 0;
+ uint8_t which;
+
+ if (cif->rtype == &ffi_type_void) {
+ which = 0;
+ } else if (cif->rtype->size == 4) {
+ which = 1;
+ } else {
+ tcg_debug_assert(cif->rtype->size == 8);
+ which = 2;
+ }
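+    /*
+     * "which" encodes the kind of return value: 0 for void, 1 for a
+     * 32-bit result, 2 for a 64-bit result.  The function pointer and
+     * the ffi_cif descriptor are stored in the constant pool and
+     * referenced from the call instruction.
+     */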
+ new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
+ insn = deposit32(insn, 0, 8, INDEX_op_call);
+ insn = deposit32(insn, 8, 4, which);
+ tcg_out32(s, insn);
+}
+
+#if TCG_TARGET_REG_BITS == 64
+# define CASE_32_64(x) \
+ case glue(glue(INDEX_op_, x), _i64): \
+ case glue(glue(INDEX_op_, x), _i32):
+# define CASE_64(x) \
+ case glue(glue(INDEX_op_, x), _i64):
+#else
+# define CASE_32_64(x) \
+ case glue(glue(INDEX_op_, x), _i32):
+# define CASE_64(x)
+#endif
+
+static void tcg_out_op(TCGContext *s, TCGOpcode opc,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
+{
+ TCGOpcode exts;
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ tcg_out_op_p(s, opc, (void *)args[0]);
+ break;
+
+ case INDEX_op_goto_tb:
+ tcg_debug_assert(s->tb_jmp_insn_offset == 0);
+ /* indirect jump method. */
+ tcg_out_op_p(s, opc, s->tb_jmp_target_addr + args[0]);
+ set_jmp_reset_offset(s, args[0]);
+ break;
+
+ case INDEX_op_goto_ptr:
+ tcg_out_op_r(s, opc, args[0]);
+ break;
+
+ case INDEX_op_br:
+ tcg_out_op_l(s, opc, arg_label(args[0]));
+ break;
+
+ CASE_32_64(setcond)
+ tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
+ break;
+
+ CASE_32_64(movcond)
+ case INDEX_op_setcond2_i32:
+ tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
+ args[3], args[4], args[5]);
+ break;
+
+ CASE_32_64(ld8u)
+ CASE_32_64(ld8s)
+ CASE_32_64(ld16u)
+ CASE_32_64(ld16s)
+ case INDEX_op_ld_i32:
+ CASE_64(ld32u)
+ CASE_64(ld32s)
+ CASE_64(ld)
+ CASE_32_64(st8)
+ CASE_32_64(st16)
+ case INDEX_op_st_i32:
+ CASE_64(st32)
+ CASE_64(st)
+ tcg_out_ldst(s, opc, args[0], args[1], args[2]);
+ break;
+
+ CASE_32_64(add)
+ CASE_32_64(sub)
+ CASE_32_64(mul)
+ CASE_32_64(and)
+ CASE_32_64(or)
+ CASE_32_64(xor)
+ CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */
+ CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */
+ CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */
+ CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */
+ CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */
+ CASE_32_64(shl)
+ CASE_32_64(shr)
+ CASE_32_64(sar)
+ CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
+ CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */
+ CASE_32_64(div) /* Optional (TCG_TARGET_HAS_div_*). */
+ CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */
+ CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */
+ CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */
+ CASE_32_64(clz) /* Optional (TCG_TARGET_HAS_clz_*). */
+ CASE_32_64(ctz) /* Optional (TCG_TARGET_HAS_ctz_*). */
+ tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
+ break;
+
+ CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */
+ {
+ TCGArg pos = args[3], len = args[4];
+ TCGArg max = opc == INDEX_op_deposit_i32 ? 32 : 64;
+
+ tcg_debug_assert(pos < max);
+ tcg_debug_assert(pos + len <= max);
+
+ tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], pos, len);
+ }
+ break;
+
+ CASE_32_64(extract) /* Optional (TCG_TARGET_HAS_extract_*). */
+ CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
+ {
+ TCGArg pos = args[2], len = args[3];
+ TCGArg max = tcg_op_defs[opc].flags & TCG_OPF_64BIT ? 64 : 32;
+
+ tcg_debug_assert(pos < max);
+ tcg_debug_assert(pos + len <= max);
+
+ tcg_out_op_rrbb(s, opc, args[0], args[1], pos, len);
+ }
+ break;
+
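+    /*
+     * brcond is lowered to a setcond that leaves the comparison result
+     * in TCG_REG_TMP, followed by a single-register brcond bytecode
+     * that tests that temporary.
+     */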
+ CASE_32_64(brcond)
+ tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
+ ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
+ TCG_REG_TMP, args[0], args[1], args[2]);
+ tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
+ break;
+
+ CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */
+ CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */
+ CASE_32_64(ext8s) /* Optional (TCG_TARGET_HAS_ext8s_*). */
+ CASE_32_64(ext8u) /* Optional (TCG_TARGET_HAS_ext8u_*). */
+ CASE_32_64(ext16s) /* Optional (TCG_TARGET_HAS_ext16s_*). */
+ CASE_32_64(ext16u) /* Optional (TCG_TARGET_HAS_ext16u_*). */
+ CASE_64(ext32s) /* Optional (TCG_TARGET_HAS_ext32s_i64). */
+ CASE_64(ext32u) /* Optional (TCG_TARGET_HAS_ext32u_i64). */
+ CASE_64(ext_i32)
+ CASE_64(extu_i32)
+ CASE_32_64(ctpop) /* Optional (TCG_TARGET_HAS_ctpop_*). */
+ case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
+ case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
+ tcg_out_op_rr(s, opc, args[0], args[1]);
+ break;
+
+ case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
+ exts = INDEX_op_ext16s_i32;
+ goto do_bswap;
+ case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
+ exts = INDEX_op_ext16s_i64;
+ goto do_bswap;
+ case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
+ exts = INDEX_op_ext32s_i64;
+ do_bswap:
+ /* The base tci bswaps zero-extend, and ignore high bits. */
+ tcg_out_op_rr(s, opc, args[0], args[1]);
+ if (args[2] & TCG_BSWAP_OS) {
+ tcg_out_op_rr(s, exts, args[0], args[0]);
+ }
+ break;
+
+ CASE_32_64(add2)
+ CASE_32_64(sub2)
+ tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
+ args[3], args[4], args[5]);
+ break;
+
+#if TCG_TARGET_REG_BITS == 32
+ case INDEX_op_brcond2_i32:
+ tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
+ args[0], args[1], args[2], args[3], args[4]);
+ tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
+ break;
+#endif
+
+ CASE_32_64(mulu2)
+ CASE_32_64(muls2)
+ tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
+ break;
+
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_st_i32:
+ if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
+ } else {
+ tcg_out_op_rrrm(s, opc, args[0], args[1], args[2], args[3]);
+ }
+ break;
+
+ case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_st_i64:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
+ } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tcg_out_op_rrrm(s, opc, args[0], args[1], args[2], args[3]);
+ } else {
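+            /*
+             * 32-bit host with a 64-bit guest address: data and address
+             * already use four registers, so the memop index cannot be
+             * carried in the instruction word; pass it in TCG_REG_TMP
+             * instead.
+             */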
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[4]);
+ tcg_out_op_rrrrr(s, opc, args[0], args[1],
+ args[2], args[3], TCG_REG_TMP);
+ }
+ break;
+
+ case INDEX_op_mb:
+ tcg_out_op_v(s, opc);
+ break;
+
+ case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
+ case INDEX_op_mov_i64:
+ case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ default:
+ tcg_abort();
+ }
+}
+
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
+ intptr_t offset)
+{
+ stack_bounds_check(base, offset);
+ switch (type) {
+ case TCG_TYPE_I32:
+ tcg_out_op_rrs(s, INDEX_op_st_i32, val, base, offset);
+ break;
+#if TCG_TARGET_REG_BITS == 64
+ case TCG_TYPE_I64:
+ tcg_out_op_rrs(s, INDEX_op_st_i64, val, base, offset);
+ break;
+#endif
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+ TCGReg base, intptr_t ofs)
+{
+ return false;
+}
+
+/* Test if a constant matches the constraint. */
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+{
+ return ct & TCG_CT_CONST;
+}
+
+static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
+{
+ memset(p, 0, sizeof(*p) * count);
+}
+
+static void tcg_target_init(TCGContext *s)
+{
+#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
+ const char *envval = getenv("DEBUG_TCG");
+ if (envval) {
+ qemu_set_log(strtol(envval, NULL, 0));
+ }
+#endif
+
+ /* The current code uses uint8_t for tcg operations. */
+ tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);
+
+ /* Registers available for 32 bit operations. */
+ tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
+ /* Registers available for 64 bit operations. */
+ tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
+ /*
+ * The interpreter "registers" are in the local stack frame and
+ * cannot be clobbered by the called helper functions. However,
+ * the interpreter assumes a 64-bit return value and assigns to
+ * the return value registers.
+ */
+ tcg_target_call_clobber_regs =
+ MAKE_64BIT_MASK(TCG_REG_R0, 64 / TCG_TARGET_REG_BITS);
+
+ s->reserved_regs = 0;
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
+
+ /* The call arguments come first, followed by the temp storage. */
+ tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
+ TCG_STATIC_FRAME_SIZE);
+}
+
+/* Generate global QEMU prologue and epilogue code. */
+static inline void tcg_target_qemu_prologue(TCGContext *s)
+{
+}
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
new file mode 100644
index 000000000..033e613f2
--- /dev/null
+++ b/tcg/tci/tcg-target.h
@@ -0,0 +1,180 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2009, 2011 Stefan Weil
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * This code implements a TCG which does not generate machine code for some
+ * real target machine but which generates virtual machine code for an
+ * interpreter. Interpreted pseudo code is slow, but it works on any host.
+ *
+ * Some remarks might help in understanding the code:
+ *
+ * "target" or "TCG target" is the machine which runs the generated code.
+ * This differs from the usual meaning in QEMU, where "target" is the
+ * emulated machine. So normally the QEMU host is identical to the TCG target.
+ * Here the TCG target is a virtual machine, but this virtual machine must
+ * use the same word size as the real machine.
+ * Therefore, we need both 32 and 64 bit virtual machines (interpreters).
+ */
+
+#ifndef TCG_TARGET_H
+#define TCG_TARGET_H
+
+#define TCG_TARGET_INTERPRETER 1
+#define TCG_TARGET_INSN_UNIT_SIZE 4
+#define TCG_TARGET_TLB_DISPLACEMENT_BITS 32
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
+
+#if UINTPTR_MAX == UINT32_MAX
+# define TCG_TARGET_REG_BITS 32
+#elif UINTPTR_MAX == UINT64_MAX
+# define TCG_TARGET_REG_BITS 64
+#else
+# error Unknown pointer size for tci target
+#endif
+
+#ifdef CONFIG_DEBUG_TCG
+/* Enable debug output. */
+#define CONFIG_DEBUG_TCG_INTERPRETER
+#endif
+
+/* Optional instructions. */
+
+#define TCG_TARGET_HAS_bswap16_i32 1
+#define TCG_TARGET_HAS_bswap32_i32 1
+#define TCG_TARGET_HAS_div_i32 1
+#define TCG_TARGET_HAS_rem_i32 1
+#define TCG_TARGET_HAS_ext8s_i32 1
+#define TCG_TARGET_HAS_ext16s_i32 1
+#define TCG_TARGET_HAS_ext8u_i32 1
+#define TCG_TARGET_HAS_ext16u_i32 1
+#define TCG_TARGET_HAS_andc_i32 1
+#define TCG_TARGET_HAS_deposit_i32 1
+#define TCG_TARGET_HAS_extract_i32 1
+#define TCG_TARGET_HAS_sextract_i32 1
+#define TCG_TARGET_HAS_extract2_i32 0
+#define TCG_TARGET_HAS_eqv_i32 1
+#define TCG_TARGET_HAS_nand_i32 1
+#define TCG_TARGET_HAS_nor_i32 1
+#define TCG_TARGET_HAS_clz_i32 1
+#define TCG_TARGET_HAS_ctz_i32 1
+#define TCG_TARGET_HAS_ctpop_i32 1
+#define TCG_TARGET_HAS_neg_i32 1
+#define TCG_TARGET_HAS_not_i32 1
+#define TCG_TARGET_HAS_orc_i32 1
+#define TCG_TARGET_HAS_rot_i32 1
+#define TCG_TARGET_HAS_movcond_i32 1
+#define TCG_TARGET_HAS_muls2_i32 1
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_direct_jump 0
+#define TCG_TARGET_HAS_qemu_st8_i32 0
+
+#if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
+#define TCG_TARGET_HAS_bswap16_i64 1
+#define TCG_TARGET_HAS_bswap32_i64 1
+#define TCG_TARGET_HAS_bswap64_i64 1
+#define TCG_TARGET_HAS_deposit_i64 1
+#define TCG_TARGET_HAS_extract_i64 1
+#define TCG_TARGET_HAS_sextract_i64 1
+#define TCG_TARGET_HAS_extract2_i64 0
+#define TCG_TARGET_HAS_div_i64 1
+#define TCG_TARGET_HAS_rem_i64 1
+#define TCG_TARGET_HAS_ext8s_i64 1
+#define TCG_TARGET_HAS_ext16s_i64 1
+#define TCG_TARGET_HAS_ext32s_i64 1
+#define TCG_TARGET_HAS_ext8u_i64 1
+#define TCG_TARGET_HAS_ext16u_i64 1
+#define TCG_TARGET_HAS_ext32u_i64 1
+#define TCG_TARGET_HAS_andc_i64 1
+#define TCG_TARGET_HAS_eqv_i64 1
+#define TCG_TARGET_HAS_nand_i64 1
+#define TCG_TARGET_HAS_nor_i64 1
+#define TCG_TARGET_HAS_clz_i64 1
+#define TCG_TARGET_HAS_ctz_i64 1
+#define TCG_TARGET_HAS_ctpop_i64 1
+#define TCG_TARGET_HAS_neg_i64 1
+#define TCG_TARGET_HAS_not_i64 1
+#define TCG_TARGET_HAS_orc_i64 1
+#define TCG_TARGET_HAS_rot_i64 1
+#define TCG_TARGET_HAS_movcond_i64 1
+#define TCG_TARGET_HAS_muls2_i64 1
+#define TCG_TARGET_HAS_add2_i32 1
+#define TCG_TARGET_HAS_sub2_i32 1
+#define TCG_TARGET_HAS_mulu2_i32 1
+#define TCG_TARGET_HAS_add2_i64 1
+#define TCG_TARGET_HAS_sub2_i64 1
+#define TCG_TARGET_HAS_mulu2_i64 1
+#define TCG_TARGET_HAS_muluh_i64 0
+#define TCG_TARGET_HAS_mulsh_i64 0
+#else
+#define TCG_TARGET_HAS_mulu2_i32 1
+#endif /* TCG_TARGET_REG_BITS == 64 */
+
+/* Number of registers available. */
+#define TCG_TARGET_NB_REGS 16
+
+/* List of registers which are used by TCG. */
+typedef enum {
+ TCG_REG_R0 = 0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+
+ TCG_REG_TMP = TCG_REG_R13,
+ TCG_AREG0 = TCG_REG_R14,
+ TCG_REG_CALL_STACK = TCG_REG_R15,
+} TCGReg;
+
+/* Used for function call generation. */
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+#define TCG_TARGET_STACK_ALIGN 8
+
+#define HAVE_TCG_QEMU_TB_EXEC
+#define TCG_TARGET_NEED_POOL_LABELS
+
+/* We could notice __i386__ or __s390x__ and reduce the barriers depending
+ on the host. But if you want performance, you use the normal backend.
+ We prefer consistency across hosts on this. */
+#define TCG_TARGET_DEFAULT_MO (0)
+
+#define TCG_TARGET_HAS_MEMORY_BSWAP 1
+
+/* not defined -- call should be eliminated at compile time */
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+
+#endif /* TCG_TARGET_H */