path: root/tcg/tci.c
author    Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com> 2023-10-10 11:40:56 +0000
committer Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com> 2023-10-10 11:40:56 +0000
commit    e02cda008591317b1625707ff8e115a4841aa889 (patch)
tree      aee302e3cf8b59ec2d32ec481be3d1afddfc8968 /tcg/tci.c
parent    cc668e6b7e0ffd8c9d130513d12053cf5eda1d3b (diff)
Introduce Virtio-loopback epsilon release:
The epsilon release introduces a new compatibility layer which makes the
virtio-loopback design work with QEMU and the rust-vmm vhost-user backend
without requiring any changes.

Signed-off-by: Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
Change-Id: I52e57563e08a7d0bdc002f8e928ee61ba0c53dd9
Diffstat (limited to 'tcg/tci.c')
-rw-r--r--  tcg/tci.c | 1401
1 file changed, 1401 insertions(+), 0 deletions(-)
diff --git a/tcg/tci.c b/tcg/tci.c
new file mode 100644
index 000000000..e76087cca
--- /dev/null
+++ b/tcg/tci.c
@@ -0,0 +1,1401 @@
+/*
+ * Tiny Code Interpreter for QEMU
+ *
+ * Copyright (c) 2009, 2011, 2016 Stefan Weil
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
+#include "exec/cpu_ldst.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-ldst.h"
+#include "qemu/compiler.h"
+#include <ffi.h>
+
+/*
+ * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
+ * Without assertions, the interpreter runs much faster.
+ */
+#if defined(CONFIG_DEBUG_TCG)
+# define tci_assert(cond) assert(cond)
+#else
+# define tci_assert(cond) ((void)(cond))
+#endif
+
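+/*
+ * Latched "return address" (the current TB pointer), so that helpers
+ * called from interpreted code can unwind back into the TB.
+ */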
+__thread uintptr_t tci_tb_ptr;
+
+static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
+ uint32_t low_index, uint64_t value)
+{
+ regs[low_index] = (uint32_t)value;
+ regs[high_index] = value >> 32;
+}
+
+/* Create a 64 bit value from two 32 bit values. */
+static uint64_t tci_uint64(uint32_t high, uint32_t low)
+{
+ return ((uint64_t)high << 32) + low;
+}
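+
+/* For example, tci_uint64(0x00000012, 0x34567890) == 0x0000001234567890. */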
+
+/*
+ * Load sets of arguments all at once. The naming convention is:
+ * tci_args_<arguments>
+ * where arguments is a sequence of
+ *
+ * b = immediate (bit position)
+ * c = condition (TCGCond)
+ * i = immediate (uint32_t)
+ * I = immediate (tcg_target_ulong)
+ * l = label or pointer
+ * m = immediate (MemOpIdx)
+ * n = immediate (call return length)
+ * r = register
+ * s = signed ldst offset
+ */
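+
+/*
+ * Encoding sketch, as decoded below: each insn is one 32-bit word with
+ * the TCGOpcode in bits [0,8) and operands packed above it; e.g.
+ * tci_args_rrr() reads three 4-bit register fields at bits 8, 12 and 16.
+ */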
+
+static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
+{
+ int diff = sextract32(insn, 12, 20);
+ *l0 = diff ? (void *)tb_ptr + diff : NULL;
+}
+
+static void tci_args_r(uint32_t insn, TCGReg *r0)
+{
+ *r0 = extract32(insn, 8, 4);
+}
+
+static void tci_args_nl(uint32_t insn, const void *tb_ptr,
+ uint8_t *n0, void **l1)
+{
+ *n0 = extract32(insn, 8, 4);
+ *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
+}
+
+static void tci_args_rl(uint32_t insn, const void *tb_ptr,
+ TCGReg *r0, void **l1)
+{
+ *r0 = extract32(insn, 8, 4);
+ *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
+}
+
+static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+}
+
+static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
+{
+ *r0 = extract32(insn, 8, 4);
+ *i1 = sextract32(insn, 12, 20);
+}
+
+static void tci_args_rrm(uint32_t insn, TCGReg *r0,
+ TCGReg *r1, MemOpIdx *m2)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *m2 = extract32(insn, 20, 12);
+}
+
+static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+}
+
+static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *i2 = sextract32(insn, 16, 16);
+}
+
+static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
+ uint8_t *i2, uint8_t *i3)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *i2 = extract32(insn, 16, 6);
+ *i3 = extract32(insn, 22, 6);
+}
+
+static void tci_args_rrrc(uint32_t insn,
+ TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *c3 = extract32(insn, 20, 4);
+}
+
+static void tci_args_rrrm(uint32_t insn,
+ TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *m3 = extract32(insn, 20, 12);
+}
+
+static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, uint8_t *i3, uint8_t *i4)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *i3 = extract32(insn, 20, 6);
+ *i4 = extract32(insn, 26, 6);
+}
+
+static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, TCGReg *r3, TCGReg *r4)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *r3 = extract32(insn, 20, 4);
+ *r4 = extract32(insn, 24, 4);
+}
+
+static void tci_args_rrrr(uint32_t insn,
+ TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *r3 = extract32(insn, 20, 4);
+}
+
+static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *r3 = extract32(insn, 20, 4);
+ *r4 = extract32(insn, 24, 4);
+ *c5 = extract32(insn, 28, 4);
+}
+
+static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
+ TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
+{
+ *r0 = extract32(insn, 8, 4);
+ *r1 = extract32(insn, 12, 4);
+ *r2 = extract32(insn, 16, 4);
+ *r3 = extract32(insn, 20, 4);
+ *r4 = extract32(insn, 24, 4);
+ *r5 = extract32(insn, 28, 4);
+}
+
+static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
+{
+ bool result = false;
+ int32_t i0 = u0;
+ int32_t i1 = u1;
+ switch (condition) {
+ case TCG_COND_EQ:
+ result = (u0 == u1);
+ break;
+ case TCG_COND_NE:
+ result = (u0 != u1);
+ break;
+ case TCG_COND_LT:
+ result = (i0 < i1);
+ break;
+ case TCG_COND_GE:
+ result = (i0 >= i1);
+ break;
+ case TCG_COND_LE:
+ result = (i0 <= i1);
+ break;
+ case TCG_COND_GT:
+ result = (i0 > i1);
+ break;
+ case TCG_COND_LTU:
+ result = (u0 < u1);
+ break;
+ case TCG_COND_GEU:
+ result = (u0 >= u1);
+ break;
+ case TCG_COND_LEU:
+ result = (u0 <= u1);
+ break;
+ case TCG_COND_GTU:
+ result = (u0 > u1);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return result;
+}
+
+static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
+{
+ bool result = false;
+ int64_t i0 = u0;
+ int64_t i1 = u1;
+ switch (condition) {
+ case TCG_COND_EQ:
+ result = (u0 == u1);
+ break;
+ case TCG_COND_NE:
+ result = (u0 != u1);
+ break;
+ case TCG_COND_LT:
+ result = (i0 < i1);
+ break;
+ case TCG_COND_GE:
+ result = (i0 >= i1);
+ break;
+ case TCG_COND_LE:
+ result = (i0 <= i1);
+ break;
+ case TCG_COND_GT:
+ result = (i0 > i1);
+ break;
+ case TCG_COND_LTU:
+ result = (u0 < u1);
+ break;
+ case TCG_COND_GEU:
+ result = (u0 >= u1);
+ break;
+ case TCG_COND_LEU:
+ result = (u0 <= u1);
+ break;
+ case TCG_COND_GTU:
+ result = (u0 > u1);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return result;
+}
+
+static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
+ MemOpIdx oi, const void *tb_ptr)
+{
+ MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
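+ /* Only the size, sign and byte-swap bits matter for dispatch below. */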
+ uintptr_t ra = (uintptr_t)tb_ptr;
+
+#ifdef CONFIG_SOFTMMU
+ switch (mop) {
+ case MO_UB:
+ return helper_ret_ldub_mmu(env, taddr, oi, ra);
+ case MO_SB:
+ return helper_ret_ldsb_mmu(env, taddr, oi, ra);
+ case MO_LEUW:
+ return helper_le_lduw_mmu(env, taddr, oi, ra);
+ case MO_LESW:
+ return helper_le_ldsw_mmu(env, taddr, oi, ra);
+ case MO_LEUL:
+ return helper_le_ldul_mmu(env, taddr, oi, ra);
+ case MO_LESL:
+ return helper_le_ldsl_mmu(env, taddr, oi, ra);
+ case MO_LEQ:
+ return helper_le_ldq_mmu(env, taddr, oi, ra);
+ case MO_BEUW:
+ return helper_be_lduw_mmu(env, taddr, oi, ra);
+ case MO_BESW:
+ return helper_be_ldsw_mmu(env, taddr, oi, ra);
+ case MO_BEUL:
+ return helper_be_ldul_mmu(env, taddr, oi, ra);
+ case MO_BESL:
+ return helper_be_ldsl_mmu(env, taddr, oi, ra);
+ case MO_BEQ:
+ return helper_be_ldq_mmu(env, taddr, oi, ra);
+ default:
+ g_assert_not_reached();
+ }
+#else
+ void *haddr = g2h(env_cpu(env), taddr);
+ uint64_t ret;
+
+ set_helper_retaddr(ra);
+ switch (mop) {
+ case MO_UB:
+ ret = ldub_p(haddr);
+ break;
+ case MO_SB:
+ ret = ldsb_p(haddr);
+ break;
+ case MO_LEUW:
+ ret = lduw_le_p(haddr);
+ break;
+ case MO_LESW:
+ ret = ldsw_le_p(haddr);
+ break;
+ case MO_LEUL:
+ ret = (uint32_t)ldl_le_p(haddr);
+ break;
+ case MO_LESL:
+ ret = (int32_t)ldl_le_p(haddr);
+ break;
+ case MO_LEQ:
+ ret = ldq_le_p(haddr);
+ break;
+ case MO_BEUW:
+ ret = lduw_be_p(haddr);
+ break;
+ case MO_BESW:
+ ret = ldsw_be_p(haddr);
+ break;
+ case MO_BEUL:
+ ret = (uint32_t)ldl_be_p(haddr);
+ break;
+ case MO_BESL:
+ ret = (int32_t)ldl_be_p(haddr);
+ break;
+ case MO_BEQ:
+ ret = ldq_be_p(haddr);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ clear_helper_retaddr();
+ return ret;
+#endif
+}
+
+static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
+ MemOpIdx oi, const void *tb_ptr)
+{
+ MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
+ uintptr_t ra = (uintptr_t)tb_ptr;
+
+#ifdef CONFIG_SOFTMMU
+ switch (mop) {
+ case MO_UB:
+ helper_ret_stb_mmu(env, taddr, val, oi, ra);
+ break;
+ case MO_LEUW:
+ helper_le_stw_mmu(env, taddr, val, oi, ra);
+ break;
+ case MO_LEUL:
+ helper_le_stl_mmu(env, taddr, val, oi, ra);
+ break;
+ case MO_LEQ:
+ helper_le_stq_mmu(env, taddr, val, oi, ra);
+ break;
+ case MO_BEUW:
+ helper_be_stw_mmu(env, taddr, val, oi, ra);
+ break;
+ case MO_BEUL:
+ helper_be_stl_mmu(env, taddr, val, oi, ra);
+ break;
+ case MO_BEQ:
+ helper_be_stq_mmu(env, taddr, val, oi, ra);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+#else
+ void *haddr = g2h(env_cpu(env), taddr);
+
+ set_helper_retaddr(ra);
+ switch (mop) {
+ case MO_UB:
+ stb_p(haddr, val);
+ break;
+ case MO_LEUW:
+ stw_le_p(haddr, val);
+ break;
+ case MO_LEUL:
+ stl_le_p(haddr, val);
+ break;
+ case MO_LEQ:
+ stq_le_p(haddr, val);
+ break;
+ case MO_BEUW:
+ stw_be_p(haddr, val);
+ break;
+ case MO_BEUL:
+ stl_be_p(haddr, val);
+ break;
+ case MO_BEQ:
+ stq_be_p(haddr, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ clear_helper_retaddr();
+#endif
+}
+
+#if TCG_TARGET_REG_BITS == 64
+# define CASE_32_64(x) \
+ case glue(glue(INDEX_op_, x), _i64): \
+ case glue(glue(INDEX_op_, x), _i32):
+# define CASE_64(x) \
+ case glue(glue(INDEX_op_, x), _i64):
+#else
+# define CASE_32_64(x) \
+ case glue(glue(INDEX_op_, x), _i32):
+# define CASE_64(x)
+#endif
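+
+/*
+ * For example, on a 64-bit host CASE_32_64(add) expands to
+ *     case INDEX_op_add_i64:
+ *     case INDEX_op_add_i32:
+ * so one switch arm handles both operand widths.
+ */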
+
+/* Interpret pseudo code in tb. */
+/*
+ * Disable CFI checks.
+ * One possible operation in the pseudo code is a call to binary code.
+ * Therefore, disable CFI checks in the interpreter function.
+ */
+uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
+ const void *v_tb_ptr)
+{
+ const uint32_t *tb_ptr = v_tb_ptr;
+ tcg_target_ulong regs[TCG_TARGET_NB_REGS];
+ uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
+ / sizeof(uint64_t)];
+ void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)];
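+ /*
+ * For calls, stack[] serves as both libffi's return-value buffer and
+ * the backing store for the argument pointers in call_slots[].
+ */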
+
+ regs[TCG_AREG0] = (tcg_target_ulong)env;
+ regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
+ /* Other call_slots entries initialized at first use (see below). */
+ call_slots[0] = NULL;
+ tci_assert(tb_ptr);
+
+ for (;;) {
+ uint32_t insn;
+ TCGOpcode opc;
+ TCGReg r0, r1, r2, r3, r4, r5;
+ tcg_target_ulong t1;
+ TCGCond condition;
+ target_ulong taddr;
+ uint8_t pos, len;
+ uint32_t tmp32;
+ uint64_t tmp64;
+ uint64_t T1, T2;
+ MemOpIdx oi;
+ int32_t ofs;
+ void *ptr;
+
+ insn = *tb_ptr++;
+ opc = extract32(insn, 0, 8);
+
+ switch (opc) {
+ case INDEX_op_call:
+ /*
+ * Set up the ffi_avalue array once, delayed until now
+ * because many TB's do not make any calls. In tcg_gen_callN,
+ * we arranged for every real argument to be "left-aligned"
+ * in each 64-bit slot.
+ */
+ if (unlikely(call_slots[0] == NULL)) {
+ for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) {
+ call_slots[i] = &stack[i];
+ }
+ }
+
+ tci_args_nl(insn, tb_ptr, &len, &ptr);
+
+ /* Helper functions may need to access the "return address" */
+ tci_tb_ptr = (uintptr_t)tb_ptr;
+
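+ /*
+ * ptr names a two-slot descriptor built by the TCI backend:
+ * pptr[0] is the helper function, pptr[1] its ffi_cif
+ * (note that ffi_call takes the cif first).
+ */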
+ {
+ void **pptr = ptr;
+ ffi_call(pptr[1], pptr[0], stack, call_slots);
+ }
+
+ /* Any result winds up "left-aligned" in the stack[0] slot. */
+ switch (len) {
+ case 0: /* void */
+ break;
+ case 1: /* uint32_t */
+ /*
+ * Note that libffi has an odd special case in that it will
+ * always widen an integral result to ffi_arg.
+ */
+ if (sizeof(ffi_arg) == 4) {
+ regs[TCG_REG_R0] = *(uint32_t *)stack;
+ break;
+ }
+ /* fall through */
+ case 2: /* uint64_t */
+ if (TCG_TARGET_REG_BITS == 32) {
+ tci_write_reg64(regs, TCG_REG_R1, TCG_REG_R0, stack[0]);
+ } else {
+ regs[TCG_REG_R0] = stack[0];
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case INDEX_op_br:
+ tci_args_l(insn, tb_ptr, &ptr);
+ tb_ptr = ptr;
+ continue;
+ case INDEX_op_setcond_i32:
+ tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
+ regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
+ break;
+ case INDEX_op_movcond_i32:
+ tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
+ tmp32 = tci_compare32(regs[r1], regs[r2], condition);
+ regs[r0] = regs[tmp32 ? r3 : r4];
+ break;
+#if TCG_TARGET_REG_BITS == 32
+ case INDEX_op_setcond2_i32:
+ tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
+ T1 = tci_uint64(regs[r2], regs[r1]);
+ T2 = tci_uint64(regs[r4], regs[r3]);
+ regs[r0] = tci_compare64(T1, T2, condition);
+ break;
+#elif TCG_TARGET_REG_BITS == 64
+ case INDEX_op_setcond_i64:
+ tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
+ regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
+ break;
+ case INDEX_op_movcond_i64:
+ tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
+ tmp32 = tci_compare64(regs[r1], regs[r2], condition);
+ regs[r0] = regs[tmp32 ? r3 : r4];
+ break;
+#endif
+ CASE_32_64(mov)
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = regs[r1];
+ break;
+ case INDEX_op_tci_movi:
+ tci_args_ri(insn, &r0, &t1);
+ regs[r0] = t1;
+ break;
+ case INDEX_op_tci_movl:
+ tci_args_rl(insn, tb_ptr, &r0, &ptr);
+ regs[r0] = *(tcg_target_ulong *)ptr;
+ break;
+
+ /* Load/store operations (32 bit). */
+
+ CASE_32_64(ld8u)
+ tci_args_rrs(insn, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(uint8_t *)ptr;
+ break;
+ CASE_32_64(ld8s)
+ tci_args_rrs(insn, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(int8_t *)ptr;
+ break;
+ CASE_32_64(ld16u)
+ tci_args_rrs(insn, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(uint16_t *)ptr;
+ break;
+ CASE_32_64(ld16s)
+ tci_args_rrs(insn, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(int16_t *)ptr;
+ break;
+ case INDEX_op_ld_i32:
+ CASE_64(ld32u)
+ tci_args_rrs(insn, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(uint32_t *)ptr;
+ break;
+ CASE_32_64(st8)
+ tci_args_rrs(insn, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ *(uint8_t *)ptr = regs[r0];
+ break;
+ CASE_32_64(st16)
+ tci_args_rrs(insn, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ *(uint16_t *)ptr = regs[r0];
+ break;
+ case INDEX_op_st_i32:
+ CASE_64(st32)
+ tci_args_rrs(insn, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ *(uint32_t *)ptr = regs[r0];
+ break;
+
+ /* Arithmetic operations (mixed 32/64 bit). */
+
+ CASE_32_64(add)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] + regs[r2];
+ break;
+ CASE_32_64(sub)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] - regs[r2];
+ break;
+ CASE_32_64(mul)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] * regs[r2];
+ break;
+ CASE_32_64(and)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] & regs[r2];
+ break;
+ CASE_32_64(or)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] | regs[r2];
+ break;
+ CASE_32_64(xor)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] ^ regs[r2];
+ break;
+#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
+ CASE_32_64(andc)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] & ~regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
+ CASE_32_64(orc)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] | ~regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
+ CASE_32_64(eqv)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = ~(regs[r1] ^ regs[r2]);
+ break;
+#endif
+#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
+ CASE_32_64(nand)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = ~(regs[r1] & regs[r2]);
+ break;
+#endif
+#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
+ CASE_32_64(nor)
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = ~(regs[r1] | regs[r2]);
+ break;
+#endif
+
+ /* Arithmetic operations (32 bit). */
+
+ case INDEX_op_div_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
+ break;
+ case INDEX_op_divu_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
+ break;
+ case INDEX_op_rem_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
+ break;
+ case INDEX_op_remu_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
+ break;
+#if TCG_TARGET_HAS_clz_i32
+ case INDEX_op_clz_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ tmp32 = regs[r1];
+ regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_ctz_i32
+ case INDEX_op_ctz_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ tmp32 = regs[r1];
+ regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_ctpop_i32
+ case INDEX_op_ctpop_i32:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = ctpop32(regs[r1]);
+ break;
+#endif
+
+ /* Shift/rotate operations (32 bit). */
+
+ case INDEX_op_shl_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
+ break;
+ case INDEX_op_shr_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
+ break;
+ case INDEX_op_sar_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
+ break;
+#if TCG_TARGET_HAS_rot_i32
+ case INDEX_op_rotl_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = rol32(regs[r1], regs[r2] & 31);
+ break;
+ case INDEX_op_rotr_i32:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = ror32(regs[r1], regs[r2] & 31);
+ break;
+#endif
+#if TCG_TARGET_HAS_deposit_i32
+ case INDEX_op_deposit_i32:
+ tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
+ regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
+ break;
+#endif
+#if TCG_TARGET_HAS_extract_i32
+ case INDEX_op_extract_i32:
+ tci_args_rrbb(insn, &r0, &r1, &pos, &len);
+ regs[r0] = extract32(regs[r1], pos, len);
+ break;
+#endif
+#if TCG_TARGET_HAS_sextract_i32
+ case INDEX_op_sextract_i32:
+ tci_args_rrbb(insn, &r0, &r1, &pos, &len);
+ regs[r0] = sextract32(regs[r1], pos, len);
+ break;
+#endif
+ case INDEX_op_brcond_i32:
+ tci_args_rl(insn, tb_ptr, &r0, &ptr);
+ if ((uint32_t)regs[r0]) {
+ tb_ptr = ptr;
+ }
+ break;
+#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
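+ /*
+ * Double-word add: each operand is a (low, high) register pair,
+ * widened via tci_uint64() and split back with tci_write_reg64().
+ */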
+ case INDEX_op_add2_i32:
+ tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
+ T1 = tci_uint64(regs[r3], regs[r2]);
+ T2 = tci_uint64(regs[r5], regs[r4]);
+ tci_write_reg64(regs, r1, r0, T1 + T2);
+ break;
+#endif
+#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
+ case INDEX_op_sub2_i32:
+ tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
+ T1 = tci_uint64(regs[r3], regs[r2]);
+ T2 = tci_uint64(regs[r5], regs[r4]);
+ tci_write_reg64(regs, r1, r0, T1 - T2);
+ break;
+#endif
+#if TCG_TARGET_HAS_mulu2_i32
+ case INDEX_op_mulu2_i32:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
+ tci_write_reg64(regs, r1, r0, tmp64);
+ break;
+#endif
+#if TCG_TARGET_HAS_muls2_i32
+ case INDEX_op_muls2_i32:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
+ tci_write_reg64(regs, r1, r0, tmp64);
+ break;
+#endif
+#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
+ CASE_32_64(ext8s)
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = (int8_t)regs[r1];
+ break;
+#endif
+#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
+ TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
+ CASE_32_64(ext16s)
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = (int16_t)regs[r1];
+ break;
+#endif
+#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
+ CASE_32_64(ext8u)
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = (uint8_t)regs[r1];
+ break;
+#endif
+#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
+ CASE_32_64(ext16u)
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = (uint16_t)regs[r1];
+ break;
+#endif
+#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
+ CASE_32_64(bswap16)
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = bswap16(regs[r1]);
+ break;
+#endif
+#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
+ CASE_32_64(bswap32)
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = bswap32(regs[r1]);
+ break;
+#endif
+#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
+ CASE_32_64(not)
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = ~regs[r1];
+ break;
+#endif
+#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
+ CASE_32_64(neg)
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = -regs[r1];
+ break;
+#endif
+#if TCG_TARGET_REG_BITS == 64
+ /* Load/store operations (64 bit). */
+
+ case INDEX_op_ld32s_i64:
+ tci_args_rrs(insn, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(int32_t *)ptr;
+ break;
+ case INDEX_op_ld_i64:
+ tci_args_rrs(insn, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ regs[r0] = *(uint64_t *)ptr;
+ break;
+ case INDEX_op_st_i64:
+ tci_args_rrs(insn, &r0, &r1, &ofs);
+ ptr = (void *)(regs[r1] + ofs);
+ *(uint64_t *)ptr = regs[r0];
+ break;
+
+ /* Arithmetic operations (64 bit). */
+
+ case INDEX_op_div_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
+ break;
+ case INDEX_op_divu_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
+ break;
+ case INDEX_op_rem_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
+ break;
+ case INDEX_op_remu_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
+ break;
+#if TCG_TARGET_HAS_clz_i64
+ case INDEX_op_clz_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_ctz_i64
+ case INDEX_op_ctz_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
+ break;
+#endif
+#if TCG_TARGET_HAS_ctpop_i64
+ case INDEX_op_ctpop_i64:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = ctpop64(regs[r1]);
+ break;
+#endif
+#if TCG_TARGET_HAS_mulu2_i64
+ case INDEX_op_mulu2_i64:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
+ break;
+#endif
+#if TCG_TARGET_HAS_muls2_i64
+ case INDEX_op_muls2_i64:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
+ break;
+#endif
+#if TCG_TARGET_HAS_add2_i64
+ case INDEX_op_add2_i64:
+ tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
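+ /* (T1 < regs[r2]) is the carry out of the low-word addition. */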
+ T1 = regs[r2] + regs[r4];
+ T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
+ regs[r0] = T1;
+ regs[r1] = T2;
+ break;
+#endif
+#if TCG_TARGET_HAS_sub2_i64
+ case INDEX_op_sub2_i64:
+ tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
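+ /* (regs[r2] < regs[r4]) is the borrow out of the low-word subtraction. */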
+ T1 = regs[r2] - regs[r4];
+ T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
+ regs[r0] = T1;
+ regs[r1] = T2;
+ break;
+#endif
+
+ /* Shift/rotate operations (64 bit). */
+
+ case INDEX_op_shl_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] << (regs[r2] & 63);
+ break;
+ case INDEX_op_shr_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = regs[r1] >> (regs[r2] & 63);
+ break;
+ case INDEX_op_sar_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
+ break;
+#if TCG_TARGET_HAS_rot_i64
+ case INDEX_op_rotl_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = rol64(regs[r1], regs[r2] & 63);
+ break;
+ case INDEX_op_rotr_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ regs[r0] = ror64(regs[r1], regs[r2] & 63);
+ break;
+#endif
+#if TCG_TARGET_HAS_deposit_i64
+ case INDEX_op_deposit_i64:
+ tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
+ regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
+ break;
+#endif
+#if TCG_TARGET_HAS_extract_i64
+ case INDEX_op_extract_i64:
+ tci_args_rrbb(insn, &r0, &r1, &pos, &len);
+ regs[r0] = extract64(regs[r1], pos, len);
+ break;
+#endif
+#if TCG_TARGET_HAS_sextract_i64
+ case INDEX_op_sextract_i64:
+ tci_args_rrbb(insn, &r0, &r1, &pos, &len);
+ regs[r0] = sextract64(regs[r1], pos, len);
+ break;
+#endif
+ case INDEX_op_brcond_i64:
+ tci_args_rl(insn, tb_ptr, &r0, &ptr);
+ if (regs[r0]) {
+ tb_ptr = ptr;
+ }
+ break;
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext_i32_i64:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = (int32_t)regs[r1];
+ break;
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_extu_i32_i64:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = (uint32_t)regs[r1];
+ break;
+#if TCG_TARGET_HAS_bswap64_i64
+ case INDEX_op_bswap64_i64:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = bswap64(regs[r1]);
+ break;
+#endif
+#endif /* TCG_TARGET_REG_BITS == 64 */
+
+ /* QEMU specific operations. */
+
+ case INDEX_op_exit_tb:
+ tci_args_l(insn, tb_ptr, &ptr);
+ return (uintptr_t)ptr;
+
+ case INDEX_op_goto_tb:
+ tci_args_l(insn, tb_ptr, &ptr);
+ tb_ptr = *(void **)ptr;
+ break;
+
+ case INDEX_op_goto_ptr:
+ tci_args_r(insn, &r0);
+ ptr = (void *)regs[r0];
+ if (!ptr) {
+ return 0;
+ }
+ tb_ptr = ptr;
+ break;
+
+ case INDEX_op_qemu_ld_i32:
+ if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ taddr = regs[r1];
+ } else {
+ tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
+ taddr = tci_uint64(regs[r2], regs[r1]);
+ }
+ tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
+ regs[r0] = tmp32;
+ break;
+
+ case INDEX_op_qemu_ld_i64:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ taddr = regs[r1];
+ } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
+ taddr = regs[r2];
+ } else {
+ tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
+ taddr = tci_uint64(regs[r3], regs[r2]);
+ oi = regs[r4];
+ }
+ tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
+ if (TCG_TARGET_REG_BITS == 32) {
+ tci_write_reg64(regs, r1, r0, tmp64);
+ } else {
+ regs[r0] = tmp64;
+ }
+ break;
+
+ case INDEX_op_qemu_st_i32:
+ if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ taddr = regs[r1];
+ } else {
+ tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
+ taddr = tci_uint64(regs[r2], regs[r1]);
+ }
+ tmp32 = regs[r0];
+ tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
+ break;
+
+ case INDEX_op_qemu_st_i64:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ taddr = regs[r1];
+ tmp64 = regs[r0];
+ } else {
+ if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+ tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
+ taddr = regs[r2];
+ } else {
+ tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
+ taddr = tci_uint64(regs[r3], regs[r2]);
+ oi = regs[r4];
+ }
+ tmp64 = tci_uint64(regs[r1], regs[r0]);
+ }
+ tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
+ break;
+
+ case INDEX_op_mb:
+ /* Ensure ordering for all kinds */
+ smp_mb();
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+}
+
+/*
+ * Disassembler that matches the interpreter
+ */
+
+static const char *str_r(TCGReg r)
+{
+ static const char regs[TCG_TARGET_NB_REGS][4] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
+ };
+
+ QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
+ QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);
+
+ assert((unsigned)r < TCG_TARGET_NB_REGS);
+ return regs[r];
+}
+
+static const char *str_c(TCGCond c)
+{
+ static const char cond[16][8] = {
+ [TCG_COND_NEVER] = "never",
+ [TCG_COND_ALWAYS] = "always",
+ [TCG_COND_EQ] = "eq",
+ [TCG_COND_NE] = "ne",
+ [TCG_COND_LT] = "lt",
+ [TCG_COND_GE] = "ge",
+ [TCG_COND_LE] = "le",
+ [TCG_COND_GT] = "gt",
+ [TCG_COND_LTU] = "ltu",
+ [TCG_COND_GEU] = "geu",
+ [TCG_COND_LEU] = "leu",
+ [TCG_COND_GTU] = "gtu",
+ };
+
+ assert((unsigned)c < ARRAY_SIZE(cond));
+ assert(cond[c][0] != 0);
+ return cond[c];
+}
+
+/* Disassemble TCI bytecode. */
+int print_insn_tci(bfd_vma addr, disassemble_info *info)
+{
+ const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
+ const TCGOpDef *def;
+ const char *op_name;
+ uint32_t insn;
+ TCGOpcode op;
+ TCGReg r0, r1, r2, r3, r4, r5;
+ tcg_target_ulong i1;
+ int32_t s2;
+ TCGCond c;
+ MemOpIdx oi;
+ uint8_t pos, len;
+ void *ptr;
+
+ /* TCI is always the host, so we don't need to load indirect. */
+ insn = *tb_ptr++;
+
+ info->fprintf_func(info->stream, "%08x ", insn);
+
+ op = extract32(insn, 0, 8);
+ def = &tcg_op_defs[op];
+ op_name = def->name;
+
+ switch (op) {
+ case INDEX_op_br:
+ case INDEX_op_exit_tb:
+ case INDEX_op_goto_tb:
+ tci_args_l(insn, tb_ptr, &ptr);
+ info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
+ break;
+
+ case INDEX_op_goto_ptr:
+ tci_args_r(insn, &r0);
+ info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
+ break;
+
+ case INDEX_op_call:
+ tci_args_nl(insn, tb_ptr, &len, &ptr);
+ info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
+ break;
+
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ tci_args_rl(insn, tb_ptr, &r0, &ptr);
+ info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
+ op_name, str_r(r0), ptr);
+ break;
+
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ tci_args_rrrc(insn, &r0, &r1, &r2, &c);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
+ break;
+
+ case INDEX_op_tci_movi:
+ tci_args_ri(insn, &r0, &i1);
+ info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
+ op_name, str_r(r0), i1);
+ break;
+
+ case INDEX_op_tci_movl:
+ tci_args_rl(insn, tb_ptr, &r0, &ptr);
+ info->fprintf_func(info->stream, "%-12s %s, %p",
+ op_name, str_r(r0), ptr);
+ break;
+
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
+ case INDEX_op_ld32u_i64:
+ case INDEX_op_ld32s_i64:
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld_i64:
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ case INDEX_op_st32_i64:
+ case INDEX_op_st_i32:
+ case INDEX_op_st_i64:
+ tci_args_rrs(insn, &r0, &r1, &s2);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %d",
+ op_name, str_r(r0), str_r(r1), s2);
+ break;
+
+ case INDEX_op_mov_i32:
+ case INDEX_op_mov_i64:
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
+ case INDEX_op_neg_i32:
+ case INDEX_op_neg_i64:
+ case INDEX_op_ctpop_i32:
+ case INDEX_op_ctpop_i64:
+ tci_args_rr(insn, &r0, &r1);
+ info->fprintf_func(info->stream, "%-12s %s, %s",
+ op_name, str_r(r0), str_r(r1));
+ break;
+
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ case INDEX_op_mul_i32:
+ case INDEX_op_mul_i64:
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
+ case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
+ case INDEX_op_andc_i32:
+ case INDEX_op_andc_i64:
+ case INDEX_op_orc_i32:
+ case INDEX_op_orc_i64:
+ case INDEX_op_eqv_i32:
+ case INDEX_op_eqv_i64:
+ case INDEX_op_nand_i32:
+ case INDEX_op_nand_i64:
+ case INDEX_op_nor_i32:
+ case INDEX_op_nor_i64:
+ case INDEX_op_div_i32:
+ case INDEX_op_div_i64:
+ case INDEX_op_rem_i32:
+ case INDEX_op_rem_i64:
+ case INDEX_op_divu_i32:
+ case INDEX_op_divu_i64:
+ case INDEX_op_remu_i32:
+ case INDEX_op_remu_i64:
+ case INDEX_op_shl_i32:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i32:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i32:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i32:
+ case INDEX_op_rotr_i64:
+ case INDEX_op_clz_i32:
+ case INDEX_op_clz_i64:
+ case INDEX_op_ctz_i32:
+ case INDEX_op_ctz_i64:
+ tci_args_rrr(insn, &r0, &r1, &r2);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2));
+ break;
+
+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+ tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
+ op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
+ break;
+
+ case INDEX_op_extract_i32:
+ case INDEX_op_extract_i64:
+ case INDEX_op_sextract_i32:
+ case INDEX_op_sextract_i64:
+ tci_args_rrbb(insn, &r0, &r1, &pos, &len);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %d, %d",
+ op_name, str_r(r0), str_r(r1), pos, len);
+ break;
+
+ case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
+ case INDEX_op_setcond2_i32:
+ tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2),
+ str_r(r3), str_r(r4), str_c(c));
+ break;
+
+ case INDEX_op_mulu2_i32:
+ case INDEX_op_mulu2_i64:
+ case INDEX_op_muls2_i32:
+ case INDEX_op_muls2_i64:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1),
+ str_r(r2), str_r(r3));
+ break;
+
+ case INDEX_op_add2_i32:
+ case INDEX_op_add2_i64:
+ case INDEX_op_sub2_i32:
+ case INDEX_op_sub2_i64:
+ tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1), str_r(r2),
+ str_r(r3), str_r(r4), str_r(r5));
+ break;
+
+ case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_st_i64:
+ len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
+ goto do_qemu_ldst;
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_st_i32:
+ len = 1;
+ do_qemu_ldst:
+ len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
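+ /*
+ * len now counts register operands: value words plus guest-address
+ * words; totals of 2, 3 or 4 select the insn format.
+ */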
+ switch (len) {
+ case 2:
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %x",
+ op_name, str_r(r0), str_r(r1), oi);
+ break;
+ case 3:
+ tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
+ op_name, str_r(r0), str_r(r1), str_r(r2), oi);
+ break;
+ case 4:
+ tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1),
+ str_r(r2), str_r(r3), str_r(r4));
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case 0:
+ /* tcg_out_nop_fill uses zeros */
+ if (insn == 0) {
+ info->fprintf_func(info->stream, "align");
+ break;
+ }
+ /* fall through */
+
+ default:
+ info->fprintf_func(info->stream, "illegal opcode %d", op);
+ break;
+ }
+
+ return sizeof(insn);
+}