author     2023-10-10 11:40:56 +0000
committer  2023-10-10 11:40:56 +0000
commit     e02cda008591317b1625707ff8e115a4841aa889 (patch)
tree       aee302e3cf8b59ec2d32ec481be3d1afddfc8968 /accel/tcg
parent     cc668e6b7e0ffd8c9d130513d12053cf5eda1d3b (diff)
Introduce Virtio-loopback epsilon release:

The epsilon release introduces a new compatibility layer which makes the
virtio-loopback design work with QEMU and the rust-vmm vhost-user backend
without requiring any changes.
Signed-off-by: Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
Change-Id: I52e57563e08a7d0bdc002f8e928ee61ba0c53dd9
Diffstat (limited to 'accel/tcg')
31 files changed, 11982 insertions, 0 deletions
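The patch below adds QEMU's TCG atomic helper templates. The min/max helpers in atomic_template.h are built on a compare-and-swap retry loop (see GEN_ATOMIC_HELPER_FN further down in the diff). As a rough illustration only, here is a minimal standalone sketch of that retry pattern using plain C11 atomics rather than QEMU's qatomic_* wrappers; the function name and types are hypothetical and not part of the patch:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical standalone sketch: atomically compute max(*p, val) and
 * return the previous value, mirroring the fetch_umax retry loop in
 * atomic_template.h but using C11 atomics instead of qatomic_*. */
static uint32_t fetch_umax_u32(_Atomic uint32_t *p, uint32_t val)
{
    uint32_t old = atomic_load(p);
    uint32_t new;

    do {
        new = old > val ? old : val;
        /* On failure, atomic_compare_exchange_weak reloads 'old' with the
         * current value of *p and we simply retry. */
    } while (!atomic_compare_exchange_weak(p, &old, new));

    return old;
}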
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc new file mode 100644 index 000000000..1df1f243e --- /dev/null +++ b/accel/tcg/atomic_common.c.inc @@ -0,0 +1,124 @@ +/* + * Common Atomic Helper Functions + * + * This file should be included before the various instantiations of + * the atomic_template.h helpers. + * + * Copyright (c) 2019 Linaro + * Written by Alex Bennée <alex.bennee@linaro.org> + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +static void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + CPUState *cpu = env_cpu(env); + + trace_guest_rmw_before_exec(cpu, addr, oi); +} + +static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW); +} + +#if HAVE_ATOMIC128 +static void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + trace_guest_ld_before_exec(env_cpu(env), addr, oi); +} + +static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); +} + +static void atomic_trace_st_pre(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + trace_guest_st_before_exec(env_cpu(env), addr, oi); +} + +static void atomic_trace_st_post(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} +#endif + +/* + * Atomic helpers callable from TCG. + * These have a common interface and all defer to cpu_atomic_* + * using the host return address from GETPC(). + */ + +#define CMPXCHG_HELPER(OP, TYPE) \ + TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \ + TYPE oldv, TYPE newv, uint32_t oi) \ + { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); } + +CMPXCHG_HELPER(cmpxchgb, uint32_t) +CMPXCHG_HELPER(cmpxchgw_be, uint32_t) +CMPXCHG_HELPER(cmpxchgw_le, uint32_t) +CMPXCHG_HELPER(cmpxchgl_be, uint32_t) +CMPXCHG_HELPER(cmpxchgl_le, uint32_t) + +#ifdef CONFIG_ATOMIC64 +CMPXCHG_HELPER(cmpxchgq_be, uint64_t) +CMPXCHG_HELPER(cmpxchgq_le, uint64_t) +#endif + +#undef CMPXCHG_HELPER + +#define ATOMIC_HELPER(OP, TYPE) \ + TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \ + TYPE val, uint32_t oi) \ + { return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); } + +#ifdef CONFIG_ATOMIC64 +#define GEN_ATOMIC_HELPERS(OP) \ + ATOMIC_HELPER(glue(OP,b), uint32_t) \ + ATOMIC_HELPER(glue(OP,w_be), uint32_t) \ + ATOMIC_HELPER(glue(OP,w_le), uint32_t) \ + ATOMIC_HELPER(glue(OP,l_be), uint32_t) \ + ATOMIC_HELPER(glue(OP,l_le), uint32_t) \ + ATOMIC_HELPER(glue(OP,q_be), uint64_t) \ + ATOMIC_HELPER(glue(OP,q_le), uint64_t) +#else +#define GEN_ATOMIC_HELPERS(OP) \ + ATOMIC_HELPER(glue(OP,b), uint32_t) \ + ATOMIC_HELPER(glue(OP,w_be), uint32_t) \ + ATOMIC_HELPER(glue(OP,w_le), uint32_t) \ + ATOMIC_HELPER(glue(OP,l_be), uint32_t) \ + ATOMIC_HELPER(glue(OP,l_le), uint32_t) +#endif + +GEN_ATOMIC_HELPERS(fetch_add) +GEN_ATOMIC_HELPERS(fetch_and) +GEN_ATOMIC_HELPERS(fetch_or) +GEN_ATOMIC_HELPERS(fetch_xor) +GEN_ATOMIC_HELPERS(fetch_smin) +GEN_ATOMIC_HELPERS(fetch_umin) +GEN_ATOMIC_HELPERS(fetch_smax) +GEN_ATOMIC_HELPERS(fetch_umax) + +GEN_ATOMIC_HELPERS(add_fetch) +GEN_ATOMIC_HELPERS(and_fetch) +GEN_ATOMIC_HELPERS(or_fetch) +GEN_ATOMIC_HELPERS(xor_fetch) +GEN_ATOMIC_HELPERS(smin_fetch) 
+GEN_ATOMIC_HELPERS(umin_fetch) +GEN_ATOMIC_HELPERS(smax_fetch) +GEN_ATOMIC_HELPERS(umax_fetch) + +GEN_ATOMIC_HELPERS(xchg) + +#undef ATOMIC_HELPER +#undef GEN_ATOMIC_HELPERS diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h new file mode 100644 index 000000000..2d917b6b1 --- /dev/null +++ b/accel/tcg/atomic_template.h @@ -0,0 +1,352 @@ +/* + * Atomic helper templates + * Included from tcg-runtime.c and cputlb.c. + * + * Copyright (c) 2016 Red Hat, Inc + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/plugin.h" + +#if DATA_SIZE == 16 +# define SUFFIX o +# define DATA_TYPE Int128 +# define BSWAP bswap128 +# define SHIFT 4 +#elif DATA_SIZE == 8 +# define SUFFIX q +# define DATA_TYPE aligned_uint64_t +# define SDATA_TYPE aligned_int64_t +# define BSWAP bswap64 +# define SHIFT 3 +#elif DATA_SIZE == 4 +# define SUFFIX l +# define DATA_TYPE uint32_t +# define SDATA_TYPE int32_t +# define BSWAP bswap32 +# define SHIFT 2 +#elif DATA_SIZE == 2 +# define SUFFIX w +# define DATA_TYPE uint16_t +# define SDATA_TYPE int16_t +# define BSWAP bswap16 +# define SHIFT 1 +#elif DATA_SIZE == 1 +# define SUFFIX b +# define DATA_TYPE uint8_t +# define SDATA_TYPE int8_t +# define BSWAP +# define SHIFT 0 +#else +# error unsupported data size +#endif + +#if DATA_SIZE >= 4 +# define ABI_TYPE DATA_TYPE +#else +# define ABI_TYPE uint32_t +#endif + +/* Define host-endian atomic operations. Note that END is used within + the ATOMIC_NAME macro, and redefined below. 
*/ +#if DATA_SIZE == 1 +# define END +#elif defined(HOST_WORDS_BIGENDIAN) +# define END _be +#else +# define END _le +#endif + +ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, + ABI_TYPE cmpv, ABI_TYPE newv, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ | PAGE_WRITE, retaddr); + DATA_TYPE ret; + + atomic_trace_rmw_pre(env, addr, oi); +#if DATA_SIZE == 16 + ret = atomic16_cmpxchg(haddr, cmpv, newv); +#else + ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv); +#endif + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, oi); + return ret; +} + +#if DATA_SIZE >= 16 +#if HAVE_ATOMIC128 +ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ, retaddr); + DATA_TYPE val; + + atomic_trace_ld_pre(env, addr, oi); + val = atomic16_read(haddr); + ATOMIC_MMU_CLEANUP; + atomic_trace_ld_post(env, addr, oi); + return val; +} + +void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_WRITE, retaddr); + + atomic_trace_st_pre(env, addr, oi); + atomic16_set(haddr, val); + ATOMIC_MMU_CLEANUP; + atomic_trace_st_post(env, addr, oi); +} +#endif +#else +ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ | PAGE_WRITE, retaddr); + DATA_TYPE ret; + + atomic_trace_rmw_pre(env, addr, oi); + ret = qatomic_xchg__nocheck(haddr, val); + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, oi); + return ret; +} + +#define GEN_ATOMIC_HELPER(X) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ +{ \ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ + PAGE_READ | PAGE_WRITE, retaddr); \ + DATA_TYPE ret; \ + atomic_trace_rmw_pre(env, addr, oi); \ + ret = qatomic_##X(haddr, val); \ + ATOMIC_MMU_CLEANUP; \ + atomic_trace_rmw_post(env, addr, oi); \ + return ret; \ +} + +GEN_ATOMIC_HELPER(fetch_add) +GEN_ATOMIC_HELPER(fetch_and) +GEN_ATOMIC_HELPER(fetch_or) +GEN_ATOMIC_HELPER(fetch_xor) +GEN_ATOMIC_HELPER(add_fetch) +GEN_ATOMIC_HELPER(and_fetch) +GEN_ATOMIC_HELPER(or_fetch) +GEN_ATOMIC_HELPER(xor_fetch) + +#undef GEN_ATOMIC_HELPER + +/* + * These helpers are, as a whole, full barriers. Within the helper, + * the leading barrier is explicit and the trailing barrier is within + * cmpxchg primitive. + * + * Trace this load + RMW loop as a single RMW op. This way, regardless + * of CF_PARALLEL's value, we'll trace just a read and a write. 
+ */ +#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ +{ \ + XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ + PAGE_READ | PAGE_WRITE, retaddr); \ + XDATA_TYPE cmp, old, new, val = xval; \ + atomic_trace_rmw_pre(env, addr, oi); \ + smp_mb(); \ + cmp = qatomic_read__nocheck(haddr); \ + do { \ + old = cmp; new = FN(old, val); \ + cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \ + } while (cmp != old); \ + ATOMIC_MMU_CLEANUP; \ + atomic_trace_rmw_post(env, addr, oi); \ + return RET; \ +} + +GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old) + +GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) + +#undef GEN_ATOMIC_HELPER_FN +#endif /* DATA SIZE >= 16 */ + +#undef END + +#if DATA_SIZE > 1 + +/* Define reverse-host-endian atomic operations. Note that END is used + within the ATOMIC_NAME macro. */ +#ifdef HOST_WORDS_BIGENDIAN +# define END _le +#else +# define END _be +#endif + +ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, + ABI_TYPE cmpv, ABI_TYPE newv, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ | PAGE_WRITE, retaddr); + DATA_TYPE ret; + + atomic_trace_rmw_pre(env, addr, oi); +#if DATA_SIZE == 16 + ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv)); +#else + ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv)); +#endif + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, oi); + return BSWAP(ret); +} + +#if DATA_SIZE >= 16 +#if HAVE_ATOMIC128 +ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ, retaddr); + DATA_TYPE val; + + atomic_trace_ld_pre(env, addr, oi); + val = atomic16_read(haddr); + ATOMIC_MMU_CLEANUP; + atomic_trace_ld_post(env, addr, oi); + return BSWAP(val); +} + +void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_WRITE, retaddr); + + atomic_trace_st_pre(env, addr, oi); + val = BSWAP(val); + atomic16_set(haddr, val); + ATOMIC_MMU_CLEANUP; + atomic_trace_st_post(env, addr, oi); +} +#endif +#else +ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ | PAGE_WRITE, retaddr); + ABI_TYPE ret; + + atomic_trace_rmw_pre(env, addr, oi); + ret = qatomic_xchg__nocheck(haddr, BSWAP(val)); + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, oi); + return BSWAP(ret); +} + +#define GEN_ATOMIC_HELPER(X) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ +{ \ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ + PAGE_READ | PAGE_WRITE, retaddr); \ + DATA_TYPE ret; \ + atomic_trace_rmw_pre(env, addr, oi); \ + ret = qatomic_##X(haddr, BSWAP(val)); \ + ATOMIC_MMU_CLEANUP; \ + atomic_trace_rmw_post(env, addr, oi); \ + return BSWAP(ret); \ +} + 
+GEN_ATOMIC_HELPER(fetch_and) +GEN_ATOMIC_HELPER(fetch_or) +GEN_ATOMIC_HELPER(fetch_xor) +GEN_ATOMIC_HELPER(and_fetch) +GEN_ATOMIC_HELPER(or_fetch) +GEN_ATOMIC_HELPER(xor_fetch) + +#undef GEN_ATOMIC_HELPER + +/* These helpers are, as a whole, full barriers. Within the helper, + * the leading barrier is explicit and the trailing barrier is within + * cmpxchg primitive. + * + * Trace this load + RMW loop as a single RMW op. This way, regardless + * of CF_PARALLEL's value, we'll trace just a read and a write. + */ +#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ +{ \ + XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ + PAGE_READ | PAGE_WRITE, retaddr); \ + XDATA_TYPE ldo, ldn, old, new, val = xval; \ + atomic_trace_rmw_pre(env, addr, oi); \ + smp_mb(); \ + ldn = qatomic_read__nocheck(haddr); \ + do { \ + ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \ + ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \ + } while (ldo != ldn); \ + ATOMIC_MMU_CLEANUP; \ + atomic_trace_rmw_post(env, addr, oi); \ + return RET; \ +} + +GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old) + +GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) + +/* Note that for addition, we need to use a separate cmpxchg loop instead + of bswaps for the reverse-host-endian helpers. */ +#define ADD(X, Y) (X + Y) +GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new) +#undef ADD + +#undef GEN_ATOMIC_HELPER_FN +#endif /* DATA_SIZE >= 16 */ + +#undef END +#endif /* DATA_SIZE > 1 */ + +#undef BSWAP +#undef ABI_TYPE +#undef DATA_TYPE +#undef SDATA_TYPE +#undef SUFFIX +#undef DATA_SIZE +#undef SHIFT diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c new file mode 100644 index 000000000..be6fe45aa --- /dev/null +++ b/accel/tcg/cpu-exec-common.c @@ -0,0 +1,83 @@ +/* + * emulator main execution loop + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "sysemu/cpus.h" +#include "sysemu/tcg.h" +#include "exec/exec-all.h" + +bool tcg_allowed; + +/* exit the current TB, but without causing any exception to be raised */ +void cpu_loop_exit_noexc(CPUState *cpu) +{ + cpu->exception_index = -1; + cpu_loop_exit(cpu); +} + +#if defined(CONFIG_SOFTMMU) +void cpu_reloading_memory_map(void) +{ + if (qemu_in_vcpu_thread() && current_cpu->running) { + /* The guest can in theory prolong the RCU critical section as long + * as it feels like. The major problem with this is that because it + * can do multiple reconfigurations of the memory map within the + * critical section, we could potentially accumulate an unbounded + * collection of memory data structures awaiting reclamation. + * + * Because the only thing we're currently protecting with RCU is the + * memory data structures, it's sufficient to break the critical section + * in this callback, which we know will get called every time the + * memory map is rearranged. + * + * (If we add anything else in the system that uses RCU to protect + * its data structures, we will need to implement some other mechanism + * to force TCG CPUs to exit the critical section, at which point this + * part of this callback might become unnecessary.) + * + * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which + * only protects cpu->as->dispatch. Since we know our caller is about + * to reload it, it's safe to split the critical section. + */ + rcu_read_unlock(); + rcu_read_lock(); + } +} +#endif + +void cpu_loop_exit(CPUState *cpu) +{ + /* Undo the setting in cpu_tb_exec. */ + cpu->can_do_io = 1; + siglongjmp(cpu->jmp_env, 1); +} + +void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) +{ + if (pc) { + cpu_restore_state(cpu, pc, true); + } + cpu_loop_exit(cpu); +} + +void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc) +{ + cpu->exception_index = EXCP_ATOMIC; + cpu_loop_exit_restore(cpu, pc); +} diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c new file mode 100644 index 000000000..409ec8c38 --- /dev/null +++ b/accel/tcg/cpu-exec.c @@ -0,0 +1,1093 @@ +/* + * emulator main execution loop + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/qemu-print.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/type-helpers.h" +#include "hw/core/tcg-cpu-ops.h" +#include "trace.h" +#include "disas/disas.h" +#include "exec/exec-all.h" +#include "tcg/tcg.h" +#include "qemu/atomic.h" +#include "qemu/compiler.h" +#include "qemu/timer.h" +#include "qemu/rcu.h" +#include "exec/log.h" +#include "qemu/main-loop.h" +#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY) +#include "hw/i386/apic.h" +#endif +#include "sysemu/cpus.h" +#include "exec/cpu-all.h" +#include "sysemu/cpu-timers.h" +#include "sysemu/replay.h" +#include "sysemu/tcg.h" +#include "exec/helper-proto.h" +#include "tb-hash.h" +#include "tb-context.h" +#include "internal.h" + +/* -icount align implementation. */ + +typedef struct SyncClocks { + int64_t diff_clk; + int64_t last_cpu_icount; + int64_t realtime_clock; +} SyncClocks; + +#if !defined(CONFIG_USER_ONLY) +/* Allow the guest to have a max 3ms advance. + * The difference between the 2 clocks could therefore + * oscillate around 0. + */ +#define VM_CLOCK_ADVANCE 3000000 +#define THRESHOLD_REDUCE 1.5 +#define MAX_DELAY_PRINT_RATE 2000000000LL +#define MAX_NB_PRINTS 100 + +static int64_t max_delay; +static int64_t max_advance; + +static void align_clocks(SyncClocks *sc, CPUState *cpu) +{ + int64_t cpu_icount; + + if (!icount_align_option) { + return; + } + + cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low; + sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount); + sc->last_cpu_icount = cpu_icount; + + if (sc->diff_clk > VM_CLOCK_ADVANCE) { +#ifndef _WIN32 + struct timespec sleep_delay, rem_delay; + sleep_delay.tv_sec = sc->diff_clk / 1000000000LL; + sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL; + if (nanosleep(&sleep_delay, &rem_delay) < 0) { + sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec; + } else { + sc->diff_clk = 0; + } +#else + Sleep(sc->diff_clk / SCALE_MS); + sc->diff_clk = 0; +#endif + } +} + +static void print_delay(const SyncClocks *sc) +{ + static float threshold_delay; + static int64_t last_realtime_clock; + static int nb_prints; + + if (icount_align_option && + sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE && + nb_prints < MAX_NB_PRINTS) { + if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) || + (-sc->diff_clk / (float)1000000000LL < + (threshold_delay - THRESHOLD_REDUCE))) { + threshold_delay = (-sc->diff_clk / 1000000000LL) + 1; + qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n", + threshold_delay - 1, + threshold_delay); + nb_prints++; + last_realtime_clock = sc->realtime_clock; + } + } +} + +static void init_delay_params(SyncClocks *sc, CPUState *cpu) +{ + if (!icount_align_option) { + return; + } + sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT); + sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock; + sc->last_cpu_icount + = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low; + if (sc->diff_clk < max_delay) { + max_delay = sc->diff_clk; + } + if (sc->diff_clk > max_advance) { + max_advance = sc->diff_clk; + } + + /* Print every 2s max if the guest is late. 
We limit the number + of printed messages to NB_PRINT_MAX(currently 100) */ + print_delay(sc); +} +#else +static void align_clocks(SyncClocks *sc, const CPUState *cpu) +{ +} + +static void init_delay_params(SyncClocks *sc, const CPUState *cpu) +{ +} +#endif /* CONFIG USER ONLY */ + +uint32_t curr_cflags(CPUState *cpu) +{ + uint32_t cflags = cpu->tcg_cflags; + + /* + * Record gdb single-step. We should be exiting the TB by raising + * EXCP_DEBUG, but to simplify other tests, disable chaining too. + * + * For singlestep and -d nochain, suppress goto_tb so that + * we can log -d cpu,exec after every TB. + */ + if (unlikely(cpu->singlestep_enabled)) { + cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1; + } else if (singlestep) { + cflags |= CF_NO_GOTO_TB | 1; + } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { + cflags |= CF_NO_GOTO_TB; + } + + return cflags; +} + +/* Might cause an exception, so have a longjmp destination ready */ +static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, + target_ulong cs_base, + uint32_t flags, uint32_t cflags) +{ + TranslationBlock *tb; + uint32_t hash; + + /* we should never be trying to look up an INVALID tb */ + tcg_debug_assert(!(cflags & CF_INVALID)); + + hash = tb_jmp_cache_hash_func(pc); + tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); + + if (likely(tb && + tb->pc == pc && + tb->cs_base == cs_base && + tb->flags == flags && + tb->trace_vcpu_dstate == *cpu->trace_dstate && + tb_cflags(tb) == cflags)) { + return tb; + } + tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); + if (tb == NULL) { + return NULL; + } + qatomic_set(&cpu->tb_jmp_cache[hash], tb); + return tb; +} + +static inline void log_cpu_exec(target_ulong pc, CPUState *cpu, + const TranslationBlock *tb) +{ + if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) + && qemu_log_in_addr_range(pc)) { + + qemu_log_mask(CPU_LOG_EXEC, + "Trace %d: %p [" TARGET_FMT_lx + "/" TARGET_FMT_lx "/%08x/%08x] %s\n", + cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc, + tb->flags, tb->cflags, lookup_symbol(pc)); + +#if defined(DEBUG_DISAS) + if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) { + FILE *logfile = qemu_log_lock(); + int flags = 0; + + if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) { + flags |= CPU_DUMP_FPU; + } +#if defined(TARGET_I386) + flags |= CPU_DUMP_CCOP; +#endif + log_cpu_state(cpu, flags); + qemu_log_unlock(logfile); + } +#endif /* DEBUG_DISAS */ + } +} + +static bool check_for_breakpoints(CPUState *cpu, target_ulong pc, + uint32_t *cflags) +{ + CPUBreakpoint *bp; + bool match_page = false; + + if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) { + return false; + } + + /* + * Singlestep overrides breakpoints. + * This requirement is visible in the record-replay tests, where + * we would fail to make forward progress in reverse-continue. + * + * TODO: gdb singlestep should only override gdb breakpoints, + * so that one could (gdb) singlestep into the guest kernel's + * architectural breakpoint handler. + */ + if (cpu->singlestep_enabled) { + return false; + } + + QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { + /* + * If we have an exact pc match, trigger the breakpoint. + * Otherwise, note matches within the page. 
+ */ + if (pc == bp->pc) { + bool match_bp = false; + + if (bp->flags & BP_GDB) { + match_bp = true; + } else if (bp->flags & BP_CPU) { +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + CPUClass *cc = CPU_GET_CLASS(cpu); + assert(cc->tcg_ops->debug_check_breakpoint); + match_bp = cc->tcg_ops->debug_check_breakpoint(cpu); +#endif + } + + if (match_bp) { + cpu->exception_index = EXCP_DEBUG; + return true; + } + } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) { + match_page = true; + } + } + + /* + * Within the same page as a breakpoint, single-step, + * returning to helper_lookup_tb_ptr after each insn looking + * for the actual breakpoint. + * + * TODO: Perhaps better to record all of the TBs associated + * with a given virtual page that contains a breakpoint, and + * then invalidate them when a new overlapping breakpoint is + * set on the page. Non-overlapping TBs would not be + * invalidated, nor would any TB need to be invalidated as + * breakpoints are removed. + */ + if (match_page) { + *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1; + } + return false; +} + +/** + * helper_lookup_tb_ptr: quick check for next tb + * @env: current cpu state + * + * Look for an existing TB matching the current cpu state. + * If found, return the code pointer. If not found, return + * the tcg epilogue so that we return into cpu_tb_exec. + */ +const void *HELPER(lookup_tb_ptr)(CPUArchState *env) +{ + CPUState *cpu = env_cpu(env); + TranslationBlock *tb; + target_ulong cs_base, pc; + uint32_t flags, cflags; + + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + + cflags = curr_cflags(cpu); + if (check_for_breakpoints(cpu, pc, &cflags)) { + cpu_loop_exit(cpu); + } + + tb = tb_lookup(cpu, pc, cs_base, flags, cflags); + if (tb == NULL) { + return tcg_code_gen_epilogue; + } + + log_cpu_exec(pc, cpu, tb); + + return tb->tc.ptr; +} + +/* Execute a TB, and fix up the CPU state afterwards if necessary */ +/* + * Disable CFI checks. + * TCG creates binary blobs at runtime, with the transformed code. + * A TB is a blob of binary code, created at runtime and called with an + * indirect function call. Since such function did not exist at compile time, + * the CFI runtime has no way to verify its signature and would fail. + * TCG is not considered a security-sensitive part of QEMU so this does not + * affect the impact of CFI in environment with high security requirements + */ +static inline TranslationBlock * QEMU_DISABLE_CFI +cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) +{ + CPUArchState *env = cpu->env_ptr; + uintptr_t ret; + TranslationBlock *last_tb; + const void *tb_ptr = itb->tc.ptr; + + log_cpu_exec(itb->pc, cpu, itb); + + qemu_thread_jit_execute(); + ret = tcg_qemu_tb_exec(env, tb_ptr); + cpu->can_do_io = 1; + /* + * TODO: Delay swapping back to the read-write region of the TB + * until we actually need to modify the TB. The read-only copy, + * coming from the rx region, shares the same host TLB entry as + * the code that executed the exit_tb opcode that arrived here. + * If we insist on touching both the RX and the RW pages, we + * double the host TLB pressure. + */ + last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK)); + *tb_exit = ret & TB_EXIT_MASK; + + trace_exec_tb_exit(last_tb, *tb_exit); + + if (*tb_exit > TB_EXIT_IDX1) { + /* We didn't start executing this TB (eg because the instruction + * counter hit zero); we must restore the guest PC to the address + * of the start of the TB. 
+ */ + CPUClass *cc = CPU_GET_CLASS(cpu); + qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc, + "Stopped execution of TB chain before %p [" + TARGET_FMT_lx "] %s\n", + last_tb->tc.ptr, last_tb->pc, + lookup_symbol(last_tb->pc)); + if (cc->tcg_ops->synchronize_from_tb) { + cc->tcg_ops->synchronize_from_tb(cpu, last_tb); + } else { + assert(cc->set_pc); + cc->set_pc(cpu, last_tb->pc); + } + } + + /* + * If gdb single-step, and we haven't raised another exception, + * raise a debug exception. Single-step with another exception + * is handled in cpu_handle_exception. + */ + if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) { + cpu->exception_index = EXCP_DEBUG; + cpu_loop_exit(cpu); + } + + return last_tb; +} + + +static void cpu_exec_enter(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->tcg_ops->cpu_exec_enter) { + cc->tcg_ops->cpu_exec_enter(cpu); + } +} + +static void cpu_exec_exit(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->tcg_ops->cpu_exec_exit) { + cc->tcg_ops->cpu_exec_exit(cpu); + } +} + +void cpu_exec_step_atomic(CPUState *cpu) +{ + CPUArchState *env = (CPUArchState *)cpu->env_ptr; + TranslationBlock *tb; + target_ulong cs_base, pc; + uint32_t flags, cflags; + int tb_exit; + + if (sigsetjmp(cpu->jmp_env, 0) == 0) { + start_exclusive(); + g_assert(cpu == current_cpu); + g_assert(!cpu->running); + cpu->running = true; + + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + + cflags = curr_cflags(cpu); + /* Execute in a serial context. */ + cflags &= ~CF_PARALLEL; + /* After 1 insn, return and release the exclusive lock. */ + cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1; + /* + * No need to check_for_breakpoints here. + * We only arrive in cpu_exec_step_atomic after beginning execution + * of an insn that includes an atomic operation we can't handle. + * Any breakpoint for this insn will have been recognized earlier. + */ + + tb = tb_lookup(cpu, pc, cs_base, flags, cflags); + if (tb == NULL) { + mmap_lock(); + tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); + mmap_unlock(); + } + + cpu_exec_enter(cpu); + /* execute the generated code */ + trace_exec_tb(tb, pc); + cpu_tb_exec(cpu, tb, &tb_exit); + cpu_exec_exit(cpu); + } else { + /* + * The mmap_lock is dropped by tb_gen_code if it runs out of + * memory. + */ +#ifndef CONFIG_SOFTMMU + clear_helper_retaddr(); + tcg_debug_assert(!have_mmap_lock()); +#endif + if (qemu_mutex_iothread_locked()) { + qemu_mutex_unlock_iothread(); + } + assert_no_pages_locked(); + qemu_plugin_disable_mem_helpers(cpu); + } + + /* + * As we start the exclusive region before codegen we must still + * be in the region if we longjump out of either the codegen or + * the execution. 
+ */ + g_assert(cpu_in_exclusive_context(cpu)); + cpu->running = false; + end_exclusive(); +} + +struct tb_desc { + target_ulong pc; + target_ulong cs_base; + CPUArchState *env; + tb_page_addr_t phys_page1; + uint32_t flags; + uint32_t cflags; + uint32_t trace_vcpu_dstate; +}; + +static bool tb_lookup_cmp(const void *p, const void *d) +{ + const TranslationBlock *tb = p; + const struct tb_desc *desc = d; + + if (tb->pc == desc->pc && + tb->page_addr[0] == desc->phys_page1 && + tb->cs_base == desc->cs_base && + tb->flags == desc->flags && + tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && + tb_cflags(tb) == desc->cflags) { + /* check next page if needed */ + if (tb->page_addr[1] == -1) { + return true; + } else { + tb_page_addr_t phys_page2; + target_ulong virt_page2; + + virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + phys_page2 = get_page_addr_code(desc->env, virt_page2); + if (tb->page_addr[1] == phys_page2) { + return true; + } + } + } + return false; +} + +TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, + target_ulong cs_base, uint32_t flags, + uint32_t cflags) +{ + tb_page_addr_t phys_pc; + struct tb_desc desc; + uint32_t h; + + desc.env = (CPUArchState *)cpu->env_ptr; + desc.cs_base = cs_base; + desc.flags = flags; + desc.cflags = cflags; + desc.trace_vcpu_dstate = *cpu->trace_dstate; + desc.pc = pc; + phys_pc = get_page_addr_code(desc.env, pc); + if (phys_pc == -1) { + return NULL; + } + desc.phys_page1 = phys_pc & TARGET_PAGE_MASK; + h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate); + return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp); +} + +void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr) +{ + if (TCG_TARGET_HAS_direct_jump) { + uintptr_t offset = tb->jmp_target_arg[n]; + uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr; + uintptr_t jmp_rx = tc_ptr + offset; + uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff; + tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr); + } else { + tb->jmp_target_arg[n] = addr; + } +} + +static inline void tb_add_jump(TranslationBlock *tb, int n, + TranslationBlock *tb_next) +{ + uintptr_t old; + + qemu_thread_jit_write(); + assert(n < ARRAY_SIZE(tb->jmp_list_next)); + qemu_spin_lock(&tb_next->jmp_lock); + + /* make sure the destination TB is valid */ + if (tb_next->cflags & CF_INVALID) { + goto out_unlock_next; + } + /* Atomically claim the jump destination slot only if it was NULL */ + old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, + (uintptr_t)tb_next); + if (old) { + goto out_unlock_next; + } + + /* patch the native jump address */ + tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr); + + /* add in TB jmp list */ + tb->jmp_list_next[n] = tb_next->jmp_list_head; + tb_next->jmp_list_head = (uintptr_t)tb | n; + + qemu_spin_unlock(&tb_next->jmp_lock); + + qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, + "Linking TBs %p [" TARGET_FMT_lx + "] index %d -> %p [" TARGET_FMT_lx "]\n", + tb->tc.ptr, tb->pc, n, + tb_next->tc.ptr, tb_next->pc); + return; + + out_unlock_next: + qemu_spin_unlock(&tb_next->jmp_lock); + return; +} + +static inline bool cpu_handle_halt(CPUState *cpu) +{ +#ifndef CONFIG_USER_ONLY + if (cpu->halted) { +#if defined(TARGET_I386) + if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { + X86CPU *x86_cpu = X86_CPU(cpu); + qemu_mutex_lock_iothread(); + apic_poll_irq(x86_cpu->apic_state); + cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); + qemu_mutex_unlock_iothread(); + } +#endif /* TARGET_I386 */ + if (!cpu_has_work(cpu)) { + return true; + } + + 
cpu->halted = 0; + } +#endif /* !CONFIG_USER_ONLY */ + + return false; +} + +static inline void cpu_handle_debug_exception(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUWatchpoint *wp; + + if (!cpu->watchpoint_hit) { + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + wp->flags &= ~BP_WATCHPOINT_HIT; + } + } + + if (cc->tcg_ops->debug_excp_handler) { + cc->tcg_ops->debug_excp_handler(cpu); + } +} + +static inline bool cpu_handle_exception(CPUState *cpu, int *ret) +{ + if (cpu->exception_index < 0) { +#ifndef CONFIG_USER_ONLY + if (replay_has_exception() + && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) { + /* Execute just one insn to trigger exception pending in the log */ + cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1; + } +#endif + return false; + } + if (cpu->exception_index >= EXCP_INTERRUPT) { + /* exit request from the cpu execution loop */ + *ret = cpu->exception_index; + if (*ret == EXCP_DEBUG) { + cpu_handle_debug_exception(cpu); + } + cpu->exception_index = -1; + return true; + } else { +#if defined(CONFIG_USER_ONLY) + /* if user mode only, we simulate a fake exception + which will be handled outside the cpu execution + loop */ +#if defined(TARGET_I386) + CPUClass *cc = CPU_GET_CLASS(cpu); + cc->tcg_ops->fake_user_interrupt(cpu); +#endif /* TARGET_I386 */ + *ret = cpu->exception_index; + cpu->exception_index = -1; + return true; +#else + if (replay_exception()) { + CPUClass *cc = CPU_GET_CLASS(cpu); + qemu_mutex_lock_iothread(); + cc->tcg_ops->do_interrupt(cpu); + qemu_mutex_unlock_iothread(); + cpu->exception_index = -1; + + if (unlikely(cpu->singlestep_enabled)) { + /* + * After processing the exception, ensure an EXCP_DEBUG is + * raised when single-stepping so that GDB doesn't miss the + * next instruction. + */ + *ret = EXCP_DEBUG; + cpu_handle_debug_exception(cpu); + return true; + } + } else if (!replay_has_interrupt()) { + /* give a chance to iothread in replay mode */ + *ret = EXCP_INTERRUPT; + return true; + } +#endif + } + + return false; +} + +#ifndef CONFIG_USER_ONLY +/* + * CPU_INTERRUPT_POLL is a virtual event which gets converted into a + * "real" interrupt event later. It does not need to be recorded for + * replay purposes. + */ +static inline bool need_replay_interrupt(int interrupt_request) +{ +#if defined(TARGET_I386) + return !(interrupt_request & CPU_INTERRUPT_POLL); +#else + return true; +#endif +} +#endif /* !CONFIG_USER_ONLY */ + +static inline bool cpu_handle_interrupt(CPUState *cpu, + TranslationBlock **last_tb) +{ + /* + * If we have requested custom cflags with CF_NOIRQ we should + * skip checking here. Any pending interrupts will get picked up + * by the next TB we execute under normal cflags. + */ + if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) { + return false; + } + + /* Clear the interrupt flag now since we're processing + * cpu->interrupt_request and cpu->exit_request. + * Ensure zeroing happens before reading cpu->exit_request or + * cpu->interrupt_request (see also smp_wmb in cpu_exit()) + */ + qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0); + + if (unlikely(qatomic_read(&cpu->interrupt_request))) { + int interrupt_request; + qemu_mutex_lock_iothread(); + interrupt_request = cpu->interrupt_request; + if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { + /* Mask out external interrupts for this step. 
*/ + interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; + } + if (interrupt_request & CPU_INTERRUPT_DEBUG) { + cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; + cpu->exception_index = EXCP_DEBUG; + qemu_mutex_unlock_iothread(); + return true; + } +#if !defined(CONFIG_USER_ONLY) + if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) { + /* Do nothing */ + } else if (interrupt_request & CPU_INTERRUPT_HALT) { + replay_interrupt(); + cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; + cpu->halted = 1; + cpu->exception_index = EXCP_HLT; + qemu_mutex_unlock_iothread(); + return true; + } +#if defined(TARGET_I386) + else if (interrupt_request & CPU_INTERRUPT_INIT) { + X86CPU *x86_cpu = X86_CPU(cpu); + CPUArchState *env = &x86_cpu->env; + replay_interrupt(); + cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0); + do_cpu_init(x86_cpu); + cpu->exception_index = EXCP_HALTED; + qemu_mutex_unlock_iothread(); + return true; + } +#else + else if (interrupt_request & CPU_INTERRUPT_RESET) { + replay_interrupt(); + cpu_reset(cpu); + qemu_mutex_unlock_iothread(); + return true; + } +#endif /* !TARGET_I386 */ + /* The target hook has 3 exit conditions: + False when the interrupt isn't processed, + True when it is, and we should restart on a new TB, + and via longjmp via cpu_loop_exit. */ + else { + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->tcg_ops->cpu_exec_interrupt && + cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) { + if (need_replay_interrupt(interrupt_request)) { + replay_interrupt(); + } + /* + * After processing the interrupt, ensure an EXCP_DEBUG is + * raised when single-stepping so that GDB doesn't miss the + * next instruction. + */ + cpu->exception_index = + (cpu->singlestep_enabled ? EXCP_DEBUG : -1); + *last_tb = NULL; + } + /* The target hook may have updated the 'cpu->interrupt_request'; + * reload the 'interrupt_request' value */ + interrupt_request = cpu->interrupt_request; + } +#endif /* !CONFIG_USER_ONLY */ + if (interrupt_request & CPU_INTERRUPT_EXITTB) { + cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; + /* ensure that no TB jump will be modified as + the program flow was changed */ + *last_tb = NULL; + } + + /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */ + qemu_mutex_unlock_iothread(); + } + + /* Finally, check if we need to exit to the main loop. */ + if (unlikely(qatomic_read(&cpu->exit_request)) + || (icount_enabled() + && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT) + && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) { + qatomic_set(&cpu->exit_request, 0); + if (cpu->exception_index == -1) { + cpu->exception_index = EXCP_INTERRUPT; + } + return true; + } + + return false; +} + +static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, + TranslationBlock **last_tb, int *tb_exit) +{ + int32_t insns_left; + + trace_exec_tb(tb, tb->pc); + tb = cpu_tb_exec(cpu, tb, tb_exit); + if (*tb_exit != TB_EXIT_REQUESTED) { + *last_tb = tb; + return; + } + + *last_tb = NULL; + insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32); + if (insns_left < 0) { + /* Something asked us to stop executing chained TBs; just + * continue round the main loop. Whatever requested the exit + * will also have set something else (eg exit_request or + * interrupt_request) which will be handled by + * cpu_handle_interrupt. cpu_handle_interrupt will also + * clear cpu->icount_decr.u16.high. + */ + return; + } + + /* Instruction counter expired. 
*/ + assert(icount_enabled()); +#ifndef CONFIG_USER_ONLY + /* Ensure global icount has gone forward */ + icount_update(cpu); + /* Refill decrementer and continue execution. */ + insns_left = MIN(0xffff, cpu->icount_budget); + cpu_neg(cpu)->icount_decr.u16.low = insns_left; + cpu->icount_extra = cpu->icount_budget - insns_left; + + /* + * If the next tb has more instructions than we have left to + * execute we need to ensure we find/generate a TB with exactly + * insns_left instructions in it. + */ + if (insns_left > 0 && insns_left < tb->icount) { + assert(insns_left <= CF_COUNT_MASK); + assert(cpu->icount_extra == 0); + cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left; + } +#endif +} + +/* main execution loop */ + +int cpu_exec(CPUState *cpu) +{ + int ret; + SyncClocks sc = { 0 }; + + /* replay_interrupt may need current_cpu */ + current_cpu = cpu; + + if (cpu_handle_halt(cpu)) { + return EXCP_HALTED; + } + + rcu_read_lock(); + + cpu_exec_enter(cpu); + + /* Calculate difference between guest clock and host clock. + * This delay includes the delay of the last cycle, so + * what we have to do is sleep until it is 0. As for the + * advance/delay we gain here, we try to fix it next time. + */ + init_delay_params(&sc, cpu); + + /* prepare setjmp context for exception handling */ + if (sigsetjmp(cpu->jmp_env, 0) != 0) { +#if defined(__clang__) + /* + * Some compilers wrongly smash all local variables after + * siglongjmp (the spec requires that only non-volatile locals + * which are changed between the sigsetjmp and siglongjmp are + * permitted to be trashed). There were bug reports for gcc + * 4.5.0 and clang. The bug is fixed in all versions of gcc + * that we support, but is still unfixed in clang: + * https://bugs.llvm.org/show_bug.cgi?id=21183 + * + * Reload an essential local variable here for those compilers. + * Newer versions of gcc would complain about this code (-Wclobbered), + * so we only perform the workaround for clang. + */ + cpu = current_cpu; +#else + /* Non-buggy compilers preserve this; assert the correct value. */ + g_assert(cpu == current_cpu); +#endif + +#ifndef CONFIG_SOFTMMU + clear_helper_retaddr(); + tcg_debug_assert(!have_mmap_lock()); +#endif + if (qemu_mutex_iothread_locked()) { + qemu_mutex_unlock_iothread(); + } + qemu_plugin_disable_mem_helpers(cpu); + + assert_no_pages_locked(); + } + + /* if an exception is pending, we execute it here */ + while (!cpu_handle_exception(cpu, &ret)) { + TranslationBlock *last_tb = NULL; + int tb_exit = 0; + + while (!cpu_handle_interrupt(cpu, &last_tb)) { + TranslationBlock *tb; + target_ulong cs_base, pc; + uint32_t flags, cflags; + + cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags); + + /* + * When requested, use an exact setting for cflags for the next + * execution. This is used for icount, precise smc, and stop- + * after-access watchpoints. Since this request should never + * have CF_INVALID set, -1 is a convenient invalid value that + * does not require tcg headers for cpu_common_reset. 
+ */ + cflags = cpu->cflags_next_tb; + if (cflags == -1) { + cflags = curr_cflags(cpu); + } else { + cpu->cflags_next_tb = -1; + } + + if (check_for_breakpoints(cpu, pc, &cflags)) { + break; + } + + tb = tb_lookup(cpu, pc, cs_base, flags, cflags); + if (tb == NULL) { + mmap_lock(); + tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); + mmap_unlock(); + /* + * We add the TB in the virtual pc hash table + * for the fast lookup + */ + qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); + } + +#ifndef CONFIG_USER_ONLY + /* + * We don't take care of direct jumps when address mapping + * changes in system emulation. So it's not safe to make a + * direct jump to a TB spanning two pages because the mapping + * for the second page can change. + */ + if (tb->page_addr[1] != -1) { + last_tb = NULL; + } +#endif + /* See if we can patch the calling TB. */ + if (last_tb) { + tb_add_jump(last_tb, tb_exit, tb); + } + + cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit); + + /* Try to align the host and virtual clocks + if the guest is in advance */ + align_clocks(&sc, cpu); + } + } + + cpu_exec_exit(cpu); + rcu_read_unlock(); + + return ret; +} + +void tcg_exec_realizefn(CPUState *cpu, Error **errp) +{ + static bool tcg_target_initialized; + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (!tcg_target_initialized) { + cc->tcg_ops->initialize(); + tcg_target_initialized = true; + } + tlb_init(cpu); + qemu_plugin_vcpu_init_hook(cpu); + +#ifndef CONFIG_USER_ONLY + tcg_iommu_init_notifier_list(cpu); +#endif /* !CONFIG_USER_ONLY */ +} + +/* undo the initializations in reverse order */ +void tcg_exec_unrealizefn(CPUState *cpu) +{ +#ifndef CONFIG_USER_ONLY + tcg_iommu_free_notifier_list(cpu); +#endif /* !CONFIG_USER_ONLY */ + + qemu_plugin_vcpu_exit_hook(cpu); + tlb_destroy(cpu); +} + +#ifndef CONFIG_USER_ONLY + +void dump_drift_info(GString *buf) +{ + if (!icount_enabled()) { + return; + } + + g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n", + (cpu_get_clock() - icount_get()) / SCALE_MS); + if (icount_align_option) { + g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n", + -max_delay / SCALE_MS); + g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n", + max_advance / SCALE_MS); + } else { + g_string_append_printf(buf, "Max guest delay NA\n"); + g_string_append_printf(buf, "Max guest advance NA\n"); + } +} + +HumanReadableText *qmp_x_query_jit(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + if (!tcg_enabled()) { + error_setg(errp, "JIT information is only available with accel=tcg"); + return NULL; + } + + dump_exec_info(buf); + dump_drift_info(buf); + + return human_readable_text_from_str(buf); +} + +HumanReadableText *qmp_x_query_opcount(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + if (!tcg_enabled()) { + error_setg(errp, "Opcode count information is only available with accel=tcg"); + return NULL; + } + + dump_opcount_info(buf); + + return human_readable_text_from_str(buf); +} + +#endif /* !CONFIG_USER_ONLY */ diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c new file mode 100644 index 000000000..b69a95344 --- /dev/null +++ b/accel/tcg/cputlb.c @@ -0,0 +1,2619 @@ +/* + * Common CPU TLB handling + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "hw/core/tcg-cpu-ops.h" +#include "exec/exec-all.h" +#include "exec/memory.h" +#include "exec/cpu_ldst.h" +#include "exec/cputlb.h" +#include "exec/memory-internal.h" +#include "exec/ram_addr.h" +#include "tcg/tcg.h" +#include "qemu/error-report.h" +#include "exec/log.h" +#include "exec/helper-proto.h" +#include "qemu/atomic.h" +#include "qemu/atomic128.h" +#include "exec/translate-all.h" +#include "trace/trace-root.h" +#include "tb-hash.h" +#include "internal.h" +#ifdef CONFIG_PLUGIN +#include "qemu/plugin-memory.h" +#endif +#include "tcg/tcg-ldst.h" + +/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ +/* #define DEBUG_TLB */ +/* #define DEBUG_TLB_LOG */ + +#ifdef DEBUG_TLB +# define DEBUG_TLB_GATE 1 +# ifdef DEBUG_TLB_LOG +# define DEBUG_TLB_LOG_GATE 1 +# else +# define DEBUG_TLB_LOG_GATE 0 +# endif +#else +# define DEBUG_TLB_GATE 0 +# define DEBUG_TLB_LOG_GATE 0 +#endif + +#define tlb_debug(fmt, ...) do { \ + if (DEBUG_TLB_LOG_GATE) { \ + qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \ + ## __VA_ARGS__); \ + } else if (DEBUG_TLB_GATE) { \ + fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ + } \ +} while (0) + +#define assert_cpu_is_self(cpu) do { \ + if (DEBUG_TLB_GATE) { \ + g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \ + } \ + } while (0) + +/* run_on_cpu_data.target_ptr should always be big enough for a + * target_ulong even on 32 bit builds */ +QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); + +/* We currently can't handle more than 16 bits in the MMUIDX bitmask. + */ +QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); +#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) + +static inline size_t tlb_n_entries(CPUTLBDescFast *fast) +{ + return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; +} + +static inline size_t sizeof_tlb(CPUTLBDescFast *fast) +{ + return fast->mask + (1 << CPU_TLB_ENTRY_BITS); +} + +static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, + size_t max_entries) +{ + desc->window_begin_ns = ns; + desc->window_max_entries = max_entries; +} + +static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) +{ + unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr); + + for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { + qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL); + } +} + +static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) +{ + /* Discard jump cache entries for any tb which might potentially + overlap the flushed page. */ + tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); + tb_jmp_cache_clear_page(cpu, addr); +} + +/** + * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary + * @desc: The CPUTLBDesc portion of the TLB + * @fast: The CPUTLBDescFast portion of the same TLB + * + * Called with tlb_lock_held. 
+ * + * We have two main constraints when resizing a TLB: (1) we only resize it + * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing + * the array or unnecessarily flushing it), which means we do not control how + * frequently the resizing can occur; (2) we don't have access to the guest's + * future scheduling decisions, and therefore have to decide the magnitude of + * the resize based on past observations. + * + * In general, a memory-hungry process can benefit greatly from an appropriately + * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that + * we just have to make the TLB as large as possible; while an oversized TLB + * results in minimal TLB miss rates, it also takes longer to be flushed + * (flushes can be _very_ frequent), and the reduced locality can also hurt + * performance. + * + * To achieve near-optimal performance for all kinds of workloads, we: + * + * 1. Aggressively increase the size of the TLB when the use rate of the + * TLB being flushed is high, since it is likely that in the near future this + * memory-hungry process will execute again, and its memory hungriness will + * probably be similar. + * + * 2. Slowly reduce the size of the TLB as the use rate declines over a + * reasonably large time window. The rationale is that if in such a time window + * we have not observed a high TLB use rate, it is likely that we won't observe + * it in the near future. In that case, once a time window expires we downsize + * the TLB to match the maximum use rate observed in the window. + * + * 3. Try to keep the maximum use rate in a time window in the 30-70% range, + * since in that range performance is likely near-optimal. Recall that the TLB + * is direct mapped, so we want the use rate to be low (or at least not too + * high), since otherwise we are likely to have a significant amount of + * conflict misses. + */ +static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast, + int64_t now) +{ + size_t old_size = tlb_n_entries(fast); + size_t rate; + size_t new_size = old_size; + int64_t window_len_ms = 100; + int64_t window_len_ns = window_len_ms * 1000 * 1000; + bool window_expired = now > desc->window_begin_ns + window_len_ns; + + if (desc->n_used_entries > desc->window_max_entries) { + desc->window_max_entries = desc->n_used_entries; + } + rate = desc->window_max_entries * 100 / old_size; + + if (rate > 70) { + new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS); + } else if (rate < 30 && window_expired) { + size_t ceil = pow2ceil(desc->window_max_entries); + size_t expected_rate = desc->window_max_entries * 100 / ceil; + + /* + * Avoid undersizing when the max number of entries seen is just below + * a pow2. For instance, if max_entries == 1025, the expected use rate + * would be 1025/2048==50%. However, if max_entries == 1023, we'd get + * 1023/1024==99.9% use rate, so we'd likely end up doubling the size + * later. Thus, make sure that the expected use rate remains below 70%. + * (and since we double the size, that means the lowest rate we'd + * expect to get is 35%, which is still in the 30-70% range where + * we consider that the size is appropriate.) 
+ */ + if (expected_rate > 70) { + ceil *= 2; + } + new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS); + } + + if (new_size == old_size) { + if (window_expired) { + tlb_window_reset(desc, now, desc->n_used_entries); + } + return; + } + + g_free(fast->table); + g_free(desc->iotlb); + + tlb_window_reset(desc, now, 0); + /* desc->n_used_entries is cleared by the caller */ + fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; + fast->table = g_try_new(CPUTLBEntry, new_size); + desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); + + /* + * If the allocations fail, try smaller sizes. We just freed some + * memory, so going back to half of new_size has a good chance of working. + * Increased memory pressure elsewhere in the system might cause the + * allocations to fail though, so we progressively reduce the allocation + * size, aborting if we cannot even allocate the smallest TLB we support. + */ + while (fast->table == NULL || desc->iotlb == NULL) { + if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) { + error_report("%s: %s", __func__, strerror(errno)); + abort(); + } + new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS); + fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; + + g_free(fast->table); + g_free(desc->iotlb); + fast->table = g_try_new(CPUTLBEntry, new_size); + desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); + } +} + +static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast) +{ + desc->n_used_entries = 0; + desc->large_page_addr = -1; + desc->large_page_mask = -1; + desc->vindex = 0; + memset(fast->table, -1, sizeof_tlb(fast)); + memset(desc->vtable, -1, sizeof(desc->vtable)); +} + +static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx, + int64_t now) +{ + CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx]; + CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx]; + + tlb_mmu_resize_locked(desc, fast, now); + tlb_mmu_flush_locked(desc, fast); +} + +static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) +{ + size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; + + tlb_window_reset(desc, now, 0); + desc->n_used_entries = 0; + fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; + fast->table = g_new(CPUTLBEntry, n_entries); + desc->iotlb = g_new(CPUIOTLBEntry, n_entries); + tlb_mmu_flush_locked(desc, fast); +} + +static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) +{ + env_tlb(env)->d[mmu_idx].n_used_entries++; +} + +static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx) +{ + env_tlb(env)->d[mmu_idx].n_used_entries--; +} + +void tlb_init(CPUState *cpu) +{ + CPUArchState *env = cpu->env_ptr; + int64_t now = get_clock_realtime(); + int i; + + qemu_spin_init(&env_tlb(env)->c.lock); + + /* All tlbs are initialized flushed. */ + env_tlb(env)->c.dirty = 0; + + for (i = 0; i < NB_MMU_MODES; i++) { + tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now); + } +} + +void tlb_destroy(CPUState *cpu) +{ + CPUArchState *env = cpu->env_ptr; + int i; + + qemu_spin_destroy(&env_tlb(env)->c.lock); + for (i = 0; i < NB_MMU_MODES; i++) { + CPUTLBDesc *desc = &env_tlb(env)->d[i]; + CPUTLBDescFast *fast = &env_tlb(env)->f[i]; + + g_free(fast->table); + g_free(desc->iotlb); + } +} + +/* flush_all_helper: run fn across all cpus + * + * If the wait flag is set then the src cpu's helper will be queued as + * "safe" work and the loop exited creating a synchronisation point + * where all queued work will be finished before execution starts + * again. 
+ */ +static void flush_all_helper(CPUState *src, run_on_cpu_func fn, + run_on_cpu_data d) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (cpu != src) { + async_run_on_cpu(cpu, fn, d); + } + } +} + +void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) +{ + CPUState *cpu; + size_t full = 0, part = 0, elide = 0; + + CPU_FOREACH(cpu) { + CPUArchState *env = cpu->env_ptr; + + full += qatomic_read(&env_tlb(env)->c.full_flush_count); + part += qatomic_read(&env_tlb(env)->c.part_flush_count); + elide += qatomic_read(&env_tlb(env)->c.elide_flush_count); + } + *pfull = full; + *ppart = part; + *pelide = elide; +} + +static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) +{ + CPUArchState *env = cpu->env_ptr; + uint16_t asked = data.host_int; + uint16_t all_dirty, work, to_clean; + int64_t now = get_clock_realtime(); + + assert_cpu_is_self(cpu); + + tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked); + + qemu_spin_lock(&env_tlb(env)->c.lock); + + all_dirty = env_tlb(env)->c.dirty; + to_clean = asked & all_dirty; + all_dirty &= ~to_clean; + env_tlb(env)->c.dirty = all_dirty; + + for (work = to_clean; work != 0; work &= work - 1) { + int mmu_idx = ctz32(work); + tlb_flush_one_mmuidx_locked(env, mmu_idx, now); + } + + qemu_spin_unlock(&env_tlb(env)->c.lock); + + cpu_tb_jmp_cache_clear(cpu); + + if (to_clean == ALL_MMUIDX_BITS) { + qatomic_set(&env_tlb(env)->c.full_flush_count, + env_tlb(env)->c.full_flush_count + 1); + } else { + qatomic_set(&env_tlb(env)->c.part_flush_count, + env_tlb(env)->c.part_flush_count + ctpop16(to_clean)); + if (to_clean != asked) { + qatomic_set(&env_tlb(env)->c.elide_flush_count, + env_tlb(env)->c.elide_flush_count + + ctpop16(asked & ~to_clean)); + } + } +} + +void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) +{ + tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); + + if (cpu->created && !qemu_cpu_is_self(cpu)) { + async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work, + RUN_ON_CPU_HOST_INT(idxmap)); + } else { + tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); + } +} + +void tlb_flush(CPUState *cpu) +{ + tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); +} + +void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) +{ + const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; + + tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); + + flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); + fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); +} + +void tlb_flush_all_cpus(CPUState *src_cpu) +{ + tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS); +} + +void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap) +{ + const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; + + tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); + + flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); + async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); +} + +void tlb_flush_all_cpus_synced(CPUState *src_cpu) +{ + tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS); +} + +static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry, + target_ulong page, target_ulong mask) +{ + page &= mask; + mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK; + + return (page == (tlb_entry->addr_read & mask) || + page == (tlb_addr_write(tlb_entry) & mask) || + page == (tlb_entry->addr_code & mask)); +} + +static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, + target_ulong page) +{ + return tlb_hit_page_mask_anyprot(tlb_entry, page, -1); +} + +/** + * tlb_entry_is_empty - return true if the entry is not in use 
+ * @te: pointer to CPUTLBEntry + */ +static inline bool tlb_entry_is_empty(const CPUTLBEntry *te) +{ + return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1; +} + +/* Called with tlb_c.lock held */ +static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry, + target_ulong page, + target_ulong mask) +{ + if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) { + memset(tlb_entry, -1, sizeof(*tlb_entry)); + return true; + } + return false; +} + +static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, + target_ulong page) +{ + return tlb_flush_entry_mask_locked(tlb_entry, page, -1); +} + +/* Called with tlb_c.lock held */ +static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx, + target_ulong page, + target_ulong mask) +{ + CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx]; + int k; + + assert_cpu_is_self(env_cpu(env)); + for (k = 0; k < CPU_VTLB_SIZE; k++) { + if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) { + tlb_n_used_entries_dec(env, mmu_idx); + } + } +} + +static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx, + target_ulong page) +{ + tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1); +} + +static void tlb_flush_page_locked(CPUArchState *env, int midx, + target_ulong page) +{ + target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr; + target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask; + + /* Check if we need to flush due to large pages. */ + if ((page & lp_mask) == lp_addr) { + tlb_debug("forcing full flush midx %d (" + TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", + midx, lp_addr, lp_mask); + tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); + } else { + if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) { + tlb_n_used_entries_dec(env, midx); + } + tlb_flush_vtlb_page_locked(env, midx, page); + } +} + +/** + * tlb_flush_page_by_mmuidx_async_0: + * @cpu: cpu on which to flush + * @addr: page of virtual address to flush + * @idxmap: set of mmu_idx to flush + * + * Helper for tlb_flush_page_by_mmuidx and friends, flush one page + * at @addr from the tlbs indicated by @idxmap from @cpu. + */ +static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, + target_ulong addr, + uint16_t idxmap) +{ + CPUArchState *env = cpu->env_ptr; + int mmu_idx; + + assert_cpu_is_self(cpu); + + tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap); + + qemu_spin_lock(&env_tlb(env)->c.lock); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + if ((idxmap >> mmu_idx) & 1) { + tlb_flush_page_locked(env, mmu_idx, addr); + } + } + qemu_spin_unlock(&env_tlb(env)->c.lock); + + tb_flush_jmp_cache(cpu, addr); +} + +/** + * tlb_flush_page_by_mmuidx_async_1: + * @cpu: cpu on which to flush + * @data: encoded addr + idxmap + * + * Helper for tlb_flush_page_by_mmuidx and friends, called through + * async_run_on_cpu. The idxmap parameter is encoded in the page + * offset of the target_ptr field. This limits the set of mmu_idx + * that can be passed via this method. 
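+ *
+ * As an illustrative example, with 4 KiB target pages a page address
+ * of 0x10000 and an idxmap of 0x3 travel as the single value 0x10003,
+ * and are split again with TARGET_PAGE_MASK before calling
+ * tlb_flush_page_by_mmuidx_async_0.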
+ */ +static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu, + run_on_cpu_data data) +{ + target_ulong addr_and_idxmap = (target_ulong) data.target_ptr; + target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK; + uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK; + + tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); +} + +typedef struct { + target_ulong addr; + uint16_t idxmap; +} TLBFlushPageByMMUIdxData; + +/** + * tlb_flush_page_by_mmuidx_async_2: + * @cpu: cpu on which to flush + * @data: allocated addr + idxmap + * + * Helper for tlb_flush_page_by_mmuidx and friends, called through + * async_run_on_cpu. The addr+idxmap parameters are stored in a + * TLBFlushPageByMMUIdxData structure that has been allocated + * specifically for this helper. Free the structure when done. + */ +static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu, + run_on_cpu_data data) +{ + TLBFlushPageByMMUIdxData *d = data.host_ptr; + + tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap); + g_free(d); +} + +void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap) +{ + tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap); + + /* This should already be page aligned */ + addr &= TARGET_PAGE_MASK; + + if (qemu_cpu_is_self(cpu)) { + tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); + } else if (idxmap < TARGET_PAGE_SIZE) { + /* + * Most targets have only a few mmu_idx. In the case where + * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid + * allocating memory for this operation. + */ + async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1, + RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + } else { + TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1); + + /* Otherwise allocate a structure, freed by the worker. */ + d->addr = addr; + d->idxmap = idxmap; + async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2, + RUN_ON_CPU_HOST_PTR(d)); + } +} + +void tlb_flush_page(CPUState *cpu, target_ulong addr) +{ + tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); +} + +void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr, + uint16_t idxmap) +{ + tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); + + /* This should already be page aligned */ + addr &= TARGET_PAGE_MASK; + + /* + * Allocate memory to hold addr+idxmap only when needed. + * See tlb_flush_page_by_mmuidx for details. + */ + if (idxmap < TARGET_PAGE_SIZE) { + flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, + RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + } else { + CPUState *dst_cpu; + + /* Allocate a separate data block for each destination cpu. */ + CPU_FOREACH(dst_cpu) { + if (dst_cpu != src_cpu) { + TLBFlushPageByMMUIdxData *d + = g_new(TLBFlushPageByMMUIdxData, 1); + + d->addr = addr; + d->idxmap = idxmap; + async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, + RUN_ON_CPU_HOST_PTR(d)); + } + } + } + + tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap); +} + +void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) +{ + tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS); +} + +void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, + target_ulong addr, + uint16_t idxmap) +{ + tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); + + /* This should already be page aligned */ + addr &= TARGET_PAGE_MASK; + + /* + * Allocate memory to hold addr+idxmap only when needed. + * See tlb_flush_page_by_mmuidx for details. 
+ */ + if (idxmap < TARGET_PAGE_SIZE) { + flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, + RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1, + RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + } else { + CPUState *dst_cpu; + TLBFlushPageByMMUIdxData *d; + + /* Allocate a separate data block for each destination cpu. */ + CPU_FOREACH(dst_cpu) { + if (dst_cpu != src_cpu) { + d = g_new(TLBFlushPageByMMUIdxData, 1); + d->addr = addr; + d->idxmap = idxmap; + async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, + RUN_ON_CPU_HOST_PTR(d)); + } + } + + d = g_new(TLBFlushPageByMMUIdxData, 1); + d->addr = addr; + d->idxmap = idxmap; + async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2, + RUN_ON_CPU_HOST_PTR(d)); + } +} + +void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr) +{ + tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS); +} + +static void tlb_flush_range_locked(CPUArchState *env, int midx, + target_ulong addr, target_ulong len, + unsigned bits) +{ + CPUTLBDesc *d = &env_tlb(env)->d[midx]; + CPUTLBDescFast *f = &env_tlb(env)->f[midx]; + target_ulong mask = MAKE_64BIT_MASK(0, bits); + + /* + * If @bits is smaller than the tlb size, there may be multiple entries + * within the TLB; otherwise all addresses that match under @mask hit + * the same TLB entry. + * TODO: Perhaps allow bits to be a few bits less than the size. + * For now, just flush the entire TLB. + * + * If @len is larger than the tlb size, then it will take longer to + * test all of the entries in the TLB than it will to flush it all. + */ + if (mask < f->mask || len > f->mask) { + tlb_debug("forcing full flush midx %d (" + TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n", + midx, addr, mask, len); + tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); + return; + } + + /* + * Check if we need to flush due to large pages. + * Because large_page_mask contains all 1's from the msb, + * we only need to test the end of the range. 
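+ *
+ * For instance (illustrative numbers): a tracked 2 MiB large page at
+ * 0x40000000 gives a large_page_mask with the low 21 bits clear, so
+ * any range whose last byte lands in [0x40000000, 0x401fffff] matches
+ * and forces the full flush of this mmu_idx.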
+ */ + if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) { + tlb_debug("forcing full flush midx %d (" + TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", + midx, d->large_page_addr, d->large_page_mask); + tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); + return; + } + + for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) { + target_ulong page = addr + i; + CPUTLBEntry *entry = tlb_entry(env, midx, page); + + if (tlb_flush_entry_mask_locked(entry, page, mask)) { + tlb_n_used_entries_dec(env, midx); + } + tlb_flush_vtlb_page_mask_locked(env, midx, page, mask); + } +} + +typedef struct { + target_ulong addr; + target_ulong len; + uint16_t idxmap; + uint16_t bits; +} TLBFlushRangeData; + +static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, + TLBFlushRangeData d) +{ + CPUArchState *env = cpu->env_ptr; + int mmu_idx; + + assert_cpu_is_self(cpu); + + tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n", + d.addr, d.bits, d.len, d.idxmap); + + qemu_spin_lock(&env_tlb(env)->c.lock); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + if ((d.idxmap >> mmu_idx) & 1) { + tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits); + } + } + qemu_spin_unlock(&env_tlb(env)->c.lock); + + for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) { + tb_flush_jmp_cache(cpu, d.addr + i); + } +} + +static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu, + run_on_cpu_data data) +{ + TLBFlushRangeData *d = data.host_ptr; + tlb_flush_range_by_mmuidx_async_0(cpu, *d); + g_free(d); +} + +void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, + target_ulong len, uint16_t idxmap, + unsigned bits) +{ + TLBFlushRangeData d; + + /* + * If all bits are significant, and len is small, + * this devolves to tlb_flush_page. + */ + if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { + tlb_flush_page_by_mmuidx(cpu, addr, idxmap); + return; + } + /* If no page bits are significant, this devolves to tlb_flush. */ + if (bits < TARGET_PAGE_BITS) { + tlb_flush_by_mmuidx(cpu, idxmap); + return; + } + + /* This should already be page aligned */ + d.addr = addr & TARGET_PAGE_MASK; + d.len = len; + d.idxmap = idxmap; + d.bits = bits; + + if (qemu_cpu_is_self(cpu)) { + tlb_flush_range_by_mmuidx_async_0(cpu, d); + } else { + /* Otherwise allocate a structure, freed by the worker. */ + TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); + async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1, + RUN_ON_CPU_HOST_PTR(p)); + } +} + +void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, + uint16_t idxmap, unsigned bits) +{ + tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); +} + +void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, + target_ulong addr, target_ulong len, + uint16_t idxmap, unsigned bits) +{ + TLBFlushRangeData d; + CPUState *dst_cpu; + + /* + * If all bits are significant, and len is small, + * this devolves to tlb_flush_page. + */ + if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { + tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap); + return; + } + /* If no page bits are significant, this devolves to tlb_flush. */ + if (bits < TARGET_PAGE_BITS) { + tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap); + return; + } + + /* This should already be page aligned */ + d.addr = addr & TARGET_PAGE_MASK; + d.len = len; + d.idxmap = idxmap; + d.bits = bits; + + /* Allocate a separate data block for each destination cpu. 
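+ * Each destination cpu receives its own g_memdup()'d copy of the
+ * TLBFlushRangeData, which tlb_flush_range_by_mmuidx_async_1 frees
+ * once the flush has run.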
*/ + CPU_FOREACH(dst_cpu) { + if (dst_cpu != src_cpu) { + TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); + async_run_on_cpu(dst_cpu, + tlb_flush_range_by_mmuidx_async_1, + RUN_ON_CPU_HOST_PTR(p)); + } + } + + tlb_flush_range_by_mmuidx_async_0(src_cpu, d); +} + +void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, + target_ulong addr, + uint16_t idxmap, unsigned bits) +{ + tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE, + idxmap, bits); +} + +void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, + target_ulong addr, + target_ulong len, + uint16_t idxmap, + unsigned bits) +{ + TLBFlushRangeData d, *p; + CPUState *dst_cpu; + + /* + * If all bits are significant, and len is small, + * this devolves to tlb_flush_page. + */ + if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { + tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap); + return; + } + /* If no page bits are significant, this devolves to tlb_flush. */ + if (bits < TARGET_PAGE_BITS) { + tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap); + return; + } + + /* This should already be page aligned */ + d.addr = addr & TARGET_PAGE_MASK; + d.len = len; + d.idxmap = idxmap; + d.bits = bits; + + /* Allocate a separate data block for each destination cpu. */ + CPU_FOREACH(dst_cpu) { + if (dst_cpu != src_cpu) { + p = g_memdup(&d, sizeof(d)); + async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1, + RUN_ON_CPU_HOST_PTR(p)); + } + } + + p = g_memdup(&d, sizeof(d)); + async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1, + RUN_ON_CPU_HOST_PTR(p)); +} + +void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, + target_ulong addr, + uint16_t idxmap, + unsigned bits) +{ + tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE, + idxmap, bits); +} + +/* update the TLBs so that writes to code in the virtual page 'addr' + can be detected */ +void tlb_protect_code(ram_addr_t ram_addr) +{ + cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, + DIRTY_MEMORY_CODE); +} + +/* update the TLB so that writes in physical page 'phys_addr' are no longer + tested for self modifying code */ +void tlb_unprotect_code(ram_addr_t ram_addr) +{ + cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); +} + + +/* + * Dirty write flag handling + * + * When the TCG code writes to a location it looks up the address in + * the TLB and uses that data to compute the final address. If any of + * the lower bits of the address are set then the slow path is forced. + * There are a number of reasons to do this but for normal RAM the + * most usual is detecting writes to code regions which may invalidate + * generated code. + * + * Other vCPUs might be reading their TLBs during guest execution, so we update + * te->addr_write with qatomic_set. We don't need to worry about this for + * oversized guests as MTTCG is disabled for them. + * + * Called with tlb_c.lock held. + */ +static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, + uintptr_t start, uintptr_t length) +{ + uintptr_t addr = tlb_entry->addr_write; + + if ((addr & (TLB_INVALID_MASK | TLB_MMIO | + TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) { + addr &= TARGET_PAGE_MASK; + addr += tlb_entry->addend; + if ((addr - start) < length) { +#if TCG_OVERSIZED_GUEST + tlb_entry->addr_write |= TLB_NOTDIRTY; +#else + qatomic_set(&tlb_entry->addr_write, + tlb_entry->addr_write | TLB_NOTDIRTY); +#endif + } + } +} + +/* + * Called with tlb_c.lock held. + * Called only from the vCPU context, i.e. 
the TLB's owner thread. + */ +static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s) +{ + *d = *s; +} + +/* This is a cross vCPU call (i.e. another vCPU resetting the flags of + * the target vCPU). + * We must take tlb_c.lock to avoid racing with another vCPU update. The only + * thing actually updated is the target TLB entry ->addr_write flags. + */ +void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) +{ + CPUArchState *env; + + int mmu_idx; + + env = cpu->env_ptr; + qemu_spin_lock(&env_tlb(env)->c.lock); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + unsigned int i; + unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]); + + for (i = 0; i < n; i++) { + tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i], + start1, length); + } + + for (i = 0; i < CPU_VTLB_SIZE; i++) { + tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i], + start1, length); + } + } + qemu_spin_unlock(&env_tlb(env)->c.lock); +} + +/* Called with tlb_c.lock held */ +static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, + target_ulong vaddr) +{ + if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { + tlb_entry->addr_write = vaddr; + } +} + +/* update the TLB corresponding to virtual page vaddr + so that it is no longer dirty */ +void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) +{ + CPUArchState *env = cpu->env_ptr; + int mmu_idx; + + assert_cpu_is_self(cpu); + + vaddr &= TARGET_PAGE_MASK; + qemu_spin_lock(&env_tlb(env)->c.lock); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); + } + + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + int k; + for (k = 0; k < CPU_VTLB_SIZE; k++) { + tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); + } + } + qemu_spin_unlock(&env_tlb(env)->c.lock); +} + +/* Our TLB does not support large pages, so remember the area covered by + large pages and trigger a full TLB flush if these are invalidated. */ +static void tlb_add_large_page(CPUArchState *env, int mmu_idx, + target_ulong vaddr, target_ulong size) +{ + target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; + target_ulong lp_mask = ~(size - 1); + + if (lp_addr == (target_ulong)-1) { + /* No previous large page. */ + lp_addr = vaddr; + } else { + /* Extend the existing region to include the new page. + This is a compromise between unnecessary flushes and + the cost of maintaining a full variable size TLB. */ + lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; + while (((lp_addr ^ vaddr) & lp_mask) != 0) { + lp_mask <<= 1; + } + } + env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; + env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; +} + +/* Add a new TLB entry. At most one entry for a given virtual address + * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the + * supplied size is only used by tlb_flush_page. + * + * Called from TCG-generated code, which is under an RCU read-side + * critical section. 
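+ *
+ * A target's tlb_fill hook typically finishes by installing the
+ * translation along these lines (an illustrative sketch only; the
+ * local variable names are assumptions):
+ *
+ *     tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot,
+ *                             mmu_idx, TARGET_PAGE_SIZE);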
+ */ +void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, MemTxAttrs attrs, int prot, + int mmu_idx, target_ulong size) +{ + CPUArchState *env = cpu->env_ptr; + CPUTLB *tlb = env_tlb(env); + CPUTLBDesc *desc = &tlb->d[mmu_idx]; + MemoryRegionSection *section; + unsigned int index; + target_ulong address; + target_ulong write_address; + uintptr_t addend; + CPUTLBEntry *te, tn; + hwaddr iotlb, xlat, sz, paddr_page; + target_ulong vaddr_page; + int asidx = cpu_asidx_from_attrs(cpu, attrs); + int wp_flags; + bool is_ram, is_romd; + + assert_cpu_is_self(cpu); + + if (size <= TARGET_PAGE_SIZE) { + sz = TARGET_PAGE_SIZE; + } else { + tlb_add_large_page(env, mmu_idx, vaddr, size); + sz = size; + } + vaddr_page = vaddr & TARGET_PAGE_MASK; + paddr_page = paddr & TARGET_PAGE_MASK; + + section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, + &xlat, &sz, attrs, &prot); + assert(sz >= TARGET_PAGE_SIZE); + + tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx + " prot=%x idx=%d\n", + vaddr, paddr, prot, mmu_idx); + + address = vaddr_page; + if (size < TARGET_PAGE_SIZE) { + /* Repeat the MMU check and TLB fill on every access. */ + address |= TLB_INVALID_MASK; + } + if (attrs.byte_swap) { + address |= TLB_BSWAP; + } + + is_ram = memory_region_is_ram(section->mr); + is_romd = memory_region_is_romd(section->mr); + + if (is_ram || is_romd) { + /* RAM and ROMD both have associated host memory. */ + addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; + } else { + /* I/O does not; force the host address to NULL. */ + addend = 0; + } + + write_address = address; + if (is_ram) { + iotlb = memory_region_get_ram_addr(section->mr) + xlat; + /* + * Computing is_clean is expensive; avoid all that unless + * the page is actually writable. + */ + if (prot & PAGE_WRITE) { + if (section->readonly) { + write_address |= TLB_DISCARD_WRITE; + } else if (cpu_physical_memory_is_clean(iotlb)) { + write_address |= TLB_NOTDIRTY; + } + } + } else { + /* I/O or ROMD */ + iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; + /* + * Writes to romd devices must go through MMIO to enable write. + * Reads to romd devices go through the ram_ptr found above, + * but of course reads to I/O must go through MMIO. + */ + write_address |= TLB_MMIO; + if (!is_romd) { + address = write_address; + } + } + + wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, + TARGET_PAGE_SIZE); + + index = tlb_index(env, mmu_idx, vaddr_page); + te = tlb_entry(env, mmu_idx, vaddr_page); + + /* + * Hold the TLB lock for the rest of the function. We could acquire/release + * the lock several times in the function, but it is faster to amortize the + * acquisition cost by acquiring it just once. Note that this leads to + * a longer critical section, but this is not a concern since the TLB lock + * is unlikely to be contended. + */ + qemu_spin_lock(&tlb->c.lock); + + /* Note that the tlb is no longer clean. */ + tlb->c.dirty |= 1 << mmu_idx; + + /* Make sure there's no cached translation for the new page. */ + tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); + + /* + * Only evict the old entry to the victim tlb if it's for a + * different page; otherwise just overwrite the stale data. + */ + if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { + unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; + CPUTLBEntry *tv = &desc->vtable[vidx]; + + /* Evict the old entry into the victim tlb. 
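+ *
+ * The victim table is a small CPU_VTLB_SIZE-entry buffer of recently
+ * evicted translations; victim_tlb_hit() can later swap such an entry
+ * back into the main table, avoiding a full tlb_fill on a conflict
+ * miss.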
*/ + copy_tlb_helper_locked(tv, te); + desc->viotlb[vidx] = desc->iotlb[index]; + tlb_n_used_entries_dec(env, mmu_idx); + } + + /* refill the tlb */ + /* + * At this point iotlb contains a physical section number in the lower + * TARGET_PAGE_BITS, and either + * + the ram_addr_t of the page base of the target RAM (RAM) + * + the offset within section->mr of the page base (I/O, ROMD) + * We subtract the vaddr_page (which is page aligned and thus won't + * disturb the low bits) to give an offset which can be added to the + * (non-page-aligned) vaddr of the eventual memory access to get + * the MemoryRegion offset for the access. Note that the vaddr we + * subtract here is that of the page base, and not the same as the + * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). + */ + desc->iotlb[index].addr = iotlb - vaddr_page; + desc->iotlb[index].attrs = attrs; + + /* Now calculate the new entry */ + tn.addend = addend - vaddr_page; + if (prot & PAGE_READ) { + tn.addr_read = address; + if (wp_flags & BP_MEM_READ) { + tn.addr_read |= TLB_WATCHPOINT; + } + } else { + tn.addr_read = -1; + } + + if (prot & PAGE_EXEC) { + tn.addr_code = address; + } else { + tn.addr_code = -1; + } + + tn.addr_write = -1; + if (prot & PAGE_WRITE) { + tn.addr_write = write_address; + if (prot & PAGE_WRITE_INV) { + tn.addr_write |= TLB_INVALID_MASK; + } + if (wp_flags & BP_MEM_WRITE) { + tn.addr_write |= TLB_WATCHPOINT; + } + } + + copy_tlb_helper_locked(te, &tn); + tlb_n_used_entries_inc(env, mmu_idx); + qemu_spin_unlock(&tlb->c.lock); +} + +/* Add a new TLB entry, but without specifying the memory + * transaction attributes to be used. + */ +void tlb_set_page(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, int prot, + int mmu_idx, target_ulong size) +{ + tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, + prot, mmu_idx, size); +} + +static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) +{ + ram_addr_t ram_addr; + + ram_addr = qemu_ram_addr_from_host(ptr); + if (ram_addr == RAM_ADDR_INVALID) { + error_report("Bad ram pointer %p", ptr); + abort(); + } + return ram_addr; +} + +/* + * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the + * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must + * be discarded and looked up again (e.g. via tlb_entry()). + */ +static void tlb_fill(CPUState *cpu, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + bool ok; + + /* + * This is not a probe, so only valid return is success; failure + * should result in exception + longjmp to the cpu loop. 
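+ *
+ * (Non-faulting lookups go through probe_access_internal() instead,
+ * which invokes the same tcg_ops->tlb_fill hook with nonfault == true
+ * and reports failure via TLB_INVALID_MASK rather than raising an
+ * exception.)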
+ */ + ok = cc->tcg_ops->tlb_fill(cpu, addr, size, + access_type, mmu_idx, false, retaddr); + assert(ok); +} + +static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr); +} + +static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, + uintptr_t retaddr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (!cpu->ignore_memory_transaction_failures && + cc->tcg_ops->do_transaction_failed) { + cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, + access_type, mmu_idx, attrs, + response, retaddr); + } +} + +static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, + int mmu_idx, target_ulong addr, uintptr_t retaddr, + MMUAccessType access_type, MemOp op) +{ + CPUState *cpu = env_cpu(env); + hwaddr mr_offset; + MemoryRegionSection *section; + MemoryRegion *mr; + uint64_t val; + bool locked = false; + MemTxResult r; + + section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); + mr = section->mr; + mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + cpu->mem_io_pc = retaddr; + if (!cpu->can_do_io) { + cpu_io_recompile(cpu, retaddr); + } + + if (!qemu_mutex_iothread_locked()) { + qemu_mutex_lock_iothread(); + locked = true; + } + r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); + if (r != MEMTX_OK) { + hwaddr physaddr = mr_offset + + section->offset_within_address_space - + section->offset_within_region; + + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, + mmu_idx, iotlbentry->attrs, r, retaddr); + } + if (locked) { + qemu_mutex_unlock_iothread(); + } + + return val; +} + +/* + * Save a potentially trashed IOTLB entry for later lookup by plugin. + * This is read by tlb_plugin_lookup if the iotlb entry doesn't match + * because of the side effect of io_writex changing memory layout. + */ +static void save_iotlb_data(CPUState *cs, hwaddr addr, + MemoryRegionSection *section, hwaddr mr_offset) +{ +#ifdef CONFIG_PLUGIN + SavedIOTLB *saved = &cs->saved_iotlb; + saved->addr = addr; + saved->section = section; + saved->mr_offset = mr_offset; +#endif +} + +static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, + int mmu_idx, uint64_t val, target_ulong addr, + uintptr_t retaddr, MemOp op) +{ + CPUState *cpu = env_cpu(env); + hwaddr mr_offset; + MemoryRegionSection *section; + MemoryRegion *mr; + bool locked = false; + MemTxResult r; + + section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); + mr = section->mr; + mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + if (!cpu->can_do_io) { + cpu_io_recompile(cpu, retaddr); + } + cpu->mem_io_pc = retaddr; + + /* + * The memory_region_dispatch may trigger a flush/resize + * so for plugins we save the iotlb_data just in case. 
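+ *
+ * (tlb_plugin_lookup() falls back to this saved copy when the TLB
+ * entry it finds no longer matches the access.)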
+ */ + save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset); + + if (!qemu_mutex_iothread_locked()) { + qemu_mutex_lock_iothread(); + locked = true; + } + r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); + if (r != MEMTX_OK) { + hwaddr physaddr = mr_offset + + section->offset_within_address_space - + section->offset_within_region; + + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), + MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, + retaddr); + } + if (locked) { + qemu_mutex_unlock_iothread(); + } +} + +static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) +{ +#if TCG_OVERSIZED_GUEST + return *(target_ulong *)((uintptr_t)entry + ofs); +#else + /* ofs might correspond to .addr_write, so use qatomic_read */ + return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); +#endif +} + +/* Return true if ADDR is present in the victim tlb, and has been copied + back to the main tlb. */ +static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, + size_t elt_ofs, target_ulong page) +{ + size_t vidx; + + assert_cpu_is_self(env_cpu(env)); + for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { + CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; + target_ulong cmp; + + /* elt_ofs might correspond to .addr_write, so use qatomic_read */ +#if TCG_OVERSIZED_GUEST + cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); +#else + cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); +#endif + + if (cmp == page) { + /* Found entry in victim tlb, swap tlb and iotlb. */ + CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; + + qemu_spin_lock(&env_tlb(env)->c.lock); + copy_tlb_helper_locked(&tmptlb, tlb); + copy_tlb_helper_locked(tlb, vtlb); + copy_tlb_helper_locked(vtlb, &tmptlb); + qemu_spin_unlock(&env_tlb(env)->c.lock); + + CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; + CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; + tmpio = *io; *io = *vio; *vio = tmpio; + return true; + } + } + return false; +} + +/* Macro to call the above, with local variables from the use context. */ +#define VICTIM_TLB_HIT(TY, ADDR) \ + victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ + (ADDR) & TARGET_PAGE_MASK) + +/* + * Return a ram_addr_t for the virtual address for execution. + * + * Return -1 if we can't translate and execute from an entire page + * of RAM. This will force us to execute by loading and translating + * one insn at a time, without caching. + * + * NOTE: This function will trigger an exception if the page is + * not executable. + */ +tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, + void **hostp) +{ + uintptr_t mmu_idx = cpu_mmu_index(env, true); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + void *p; + + if (unlikely(!tlb_hit(entry->addr_code, addr))) { + if (!VICTIM_TLB_HIT(addr_code, addr)) { + tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + + if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { + /* + * The MMU protection covers a smaller range than a target + * page, so we must redo the MMU check for every insn. + */ + return -1; + } + } + assert(tlb_hit(entry->addr_code, addr)); + } + + if (unlikely(entry->addr_code & TLB_MMIO)) { + /* The region is not backed by RAM. 
*/ + if (hostp) { + *hostp = NULL; + } + return -1; + } + + p = (void *)((uintptr_t)addr + entry->addend); + if (hostp) { + *hostp = p; + } + return qemu_ram_addr_from_host_nofail(p); +} + +tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) +{ + return get_page_addr_code_hostp(env, addr, NULL); +} + +static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, + CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) +{ + ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; + + trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); + + if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { + struct page_collection *pages + = page_collection_lock(ram_addr, ram_addr + size); + tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr); + page_collection_unlock(pages); + } + + /* + * Set both VGA and migration bits for simplicity and to remove + * the notdirty callback faster. + */ + cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); + + /* We remove the notdirty callback only if the code has been flushed. */ + if (!cpu_physical_memory_is_clean(ram_addr)) { + trace_memory_notdirty_set_dirty(mem_vaddr); + tlb_set_dirty(cpu, mem_vaddr); + } +} + +static int probe_access_internal(CPUArchState *env, target_ulong addr, + int fault_size, MMUAccessType access_type, + int mmu_idx, bool nonfault, + void **phost, uintptr_t retaddr) +{ + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr, page_addr; + size_t elt_ofs; + int flags; + + switch (access_type) { + case MMU_DATA_LOAD: + elt_ofs = offsetof(CPUTLBEntry, addr_read); + break; + case MMU_DATA_STORE: + elt_ofs = offsetof(CPUTLBEntry, addr_write); + break; + case MMU_INST_FETCH: + elt_ofs = offsetof(CPUTLBEntry, addr_code); + break; + default: + g_assert_not_reached(); + } + tlb_addr = tlb_read_ofs(entry, elt_ofs); + + page_addr = addr & TARGET_PAGE_MASK; + if (!tlb_hit_page(tlb_addr, page_addr)) { + if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { + CPUState *cs = env_cpu(env); + CPUClass *cc = CPU_GET_CLASS(cs); + + if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, + mmu_idx, nonfault, retaddr)) { + /* Non-faulting page table read failed. */ + *phost = NULL; + return TLB_INVALID_MASK; + } + + /* TLB resize via tlb_fill may have moved the entry. */ + entry = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlb_read_ofs(entry, elt_ofs); + } + flags = tlb_addr & TLB_FLAGS_MASK; + + /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ + if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { + *phost = NULL; + return TLB_MMIO; + } + + /* Everything else is RAM. */ + *phost = (void *)((uintptr_t)addr + entry->addend); + return flags; +} + +int probe_access_flags(CPUArchState *env, target_ulong addr, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, uintptr_t retaddr) +{ + int flags; + + flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, + nonfault, phost, retaddr); + + /* Handle clean RAM pages. 
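+ * Dirty tracking is updated via notdirty_write() below, so
+ * TLB_NOTDIRTY can then be cleared and the caller may treat the
+ * returned host pointer as ordinary RAM.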
*/ + if (unlikely(flags & TLB_NOTDIRTY)) { + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); + flags &= ~TLB_NOTDIRTY; + } + + return flags; +} + +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) +{ + void *host; + int flags; + + g_assert(-(addr | TARGET_PAGE_MASK) >= size); + + flags = probe_access_internal(env, addr, size, access_type, mmu_idx, + false, &host, retaddr); + + /* Per the interface, size == 0 merely faults the access. */ + if (size == 0) { + return NULL; + } + + if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + /* Handle watchpoints. */ + if (flags & TLB_WATCHPOINT) { + int wp_access = (access_type == MMU_DATA_STORE + ? BP_MEM_WRITE : BP_MEM_READ); + cpu_check_watchpoint(env_cpu(env), addr, size, + iotlbentry->attrs, wp_access, retaddr); + } + + /* Handle clean RAM pages. */ + if (flags & TLB_NOTDIRTY) { + notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); + } + } + + return host; +} + +void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, + MMUAccessType access_type, int mmu_idx) +{ + void *host; + int flags; + + flags = probe_access_internal(env, addr, 0, access_type, + mmu_idx, true, &host, 0); + + /* No combination of flags are expected by the caller. */ + return flags ? NULL : host; +} + +#ifdef CONFIG_PLUGIN +/* + * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. + * This should be a hot path as we will have just looked this path up + * in the softmmu lookup code (or helper). We don't handle re-fills or + * checking the victim table. This is purely informational. + * + * This almost never fails as the memory access being instrumented + * should have just filled the TLB. The one corner case is io_writex + * which can cause TLB flushes and potential resizing of the TLBs + * losing the information we need. In those cases we need to recover + * data from a copy of the iotlbentry. As long as this always occurs + * from the same thread (which a mem callback will be) this is safe. + */ + +bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, + bool is_store, struct qemu_plugin_hwaddr *data) +{ + CPUArchState *env = cpu->env_ptr; + CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); + uintptr_t index = tlb_index(env, mmu_idx, addr); + target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read; + + if (likely(tlb_hit(tlb_addr, addr))) { + /* We must have an iotlb entry for MMIO */ + if (tlb_addr & TLB_MMIO) { + CPUIOTLBEntry *iotlbentry; + iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + data->is_io = true; + data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); + data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + } else { + data->is_io = false; + data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); + } + return true; + } else { + SavedIOTLB *saved = &cpu->saved_iotlb; + data->is_io = true; + data->v.io.section = saved->section; + data->v.io.offset = saved->mr_offset; + return true; + } +} + +#endif + +/* + * Probe for an atomic operation. Do not allow unaligned operations, + * or io operations to proceed. Return the host address. + * + * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. 
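+ *
+ * A read-modify-write helper would use it roughly as follows (a
+ * sketch only; the surrounding names and the size are illustrative):
+ *
+ *     void *haddr = atomic_mmu_lookup(env, addr, oi, 4,
+ *                                     PAGE_READ | PAGE_WRITE, retaddr);
+ *     ... perform the host atomic operation on haddr ...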
+ */ +static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, + MemOpIdx oi, int size, int prot, + uintptr_t retaddr) +{ + size_t mmu_idx = get_mmuidx(oi); + MemOp mop = get_memop(oi); + int a_bits = get_alignment_bits(mop); + uintptr_t index; + CPUTLBEntry *tlbe; + target_ulong tlb_addr; + void *hostaddr; + + /* Adjust the given return address. */ + retaddr -= GETPC_ADJ; + + /* Enforce guest required alignment. */ + if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { + /* ??? Maybe indicate atomic op to cpu_unaligned_access */ + cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + } + + /* Enforce qemu required alignment. */ + if (unlikely(addr & (size - 1))) { + /* We get here if guest alignment was not requested, + or was not enforced by cpu_unaligned_access above. + We might widen the access and emulate, but for now + mark an exception and exit the cpu loop. */ + goto stop_the_world; + } + + index = tlb_index(env, mmu_idx, addr); + tlbe = tlb_entry(env, mmu_idx, addr); + + /* Check TLB entry and enforce page permissions. */ + if (prot & PAGE_WRITE) { + tlb_addr = tlb_addr_write(tlbe); + if (!tlb_hit(tlb_addr, addr)) { + if (!VICTIM_TLB_HIT(addr_write, addr)) { + tlb_fill(env_cpu(env), addr, size, + MMU_DATA_STORE, mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + tlbe = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; + } + + /* Let the guest notice RMW on a write-only page. */ + if ((prot & PAGE_READ) && + unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { + tlb_fill(env_cpu(env), addr, size, + MMU_DATA_LOAD, mmu_idx, retaddr); + /* + * Since we don't support reads and writes to different addresses, + * and we do have the proper page loaded for write, this shouldn't + * ever return. But just in case, handle via stop-the-world. + */ + goto stop_the_world; + } + } else /* if (prot & PAGE_READ) */ { + tlb_addr = tlbe->addr_read; + if (!tlb_hit(tlb_addr, addr)) { + if (!VICTIM_TLB_HIT(addr_write, addr)) { + tlb_fill(env_cpu(env), addr, size, + MMU_DATA_LOAD, mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + tlbe = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK; + } + } + + /* Notice an IO access or a needs-MMU-lookup access */ + if (unlikely(tlb_addr & TLB_MMIO)) { + /* There's really nothing that can be done to + support this apart from stop-the-world. */ + goto stop_the_world; + } + + hostaddr = (void *)((uintptr_t)addr + tlbe->addend); + + if (unlikely(tlb_addr & TLB_NOTDIRTY)) { + notdirty_write(env_cpu(env), addr, size, + &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); + } + + return hostaddr; + + stop_the_world: + cpu_loop_exit_atomic(env_cpu(env), retaddr); +} + +/* + * Verify that we have passed the correct MemOp to the correct function. + * + * In the case of the helper_*_mmu functions, we will have done this by + * using the MemOp to look up the helper during code generation. + * + * In the case of the cpu_*_mmu functions, this is up to the caller. + * We could present one function to target code, and dispatch based on + * the MemOp, but so far we have worked hard to avoid an indirect function + * call along the memory path. + */ +static void validate_memop(MemOpIdx oi, MemOp expected) +{ +#ifdef CONFIG_DEBUG_TCG + MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); + assert(have == expected); +#endif +} + +/* + * Load Helpers + * + * We support two different access types. 
SOFTMMU_CODE_ACCESS is + * specifically for reading instructions from system memory. It is + * called by the translation loop and in some helpers where the code + * is disassembled. It shouldn't be called directly by guest code. + */ + +typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); + +static inline uint64_t QEMU_ALWAYS_INLINE +load_memop(const void *haddr, MemOp op) +{ + switch (op) { + case MO_UB: + return ldub_p(haddr); + case MO_BEUW: + return lduw_be_p(haddr); + case MO_LEUW: + return lduw_le_p(haddr); + case MO_BEUL: + return (uint32_t)ldl_be_p(haddr); + case MO_LEUL: + return (uint32_t)ldl_le_p(haddr); + case MO_BEQ: + return ldq_be_p(haddr); + case MO_LEQ: + return ldq_le_p(haddr); + default: + qemu_build_not_reached(); + } +} + +static inline uint64_t QEMU_ALWAYS_INLINE +load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi, + uintptr_t retaddr, MemOp op, bool code_read, + FullLoadHelper *full_load) +{ + uintptr_t mmu_idx = get_mmuidx(oi); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; + const size_t tlb_off = code_read ? + offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); + const MMUAccessType access_type = + code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; + unsigned a_bits = get_alignment_bits(get_memop(oi)); + void *haddr; + uint64_t res; + size_t size = memop_size(op); + + /* Handle CPU specific unaligned behaviour */ + if (addr & ((1 << a_bits) - 1)) { + cpu_unaligned_access(env_cpu(env), addr, access_type, + mmu_idx, retaddr); + } + + /* If the TLB entry is for a different page, reload and try again. */ + if (!tlb_hit(tlb_addr, addr)) { + if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, + addr & TARGET_PAGE_MASK)) { + tlb_fill(env_cpu(env), addr, size, + access_type, mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = code_read ? entry->addr_code : entry->addr_read; + tlb_addr &= ~TLB_INVALID_MASK; + } + + /* Handle anything that isn't just a straight memory access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + CPUIOTLBEntry *iotlbentry; + bool need_swap; + + /* For anything that is unaligned, recurse through full_load. */ + if ((addr & (size - 1)) != 0) { + goto do_unaligned_access; + } + + iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + /* Handle watchpoints. */ + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + /* On watchpoint hit, this will longjmp out. */ + cpu_check_watchpoint(env_cpu(env), addr, size, + iotlbentry->attrs, BP_MEM_READ, retaddr); + } + + need_swap = size > 1 && (tlb_addr & TLB_BSWAP); + + /* Handle I/O access. */ + if (likely(tlb_addr & TLB_MMIO)) { + return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, + access_type, op ^ (need_swap * MO_BSWAP)); + } + + haddr = (void *)((uintptr_t)addr + entry->addend); + + /* + * Keep these two load_memop separate to ensure that the compiler + * is able to fold the entire function to a single instruction. + * There is a build-time assert inside to remind you of this. ;-) + */ + if (unlikely(need_swap)) { + return load_memop(haddr, op ^ MO_BSWAP); + } + return load_memop(haddr, op); + } + + /* Handle slow unaligned access (it spans two pages or IO). 
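+ * For example (illustrative): a 4-byte little-endian load with
+ * (addr & 3) == 2 performs two aligned loads, r1 at (addr & ~3) and
+ * r2 at the following word, and returns
+ * ((r1 >> 16) | (r2 << 16)) masked to 32 bits.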
*/ + if (size > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 + >= TARGET_PAGE_SIZE)) { + target_ulong addr1, addr2; + uint64_t r1, r2; + unsigned shift; + do_unaligned_access: + addr1 = addr & ~((target_ulong)size - 1); + addr2 = addr1 + size; + r1 = full_load(env, addr1, oi, retaddr); + r2 = full_load(env, addr2, oi, retaddr); + shift = (addr & (size - 1)) * 8; + + if (memop_big_endian(op)) { + /* Big-endian combine. */ + res = (r1 << shift) | (r2 >> ((size * 8) - shift)); + } else { + /* Little-endian combine. */ + res = (r1 >> shift) | (r2 << ((size * 8) - shift)); + } + return res & MAKE_64BIT_MASK(0, size * 8); + } + + haddr = (void *)((uintptr_t)addr + entry->addend); + return load_memop(haddr, op); +} + +/* + * For the benefit of TCG generated code, we want to avoid the + * complication of ABI-specific return type promotion and always + * return a value extended to the register size of the host. This is + * tcg_target_long, except in the case of a 32-bit host and 64-bit + * data, and for that we always have uint64_t. + * + * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. + */ + +static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_UB); + return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); +} + +tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return full_ldub_mmu(env, addr, oi, retaddr); +} + +static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUW); + return load_helper(env, addr, oi, retaddr, MO_LEUW, false, + full_le_lduw_mmu); +} + +tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return full_le_lduw_mmu(env, addr, oi, retaddr); +} + +static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUW); + return load_helper(env, addr, oi, retaddr, MO_BEUW, false, + full_be_lduw_mmu); +} + +tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return full_be_lduw_mmu(env, addr, oi, retaddr); +} + +static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUL); + return load_helper(env, addr, oi, retaddr, MO_LEUL, false, + full_le_ldul_mmu); +} + +tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return full_le_ldul_mmu(env, addr, oi, retaddr); +} + +static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUL); + return load_helper(env, addr, oi, retaddr, MO_BEUL, false, + full_be_ldul_mmu); +} + +tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return full_be_ldul_mmu(env, addr, oi, retaddr); +} + +uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEQ); + return load_helper(env, addr, oi, retaddr, MO_LEQ, false, + helper_le_ldq_mmu); +} + +uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEQ); + return load_helper(env, addr, oi, retaddr, MO_BEQ, false, + helper_be_ldq_mmu); +} + +/* + * Provide signed 
versions of the load routines as well. We can of course
+ * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
+ */
+
+
+tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
+}
+
+/*
+ * Load helpers for cpu_ldst.h.
+ */
+
+static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t retaddr,
+ FullLoadHelper *full_load)
+{
+ uint64_t ret;
+
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
+ ret = full_load(env, addr, oi, retaddr);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+ return ret;
+}
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
+}
+
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
+}
+
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
+}
+
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
+}
+
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
+}
+
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
+}
+
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
+}
+
+/*
+ * Store Helpers
+ */
+
+static inline void QEMU_ALWAYS_INLINE
+store_memop(void *haddr, uint64_t val, MemOp op)
+{
+ switch (op) {
+ case MO_UB:
+ stb_p(haddr, val);
+ break;
+ case MO_BEUW:
+ stw_be_p(haddr, val);
+ break;
+ case MO_LEUW:
+ stw_le_p(haddr, val);
+ break;
+ case MO_BEUL:
+ stl_be_p(haddr, val);
+ break;
+ case MO_LEUL:
+ stl_le_p(haddr, val);
+ break;
+ case MO_BEQ:
+ stq_be_p(haddr, val);
+ break;
+ case MO_LEQ:
+ stq_le_p(haddr, val);
+ break;
+ default:
+ qemu_build_not_reached();
+ }
+}
+
+static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr);
+
+static void __attribute__((noinline))
+store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
+ uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
+ bool big_endian)
+{
+ const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
+ uintptr_t index, index2;
+ CPUTLBEntry *entry, *entry2;
+ target_ulong page2, tlb_addr, tlb_addr2;
+ MemOpIdx oi;
+ size_t size2;
+ int i;
+
+ /*
+ 
* Ensure the second page is in the TLB. Note that the first page + * is already guaranteed to be filled, and that the second page + * cannot evict the first. + */ + page2 = (addr + size) & TARGET_PAGE_MASK; + size2 = (addr + size) & ~TARGET_PAGE_MASK; + index2 = tlb_index(env, mmu_idx, page2); + entry2 = tlb_entry(env, mmu_idx, page2); + + tlb_addr2 = tlb_addr_write(entry2); + if (!tlb_hit_page(tlb_addr2, page2)) { + if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { + tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, + mmu_idx, retaddr); + index2 = tlb_index(env, mmu_idx, page2); + entry2 = tlb_entry(env, mmu_idx, page2); + } + tlb_addr2 = tlb_addr_write(entry2); + } + + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + tlb_addr = tlb_addr_write(entry); + + /* + * Handle watchpoints. Since this may trap, all checks + * must happen before any store. + */ + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + cpu_check_watchpoint(env_cpu(env), addr, size - size2, + env_tlb(env)->d[mmu_idx].iotlb[index].attrs, + BP_MEM_WRITE, retaddr); + } + if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { + cpu_check_watchpoint(env_cpu(env), page2, size2, + env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, + BP_MEM_WRITE, retaddr); + } + + /* + * XXX: not efficient, but simple. + * This loop must go in the forward direction to avoid issues + * with self-modifying code in Windows 64-bit. + */ + oi = make_memop_idx(MO_UB, mmu_idx); + if (big_endian) { + for (i = 0; i < size; ++i) { + /* Big-endian extract. */ + uint8_t val8 = val >> (((size - 1) * 8) - (i * 8)); + full_stb_mmu(env, addr + i, val8, oi, retaddr); + } + } else { + for (i = 0; i < size; ++i) { + /* Little-endian extract. */ + uint8_t val8 = val >> (i * 8); + full_stb_mmu(env, addr + i, val8, oi, retaddr); + } + } +} + +static inline void QEMU_ALWAYS_INLINE +store_helper(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr, MemOp op) +{ + uintptr_t mmu_idx = get_mmuidx(oi); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = tlb_addr_write(entry); + const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); + unsigned a_bits = get_alignment_bits(get_memop(oi)); + void *haddr; + size_t size = memop_size(op); + + /* Handle CPU specific unaligned behaviour */ + if (addr & ((1 << a_bits) - 1)) { + cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + } + + /* If the TLB entry is for a different page, reload and try again. */ + if (!tlb_hit(tlb_addr, addr)) { + if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, + addr & TARGET_PAGE_MASK)) { + tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, + mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; + } + + /* Handle anything that isn't just a straight memory access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + CPUIOTLBEntry *iotlbentry; + bool need_swap; + + /* For anything that is unaligned, recurse through byte stores. */ + if ((addr & (size - 1)) != 0) { + goto do_unaligned_access; + } + + iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + /* Handle watchpoints. */ + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + /* On watchpoint hit, this will longjmp out. 
*/ + cpu_check_watchpoint(env_cpu(env), addr, size, + iotlbentry->attrs, BP_MEM_WRITE, retaddr); + } + + need_swap = size > 1 && (tlb_addr & TLB_BSWAP); + + /* Handle I/O access. */ + if (tlb_addr & TLB_MMIO) { + io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, + op ^ (need_swap * MO_BSWAP)); + return; + } + + /* Ignore writes to ROM. */ + if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { + return; + } + + /* Handle clean RAM pages. */ + if (tlb_addr & TLB_NOTDIRTY) { + notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); + } + + haddr = (void *)((uintptr_t)addr + entry->addend); + + /* + * Keep these two store_memop separate to ensure that the compiler + * is able to fold the entire function to a single instruction. + * There is a build-time assert inside to remind you of this. ;-) + */ + if (unlikely(need_swap)) { + store_memop(haddr, val, op ^ MO_BSWAP); + } else { + store_memop(haddr, val, op); + } + return; + } + + /* Handle slow unaligned access (it spans two pages or IO). */ + if (size > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 + >= TARGET_PAGE_SIZE)) { + do_unaligned_access: + store_helper_unaligned(env, addr, val, retaddr, size, + mmu_idx, memop_big_endian(op)); + return; + } + + haddr = (void *)((uintptr_t)addr + entry->addend); + store_memop(haddr, val, op); +} + +static void __attribute__((noinline)) +full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_UB); + store_helper(env, addr, val, oi, retaddr, MO_UB); +} + +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + full_stb_mmu(env, addr, val, oi, retaddr); +} + +static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUW); + store_helper(env, addr, val, oi, retaddr, MO_LEUW); +} + +void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + full_le_stw_mmu(env, addr, val, oi, retaddr); +} + +static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUW); + store_helper(env, addr, val, oi, retaddr, MO_BEUW); +} + +void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + full_be_stw_mmu(env, addr, val, oi, retaddr); +} + +static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUL); + store_helper(env, addr, val, oi, retaddr, MO_LEUL); +} + +void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + full_le_stl_mmu(env, addr, val, oi, retaddr); +} + +static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUL); + store_helper(env, addr, val, oi, retaddr, MO_BEUL); +} + +void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + full_be_stl_mmu(env, addr, val, oi, retaddr); +} + +void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEQ); + store_helper(env, addr, val, oi, retaddr, MO_LEQ); +} + +void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEQ); + 
store_helper(env, addr, val, oi, retaddr, MO_BEQ); +} + +/* + * Store Helpers for cpu_ldst.h + */ + +typedef void FullStoreHelper(CPUArchState *env, target_ulong addr, + uint64_t val, MemOpIdx oi, uintptr_t retaddr); + +static inline void cpu_store_helper(CPUArchState *env, target_ulong addr, + uint64_t val, MemOpIdx oi, uintptr_t ra, + FullStoreHelper *full_store) +{ + trace_guest_st_before_exec(env_cpu(env), addr, oi); + full_store(env, addr, val, oi, ra); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu); +} + +void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu); +} + +void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu); +} + +void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu); +} + +void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu); +} + +void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu); +} + +void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu); +} + +#include "ldst_common.c.inc" + +/* + * First set of functions passes in OI and RETADDR. + * This makes them callable from other helpers. + */ + +#define ATOMIC_NAME(X) \ + glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) + +#define ATOMIC_MMU_CLEANUP +#define ATOMIC_MMU_IDX get_mmuidx(oi) + +#include "atomic_common.c.inc" + +#define DATA_SIZE 1 +#include "atomic_template.h" + +#define DATA_SIZE 2 +#include "atomic_template.h" + +#define DATA_SIZE 4 +#include "atomic_template.h" + +#ifdef CONFIG_ATOMIC64 +#define DATA_SIZE 8 +#include "atomic_template.h" +#endif + +#if HAVE_CMPXCHG128 || HAVE_ATOMIC128 +#define DATA_SIZE 16 +#include "atomic_template.h" +#endif + +/* Code access functions. 
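/*
 * [Editorial sketch, not part of the patch] The cpu_*_mmu entry points
 * added above take an explicit MemOpIdx and return address, so a
 * hypothetical target helper could drive them directly and still get
 * the tracing and plugin callbacks applied by cpu_load_helper() and
 * cpu_store_helper():
 */
static void sketch_helper_incl(CPUArchState *env, target_ulong addr)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
    uint32_t v = cpu_ldl_le_mmu(env, addr, oi, GETPC());

    cpu_stl_le_mmu(env, addr, v + 1, oi, GETPC());
}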
*/ + +static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); +} + +uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) +{ + MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); + return full_ldub_code(env, addr, oi, 0); +} + +static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); +} + +uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) +{ + MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); + return full_lduw_code(env, addr, oi, 0); +} + +static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); +} + +uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) +{ + MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); + return full_ldl_code(env, addr, oi, 0); +} + +static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code); +} + +uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) +{ + MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true)); + return full_ldq_code(env, addr, oi, 0); +} diff --git a/accel/tcg/hmp.c b/accel/tcg/hmp.c new file mode 100644 index 000000000..d2ea35265 --- /dev/null +++ b/accel/tcg/hmp.c @@ -0,0 +1,15 @@ +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "exec/exec-all.h" +#include "monitor/monitor.h" +#include "sysemu/tcg.h" + +static void hmp_tcg_register(void) +{ + monitor_register_hmp_info_hrt("jit", qmp_x_query_jit); + monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount); +} + +type_init(hmp_tcg_register); diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h new file mode 100644 index 000000000..881bc1ede --- /dev/null +++ b/accel/tcg/internal.h @@ -0,0 +1,22 @@ +/* + * Internal execution defines for qemu + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef ACCEL_TCG_INTERNAL_H +#define ACCEL_TCG_INTERNAL_H + +#include "exec/exec-all.h" + +TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, + target_ulong cs_base, uint32_t flags, + int cflags); + +void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); +void page_init(void); +void tb_htable_init(void); + +#endif /* ACCEL_TCG_INTERNAL_H */ diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc new file mode 100644 index 000000000..bfefb275e --- /dev/null +++ b/accel/tcg/ldst_common.c.inc @@ -0,0 +1,307 @@ +/* + * Routines common to user and system emulation of load/store. + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
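/*
 * [Editorial sketch, not part of the patch] The code-access helpers above
 * are what target translators use to fetch guest opcodes; "pc_next" here
 * stands in for whatever program-counter field a given target keeps:
 */
static uint32_t sketch_fetch_opcode(CPUArchState *env, target_ulong pc_next)
{
    return cpu_ldl_code(env, pc_next);
}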
+ */ + +uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + return cpu_ldb_mmu(env, addr, oi, ra); +} + +int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); + return cpu_ldw_be_mmu(env, addr, oi, ra); +} + +int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); + return cpu_ldl_be_mmu(env, addr, oi, ra); +} + +uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx); + return cpu_ldq_be_mmu(env, addr, oi, ra); +} + +uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); + return cpu_ldw_le_mmu(env, addr, oi, ra); +} + +int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); + return cpu_ldl_le_mmu(env, addr, oi, ra); +} + +uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx); + return cpu_ldq_le_mmu(env, addr, oi, ra); +} + +void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + cpu_stb_mmu(env, addr, val, oi, ra); +} + +void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); + cpu_stw_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); + cpu_stl_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx); + cpu_stq_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); + cpu_stw_le_mmu(env, addr, val, oi, ra); +} + +void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); + cpu_stl_le_mmu(env, addr, val, oi, ra); +} + +void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx); + cpu_stq_le_mmu(env, addr, val, oi, ra); +} + +/*--------------------------*/ + +uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldub_mmuidx_ra(env, addr, 
cpu_mmu_index(env, false), ra); +} + +int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int8_t)cpu_ldub_data_ra(env, addr, ra); +} + +uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int16_t)cpu_lduw_be_data_ra(env, addr, ra); +} + +uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int16_t)cpu_lduw_le_data_ra(env, addr, ra); +} + +uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr, + uint64_t val, uintptr_t ra) +{ + cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr, + uint64_t val, uintptr_t ra) +{ + cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +/*--------------------------*/ + +uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldub_data_ra(env, addr, 0); +} + +int cpu_ldsb_data(CPUArchState *env, abi_ptr addr) +{ + return (int8_t)cpu_ldub_data(env, addr); +} + +uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_lduw_be_data_ra(env, addr, 0); +} + +int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr) +{ + return (int16_t)cpu_lduw_be_data(env, addr); +} + +uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldl_be_data_ra(env, addr, 0); +} + +uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldq_be_data_ra(env, addr, 0); +} + +uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_lduw_le_data_ra(env, addr, 0); +} + +int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr) +{ + return (int16_t)cpu_lduw_le_data(env, addr); +} + +uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldl_le_data_ra(env, addr, 0); 
+} + +uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldq_le_data_ra(env, addr, 0); +} + +void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stb_data_ra(env, addr, val, 0); +} + +void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stw_be_data_ra(env, addr, val, 0); +} + +void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stl_be_data_ra(env, addr, val, 0); +} + +void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val) +{ + cpu_stq_be_data_ra(env, addr, val, 0); +} + +void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stw_le_data_ra(env, addr, val, 0); +} + +void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stl_le_data_ra(env, addr, val, 0); +} + +void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val) +{ + cpu_stq_le_data_ra(env, addr, val, 0); +} diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build new file mode 100644 index 000000000..7a0a79d73 --- /dev/null +++ b/accel/tcg/meson.build @@ -0,0 +1,26 @@ +tcg_ss = ss.source_set() +tcg_ss.add(files( + 'tcg-all.c', + 'cpu-exec-common.c', + 'cpu-exec.c', + 'tcg-runtime-gvec.c', + 'tcg-runtime.c', + 'translate-all.c', + 'translator.c', +)) +tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c')) +tcg_ss.add(when: 'CONFIG_SOFTMMU', if_false: files('user-exec-stub.c')) +tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c')]) +specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss) + +specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files( + 'cputlb.c', + 'hmp.c', +)) + +tcg_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files( + 'tcg-accel-ops.c', + 'tcg-accel-ops-mttcg.c', + 'tcg-accel-ops-icount.c', + 'tcg-accel-ops-rr.c', +)) diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c new file mode 100644 index 000000000..22d95fe1c --- /dev/null +++ b/accel/tcg/plugin-gen.c @@ -0,0 +1,926 @@ +/* + * plugin-gen.c - TCG-related bits of plugin infrastructure + * + * Copyright (C) 2018, Emilio G. Cota <cota@braap.org> + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * We support instrumentation at an instruction granularity. That is, + * if a plugin wants to instrument the memory accesses performed by a + * particular instruction, it can just do that instead of instrumenting + * all memory accesses. Thus, in order to do this we first have to + * translate a TB, so that plugins can decide what/where to instrument. + * + * Injecting the desired instrumentation could be done with a second + * translation pass that combined the instrumentation requests, but that + * would be ugly and inefficient since we would decode the guest code twice. + * Instead, during TB translation we add "empty" instrumentation calls for all + * possible instrumentation events, and then once we collect the instrumentation + * requests from plugins, we either "fill in" those empty events or remove them + * if they have no requests. + * + * When "filling in" an event we first copy the empty callback's TCG ops. This + * might seem unnecessary, but it is done to support an arbitrary number + * of callbacks per event. Take for example a regular instruction callback. + * We first generate a callback to an empty helper function. 
Then, if two + * plugins register one callback each for this instruction, we make two copies + * of the TCG ops generated for the empty callback, substituting the function + * pointer that points to the empty helper function with the plugins' desired + * callback functions. After that we remove the empty callback's ops. + * + * Note that the location in TCGOp.args[] of the pointer to a helper function + * varies across different guest and host architectures. Instead of duplicating + * the logic that figures this out, we rely on the fact that the empty + * callbacks point to empty functions that are unique pointers in the program. + * Thus, to find the right location we just have to look for a match in + * TCGOp.args[]. This is the main reason why we first copy an empty callback's + * TCG ops and then fill them in; regardless of whether we have one or many + * callbacks for that event, the logic to add all of them is the same. + * + * When generating more than one callback per event, we make a small + * optimization to avoid generating redundant operations. For instance, for the + * second and all subsequent callbacks of an event, we do not need to reload the + * CPU's index into a TCG temp, since the first callback did it already. + */ +#include "qemu/osdep.h" +#include "tcg/tcg.h" +#include "tcg/tcg-op.h" +#include "exec/exec-all.h" +#include "exec/plugin-gen.h" +#include "exec/translator.h" + +#ifdef CONFIG_SOFTMMU +# define CONFIG_SOFTMMU_GATE 1 +#else +# define CONFIG_SOFTMMU_GATE 0 +#endif + +/* + * plugin_cb_start TCG op args[]: + * 0: enum plugin_gen_from + * 1: enum plugin_gen_cb + * 2: set to 1 for mem callback that is a write, 0 otherwise. + */ + +enum plugin_gen_from { + PLUGIN_GEN_FROM_TB, + PLUGIN_GEN_FROM_INSN, + PLUGIN_GEN_FROM_MEM, + PLUGIN_GEN_AFTER_INSN, + PLUGIN_GEN_N_FROMS, +}; + +enum plugin_gen_cb { + PLUGIN_GEN_CB_UDATA, + PLUGIN_GEN_CB_INLINE, + PLUGIN_GEN_CB_MEM, + PLUGIN_GEN_ENABLE_MEM_HELPER, + PLUGIN_GEN_DISABLE_MEM_HELPER, + PLUGIN_GEN_N_CBS, +}; + +/* + * These helpers are stubs that get dynamically switched out for calls + * direct to the plugin if they are subscribed to. + */ +void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata) +{ } + +void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index, + qemu_plugin_meminfo_t info, uint64_t vaddr, + void *userdata) +{ } + +static void do_gen_mem_cb(TCGv vaddr, uint32_t info) +{ + TCGv_i32 cpu_index = tcg_temp_new_i32(); + TCGv_i32 meminfo = tcg_const_i32(info); + TCGv_i64 vaddr64 = tcg_temp_new_i64(); + TCGv_ptr udata = tcg_const_ptr(NULL); + + tcg_gen_ld_i32(cpu_index, cpu_env, + -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); + tcg_gen_extu_tl_i64(vaddr64, vaddr); + + gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata); + + tcg_temp_free_ptr(udata); + tcg_temp_free_i64(vaddr64); + tcg_temp_free_i32(meminfo); + tcg_temp_free_i32(cpu_index); +} + +static void gen_empty_udata_cb(void) +{ + TCGv_i32 cpu_index = tcg_temp_new_i32(); + TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */ + + tcg_gen_ld_i32(cpu_index, cpu_env, + -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); + gen_helper_plugin_vcpu_udata_cb(cpu_index, udata); + + tcg_temp_free_ptr(udata); + tcg_temp_free_i32(cpu_index); +} + +/* + * For now we only support addi_i64. + * When we support more ops, we can generate one empty inline cb for each. 
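/*
 * [Editorial sketch, not part of the patch] For orientation, the ops
 * generated here ultimately service plugins written against the public
 * qemu-plugin.h API; a minimal plugin that subscribes to every memory
 * access of every translated instruction looks roughly like this
 * (signatures as recalled from the plugin API of this QEMU generation):
 */
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static void mem_cb(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                   uint64_t vaddr, void *udata)
{
    /* reached through the filled-in plugin_vcpu_mem_cb call slot */
}

static void tb_trans_cb(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n = qemu_plugin_tb_n_insns(tb);

    for (size_t i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);

        qemu_plugin_register_vcpu_mem_cb(insn, mem_cb, QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, NULL);
    }
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans_cb);
    return 0;
}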
+ */ +static void gen_empty_inline_cb(void) +{ + TCGv_i64 val = tcg_temp_new_i64(); + TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */ + + tcg_gen_ld_i64(val, ptr, 0); + /* pass an immediate != 0 so that it doesn't get optimized away */ + tcg_gen_addi_i64(val, val, 0xdeadface); + tcg_gen_st_i64(val, ptr, 0); + tcg_temp_free_ptr(ptr); + tcg_temp_free_i64(val); +} + +static void gen_empty_mem_cb(TCGv addr, uint32_t info) +{ + do_gen_mem_cb(addr, info); +} + +/* + * Share the same function for enable/disable. When enabling, the NULL + * pointer will be overwritten later. + */ +static void gen_empty_mem_helper(void) +{ + TCGv_ptr ptr; + + ptr = tcg_const_ptr(NULL); + tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) - + offsetof(ArchCPU, env)); + tcg_temp_free_ptr(ptr); +} + +static void gen_plugin_cb_start(enum plugin_gen_from from, + enum plugin_gen_cb type, unsigned wr) +{ + tcg_gen_plugin_cb_start(from, type, wr); +} + +static void gen_wrapped(enum plugin_gen_from from, + enum plugin_gen_cb type, void (*func)(void)) +{ + gen_plugin_cb_start(from, type, 0); + func(); + tcg_gen_plugin_cb_end(); +} + +static void plugin_gen_empty_callback(enum plugin_gen_from from) +{ + switch (from) { + case PLUGIN_GEN_AFTER_INSN: + gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER, + gen_empty_mem_helper); + break; + case PLUGIN_GEN_FROM_INSN: + /* + * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being + * the first callback of an instruction + */ + gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER, + gen_empty_mem_helper); + /* fall through */ + case PLUGIN_GEN_FROM_TB: + gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb); + gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb); + break; + default: + g_assert_not_reached(); + } +} + +union mem_gen_fn { + void (*mem_fn)(TCGv, uint32_t); + void (*inline_fn)(void); +}; + +static void gen_mem_wrapped(enum plugin_gen_cb type, + const union mem_gen_fn *f, TCGv addr, + uint32_t info, bool is_mem) +{ + enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); + + gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw); + if (is_mem) { + f->mem_fn(addr, info); + } else { + f->inline_fn(); + } + tcg_gen_plugin_cb_end(); +} + +void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) +{ + union mem_gen_fn fn; + + fn.mem_fn = gen_empty_mem_cb; + gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true); + + fn.inline_fn = gen_empty_inline_cb; + gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false); +} + +static TCGOp *find_op(TCGOp *op, TCGOpcode opc) +{ + while (op) { + if (op->opc == opc) { + return op; + } + op = QTAILQ_NEXT(op, link); + } + return NULL; +} + +static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end) +{ + TCGOp *ret = QTAILQ_NEXT(end, link); + + QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link); + return ret; +} + +/* remove all ops until (and including) plugin_cb_end */ +static TCGOp *rm_ops(TCGOp *op) +{ + TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end); + + tcg_debug_assert(end_op); + return rm_ops_range(op, end_op); +} + +static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op) +{ + *begin_op = QTAILQ_NEXT(*begin_op, link); + tcg_debug_assert(*begin_op); + op = tcg_op_insert_after(tcg_ctx, op, (*begin_op)->opc); + memcpy(op->args, (*begin_op)->args, sizeof(op->args)); + return op; +} + +static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc) +{ + op = copy_op_nocheck(begin_op, op); + tcg_debug_assert((*begin_op)->opc == opc); + return op; +} + +static TCGOp *copy_extu_i32_i64(TCGOp 
**begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* mov_i32 */ + op = copy_op(begin_op, op, INDEX_op_mov_i32); + /* mov_i32 w/ $0 */ + op = copy_op(begin_op, op, INDEX_op_mov_i32); + } else { + /* extu_i32_i64 */ + op = copy_op(begin_op, op, INDEX_op_extu_i32_i64); + } + return op; +} + +static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* 2x mov_i32 */ + op = copy_op(begin_op, op, INDEX_op_mov_i32); + op = copy_op(begin_op, op, INDEX_op_mov_i32); + } else { + /* mov_i64 */ + op = copy_op(begin_op, op, INDEX_op_mov_i64); + } + return op; +} + +static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr) +{ + if (UINTPTR_MAX == UINT32_MAX) { + /* mov_i32 */ + op = copy_op(begin_op, op, INDEX_op_mov_i32); + op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr)); + } else { + /* mov_i64 */ + op = copy_op(begin_op, op, INDEX_op_mov_i64); + op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr)); + } + return op; +} + +static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TARGET_LONG_BITS == 32) { + /* extu_i32_i64 */ + op = copy_extu_i32_i64(begin_op, op); + } else { + /* mov_i64 */ + op = copy_mov_i64(begin_op, op); + } + return op; +} + +static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* 2x ld_i32 */ + op = copy_op(begin_op, op, INDEX_op_ld_i32); + op = copy_op(begin_op, op, INDEX_op_ld_i32); + } else { + /* ld_i64 */ + op = copy_op(begin_op, op, INDEX_op_ld_i64); + } + return op; +} + +static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* 2x st_i32 */ + op = copy_op(begin_op, op, INDEX_op_st_i32); + op = copy_op(begin_op, op, INDEX_op_st_i32); + } else { + /* st_i64 */ + op = copy_op(begin_op, op, INDEX_op_st_i64); + } + return op; +} + +static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* all 32-bit backends must implement add2_i32 */ + g_assert(TCG_TARGET_HAS_add2_i32); + op = copy_op(begin_op, op, INDEX_op_add2_i32); + op->args[4] = tcgv_i32_arg(tcg_constant_i32(v)); + op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32)); + } else { + op = copy_op(begin_op, op, INDEX_op_add_i64); + op->args[2] = tcgv_i64_arg(tcg_constant_i64(v)); + } + return op; +} + +static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op) +{ + if (UINTPTR_MAX == UINT32_MAX) { + /* st_i32 */ + op = copy_op(begin_op, op, INDEX_op_st_i32); + } else { + /* st_i64 */ + op = copy_st_i64(begin_op, op); + } + return op; +} + +static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func, + void *func, int *cb_idx) +{ + /* copy all ops until the call */ + do { + op = copy_op_nocheck(begin_op, op); + } while (op->opc != INDEX_op_call); + + /* fill in the op call */ + op->param1 = (*begin_op)->param1; + op->param2 = (*begin_op)->param2; + tcg_debug_assert(op->life == 0); + if (*cb_idx == -1) { + int i; + + /* + * Instead of working out the position of the callback in args[], just + * look for @empty_func, since it should be a unique pointer. 
+ */ + for (i = 0; i < MAX_OPC_PARAM_ARGS; i++) { + if ((uintptr_t)(*begin_op)->args[i] == (uintptr_t)empty_func) { + *cb_idx = i; + break; + } + } + tcg_debug_assert(i < MAX_OPC_PARAM_ARGS); + } + op->args[*cb_idx] = (uintptr_t)func; + op->args[*cb_idx + 1] = (*begin_op)->args[*cb_idx + 1]; + + return op; +} + +/* + * When we append/replace ops here we are sensitive to changing patterns of + * TCGOps generated by the tcg_gen_FOO calls when we generated the + * empty callbacks. This will assert very quickly in a debug build as + * we assert the ops we are replacing are the correct ones. + */ +static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, int *cb_idx) +{ + /* const_ptr */ + op = copy_const_ptr(&begin_op, op, cb->userp); + + /* copy the ld_i32, but note that we only have to copy it once */ + begin_op = QTAILQ_NEXT(begin_op, link); + tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32); + if (*cb_idx == -1) { + op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32); + memcpy(op->args, begin_op->args, sizeof(op->args)); + } + + /* call */ + op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb), + cb->f.vcpu_udata, cb_idx); + + return op; +} + +static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, + int *unused) +{ + /* const_ptr */ + op = copy_const_ptr(&begin_op, op, cb->userp); + + /* ld_i64 */ + op = copy_ld_i64(&begin_op, op); + + /* add_i64 */ + op = copy_add_i64(&begin_op, op, cb->inline_insn.imm); + + /* st_i64 */ + op = copy_st_i64(&begin_op, op); + + return op; +} + +static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, int *cb_idx) +{ + enum plugin_gen_cb type = begin_op->args[1]; + + tcg_debug_assert(type == PLUGIN_GEN_CB_MEM); + + /* const_i32 == mov_i32 ("info", so it remains as is) */ + op = copy_op(&begin_op, op, INDEX_op_mov_i32); + + /* const_ptr */ + op = copy_const_ptr(&begin_op, op, cb->userp); + + /* copy the ld_i32, but note that we only have to copy it once */ + begin_op = QTAILQ_NEXT(begin_op, link); + tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32); + if (*cb_idx == -1) { + op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32); + memcpy(op->args, begin_op->args, sizeof(op->args)); + } + + /* extu_tl_i64 */ + op = copy_extu_tl_i64(&begin_op, op); + + if (type == PLUGIN_GEN_CB_MEM) { + /* call */ + op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb), + cb->f.vcpu_udata, cb_idx); + } + + return op; +} + +typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, int *intp); +typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb); + +static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb) +{ + return true; +} + +static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb) +{ + int w; + + w = op->args[2]; + return !!(cb->rw & (w + 1)); +} + +static void inject_cb_type(const GArray *cbs, TCGOp *begin_op, + inject_fn inject, op_ok_fn ok) +{ + TCGOp *end_op; + TCGOp *op; + int cb_idx = -1; + int i; + + if (!cbs || cbs->len == 0) { + rm_ops(begin_op); + return; + } + + end_op = find_op(begin_op, INDEX_op_plugin_cb_end); + tcg_debug_assert(end_op); + + op = end_op; + for (i = 0; i < cbs->len; i++) { + struct qemu_plugin_dyn_cb *cb = + &g_array_index(cbs, struct qemu_plugin_dyn_cb, i); + + if (!ok(begin_op, cb)) { + continue; + } + op = inject(cb, begin_op, op, &cb_idx); + } + rm_ops_range(begin_op, end_op); +} + +static void 
+inject_udata_cb(const GArray *cbs, TCGOp *begin_op) +{ + inject_cb_type(cbs, begin_op, append_udata_cb, op_ok); +} + +static void +inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok) +{ + inject_cb_type(cbs, begin_op, append_inline_cb, ok); +} + +static void +inject_mem_cb(const GArray *cbs, TCGOp *begin_op) +{ + inject_cb_type(cbs, begin_op, append_mem_cb, op_rw); +} + +/* we could change the ops in place, but we can reuse more code by copying */ +static void inject_mem_helper(TCGOp *begin_op, GArray *arr) +{ + TCGOp *orig_op = begin_op; + TCGOp *end_op; + TCGOp *op; + + end_op = find_op(begin_op, INDEX_op_plugin_cb_end); + tcg_debug_assert(end_op); + + /* const ptr */ + op = copy_const_ptr(&begin_op, end_op, arr); + + /* st_ptr */ + op = copy_st_ptr(&begin_op, op); + + rm_ops_range(orig_op, end_op); +} + +/* + * Tracking memory accesses performed from helpers requires extra work. + * If an instruction is emulated with helpers, we do two things: + * (1) copy the CB descriptors, and keep track of it so that they can be + * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so + * that we can read them at run-time (i.e. when the helper executes). + * This run-time access is performed from qemu_plugin_vcpu_mem_cb. + * + * Note that plugin_gen_disable_mem_helpers undoes (2). Since it + * is possible that the code we generate after the instruction is + * dead, we also add checks before generating tb_exit etc. + */ +static void inject_mem_enable_helper(struct qemu_plugin_insn *plugin_insn, + TCGOp *begin_op) +{ + GArray *cbs[2]; + GArray *arr; + size_t n_cbs, i; + + cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR]; + cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE]; + + n_cbs = 0; + for (i = 0; i < ARRAY_SIZE(cbs); i++) { + n_cbs += cbs[i]->len; + } + + plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs; + if (likely(!plugin_insn->mem_helper)) { + rm_ops(begin_op); + return; + } + + arr = g_array_sized_new(false, false, + sizeof(struct qemu_plugin_dyn_cb), n_cbs); + + for (i = 0; i < ARRAY_SIZE(cbs); i++) { + g_array_append_vals(arr, cbs[i]->data, cbs[i]->len); + } + + qemu_plugin_add_dyn_cb_arr(arr); + inject_mem_helper(begin_op, arr); +} + +static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn, + TCGOp *begin_op) +{ + if (likely(!plugin_insn->mem_helper)) { + rm_ops(begin_op); + return; + } + inject_mem_helper(begin_op, NULL); +} + +/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */ +void plugin_gen_disable_mem_helpers(void) +{ + TCGv_ptr ptr; + + if (likely(tcg_ctx->plugin_insn == NULL || + !tcg_ctx->plugin_insn->mem_helper)) { + return; + } + ptr = tcg_const_ptr(NULL); + tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) - + offsetof(ArchCPU, env)); + tcg_temp_free_ptr(ptr); + tcg_ctx->plugin_insn->mem_helper = false; +} + +static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op) +{ + inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op); +} + +static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op) +{ + inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok); +} + +static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + + inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op); +} + +static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb, + TCGOp 
*begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE], + begin_op, op_ok); +} + +static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op); +} + +static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + const GArray *cbs; + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + + cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE]; + inject_inline_cb(cbs, begin_op, op_rw); +} + +static void plugin_gen_enable_mem_helper(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_mem_enable_helper(insn, begin_op); +} + +static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_mem_disable_helper(insn, begin_op); +} + +/* #define DEBUG_PLUGIN_GEN_OPS */ +static void pr_ops(void) +{ +#ifdef DEBUG_PLUGIN_GEN_OPS + TCGOp *op; + int i = 0; + + QTAILQ_FOREACH(op, &tcg_ctx->ops, link) { + const char *name = ""; + const char *type = ""; + + if (op->opc == INDEX_op_plugin_cb_start) { + switch (op->args[0]) { + case PLUGIN_GEN_FROM_TB: + name = "tb"; + break; + case PLUGIN_GEN_FROM_INSN: + name = "insn"; + break; + case PLUGIN_GEN_FROM_MEM: + name = "mem"; + break; + case PLUGIN_GEN_AFTER_INSN: + name = "after insn"; + break; + default: + break; + } + switch (op->args[1]) { + case PLUGIN_GEN_CB_UDATA: + type = "udata"; + break; + case PLUGIN_GEN_CB_INLINE: + type = "inline"; + break; + case PLUGIN_GEN_CB_MEM: + type = "mem"; + break; + case PLUGIN_GEN_ENABLE_MEM_HELPER: + type = "enable mem helper"; + break; + case PLUGIN_GEN_DISABLE_MEM_HELPER: + type = "disable mem helper"; + break; + default: + break; + } + } + printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type); + i++; + } +#endif +} + +static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb) +{ + TCGOp *op; + int insn_idx = -1; + + pr_ops(); + + QTAILQ_FOREACH(op, &tcg_ctx->ops, link) { + switch (op->opc) { + case INDEX_op_insn_start: + insn_idx++; + break; + case INDEX_op_plugin_cb_start: + { + enum plugin_gen_from from = op->args[0]; + enum plugin_gen_cb type = op->args[1]; + + switch (from) { + case PLUGIN_GEN_FROM_TB: + { + g_assert(insn_idx == -1); + + switch (type) { + case PLUGIN_GEN_CB_UDATA: + plugin_gen_tb_udata(plugin_tb, op); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_tb_inline(plugin_tb, op); + break; + default: + g_assert_not_reached(); + } + break; + } + case PLUGIN_GEN_FROM_INSN: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_CB_UDATA: + plugin_gen_insn_udata(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_insn_inline(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_ENABLE_MEM_HELPER: + plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx); + break; + default: + g_assert_not_reached(); + } + break; + } + case PLUGIN_GEN_FROM_MEM: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_CB_MEM: + plugin_gen_mem_regular(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_mem_inline(plugin_tb, op, insn_idx); + 
break; + default: + g_assert_not_reached(); + } + + break; + } + case PLUGIN_GEN_AFTER_INSN: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_DISABLE_MEM_HELPER: + plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx); + break; + default: + g_assert_not_reached(); + } + break; + } + default: + g_assert_not_reached(); + } + break; + } + default: + /* plugins don't care about any other ops */ + break; + } + } + pr_ops(); +} + +bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only) +{ + struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; + bool ret = false; + + if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) { + ret = true; + + ptb->vaddr = tb->pc; + ptb->vaddr2 = -1; + get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1); + ptb->haddr2 = NULL; + ptb->mem_only = mem_only; + + plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB); + } + return ret; +} + +void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db) +{ + struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; + struct qemu_plugin_insn *pinsn; + + pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next); + tcg_ctx->plugin_insn = pinsn; + plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN); + + /* + * Detect page crossing to get the new host address. + * Note that we skip this when haddr1 == NULL, e.g. when we're + * fetching instructions from a region not backed by RAM. + */ + if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) && + unlikely((db->pc_next & TARGET_PAGE_MASK) != + (db->pc_first & TARGET_PAGE_MASK))) { + get_page_addr_code_hostp(cpu->env_ptr, db->pc_next, + &ptb->haddr2); + ptb->vaddr2 = db->pc_next; + } + if (likely(ptb->vaddr2 == -1)) { + pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr; + } else { + pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2; + } +} + +void plugin_gen_insn_end(void) +{ + plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN); +} + +void plugin_gen_tb_end(CPUState *cpu) +{ + struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; + int i; + + /* collect instrumentation requests */ + qemu_plugin_tb_trans_cb(cpu, ptb); + + /* inject the instrumentation at the appropriate places */ + plugin_gen_inject(ptb); + + /* clean up */ + for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) { + if (ptb->cbs[i]) { + g_array_set_size(ptb->cbs[i], 0); + } + } + ptb->n = 0; + tcg_ctx->plugin_insn = NULL; +} diff --git a/accel/tcg/plugin-helpers.h b/accel/tcg/plugin-helpers.h new file mode 100644 index 000000000..9829abe4a --- /dev/null +++ b/accel/tcg/plugin-helpers.h @@ -0,0 +1,4 @@ +#ifdef CONFIG_PLUGIN +DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb, TCG_CALL_NO_RWG, void, i32, ptr) +DEF_HELPER_FLAGS_4(plugin_vcpu_mem_cb, TCG_CALL_NO_RWG, void, i32, i32, i64, ptr) +#endif diff --git a/accel/tcg/tb-context.h b/accel/tcg/tb-context.h new file mode 100644 index 000000000..cac62d974 --- /dev/null +++ b/accel/tcg/tb-context.h @@ -0,0 +1,42 @@ +/* + * Internal structs that QEMU exports to TCG + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef QEMU_TB_CONTEXT_H +#define QEMU_TB_CONTEXT_H + +#include "qemu/thread.h" +#include "qemu/qht.h" + +#define CODE_GEN_HTABLE_BITS 15 +#define CODE_GEN_HTABLE_SIZE (1 << CODE_GEN_HTABLE_BITS) + +typedef struct TBContext TBContext; + +struct TBContext { + + struct qht htable; + + /* statistics */ + unsigned tb_flush_count; + unsigned tb_phys_invalidate_count; +}; + +extern TBContext tb_ctx; + +#endif diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h new file mode 100644 index 000000000..0a273d960 --- /dev/null +++ b/accel/tcg/tb-hash.h @@ -0,0 +1,69 @@ +/* + * internal execution defines for qemu + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef EXEC_TB_HASH_H +#define EXEC_TB_HASH_H + +#include "exec/cpu-defs.h" +#include "exec/exec-all.h" +#include "qemu/xxhash.h" + +#ifdef CONFIG_SOFTMMU + +/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for + addresses on the same page. The top bits are the same. This allows + TLB invalidation to quickly clear a subset of the hash table. */ +#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2) +#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS) +#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1) +#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE) + +static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc) +{ + target_ulong tmp; + tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); + return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK; +} + +static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) +{ + target_ulong tmp; + tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); + return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK) + | (tmp & TB_JMP_ADDR_MASK)); +} + +#else + +/* In user-mode we can get better hashing because we do not have a TLB */ +static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) +{ + return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1); +} + +#endif /* CONFIG_SOFTMMU */ + +static inline +uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags, + uint32_t cf_mask, uint32_t trace_vcpu_dstate) +{ + return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate); +} + +#endif diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c new file mode 100644 index 000000000..ea42d1d51 --- /dev/null +++ b/accel/tcg/tcg-accel-ops-icount.c @@ -0,0 +1,143 @@ +/* + * QEMU TCG Single Threaded vCPUs implementation using instruction counting + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. 
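/*
 * [Editorial sketch, not part of the patch] How the hashes defined in
 * tb-hash.h above are typically consumed: tb_jmp_cache_hash_func() indexes
 * the per-vCPU jump cache, while tb_hash_func() keys the global QHT of
 * translated blocks.
 */
static inline TranslationBlock *sketch_jmp_cache_lookup(CPUState *cpu,
                                                        target_ulong pc)
{
    return qatomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
}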
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "sysemu/tcg.h" +#include "sysemu/replay.h" +#include "qemu/main-loop.h" +#include "qemu/guest-random.h" +#include "exec/exec-all.h" + +#include "tcg-accel-ops.h" +#include "tcg-accel-ops-icount.h" +#include "tcg-accel-ops-rr.h" + +static int64_t icount_get_limit(void) +{ + int64_t deadline; + + if (replay_mode != REPLAY_MODE_PLAY) { + /* + * Include all the timers, because they may need an attention. + * Too long CPU execution may create unnecessary delay in UI. + */ + deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, + QEMU_TIMER_ATTR_ALL); + /* Check realtime timers, because they help with input processing */ + deadline = qemu_soonest_timeout(deadline, + qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME, + QEMU_TIMER_ATTR_ALL)); + + /* + * Maintain prior (possibly buggy) behaviour where if no deadline + * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than + * INT32_MAX nanoseconds ahead, we still use INT32_MAX + * nanoseconds. + */ + if ((deadline < 0) || (deadline > INT32_MAX)) { + deadline = INT32_MAX; + } + + return icount_round(deadline); + } else { + return replay_get_instructions(); + } +} + +static void icount_notify_aio_contexts(void) +{ + /* Wake up other AioContexts. */ + qemu_clock_notify(QEMU_CLOCK_VIRTUAL); + qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL); +} + +void icount_handle_deadline(void) +{ + assert(qemu_in_vcpu_thread()); + int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, + QEMU_TIMER_ATTR_ALL); + + /* + * Instructions, interrupts, and exceptions are processed in cpu-exec. + * Don't interrupt cpu thread, when these events are waiting + * (i.e., there is no checkpoint) + */ + if (deadline == 0 + && (replay_mode != REPLAY_MODE_PLAY || replay_has_checkpoint())) { + icount_notify_aio_contexts(); + } +} + +void icount_prepare_for_run(CPUState *cpu) +{ + int insns_left; + + /* + * These should always be cleared by icount_process_data after + * each vCPU execution. 
However u16.high can be raised + * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt + */ + g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0); + g_assert(cpu->icount_extra == 0); + + cpu->icount_budget = icount_get_limit(); + insns_left = MIN(0xffff, cpu->icount_budget); + cpu_neg(cpu)->icount_decr.u16.low = insns_left; + cpu->icount_extra = cpu->icount_budget - insns_left; + + replay_mutex_lock(); + + if (cpu->icount_budget == 0 && replay_has_checkpoint()) { + icount_notify_aio_contexts(); + } +} + +void icount_process_data(CPUState *cpu) +{ + /* Account for executed instructions */ + icount_update(cpu); + + /* Reset the counters */ + cpu_neg(cpu)->icount_decr.u16.low = 0; + cpu->icount_extra = 0; + cpu->icount_budget = 0; + + replay_account_executed_instructions(); + + replay_mutex_unlock(); +} + +void icount_handle_interrupt(CPUState *cpu, int mask) +{ + int old_mask = cpu->interrupt_request; + + tcg_handle_interrupt(cpu, mask); + if (qemu_cpu_is_self(cpu) && + !cpu->can_do_io + && (mask & ~old_mask) != 0) { + cpu_abort(cpu, "Raised interrupt while not in I/O function"); + } +} diff --git a/accel/tcg/tcg-accel-ops-icount.h b/accel/tcg/tcg-accel-ops-icount.h new file mode 100644 index 000000000..d884aa2aa --- /dev/null +++ b/accel/tcg/tcg-accel-ops-icount.h @@ -0,0 +1,19 @@ +/* + * QEMU TCG Single Threaded vCPUs implementation using instruction counting + * + * Copyright 2020 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef TCG_CPUS_ICOUNT_H +#define TCG_CPUS_ICOUNT_H + +void icount_handle_deadline(void); +void icount_prepare_for_run(CPUState *cpu); +void icount_process_data(CPUState *cpu); + +void icount_handle_interrupt(CPUState *cpu, int mask); + +#endif /* TCG_CPUS_ICOUNT_H */ diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c new file mode 100644 index 000000000..29632bd4c --- /dev/null +++ b/accel/tcg/tcg-accel-ops-mttcg.c @@ -0,0 +1,159 @@ +/* + * QEMU TCG Multi Threaded vCPUs implementation + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
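/*
 * [Editorial worked example, not part of the patch] icount_prepare_for_run()
 * above splits the instruction budget between the 16-bit TB decrementer and
 * icount_extra.  For a budget of 0x12345 instructions:
 *   insns_left          = MIN(0xffff, 0x12345) = 0xffff
 *   icount_decr.u16.low = 0xffff   (counted down by generated code)
 *   icount_extra        = 0x12345 - 0xffff    = 0x2346
 * so at most a 16-bit slice of the budget is in flight per execution window.
 */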
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "sysemu/tcg.h" +#include "sysemu/replay.h" +#include "qemu/main-loop.h" +#include "qemu/notify.h" +#include "qemu/guest-random.h" +#include "exec/exec-all.h" +#include "hw/boards.h" + +#include "tcg-accel-ops.h" +#include "tcg-accel-ops-mttcg.h" + +typedef struct MttcgForceRcuNotifier { + Notifier notifier; + CPUState *cpu; +} MttcgForceRcuNotifier; + +static void do_nothing(CPUState *cpu, run_on_cpu_data d) +{ +} + +static void mttcg_force_rcu(Notifier *notify, void *data) +{ + CPUState *cpu = container_of(notify, MttcgForceRcuNotifier, notifier)->cpu; + + /* + * Called with rcu_registry_lock held, using async_run_on_cpu() ensures + * that there are no deadlocks. + */ + async_run_on_cpu(cpu, do_nothing, RUN_ON_CPU_NULL); +} + +/* + * In the multi-threaded case each vCPU has its own thread. The TLS + * variable current_cpu can be used deep in the code to find the + * current CPUState for a given thread. + */ + +static void *mttcg_cpu_thread_fn(void *arg) +{ + MttcgForceRcuNotifier force_rcu; + CPUState *cpu = arg; + + assert(tcg_enabled()); + g_assert(!icount_enabled()); + + rcu_register_thread(); + force_rcu.notifier.notify = mttcg_force_rcu; + force_rcu.cpu = cpu; + rcu_add_force_rcu_notifier(&force_rcu.notifier); + tcg_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + + cpu->thread_id = qemu_get_thread_id(); + cpu->can_do_io = 1; + current_cpu = cpu; + cpu_thread_signal_created(cpu); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + /* process any pending work */ + cpu->exit_request = 1; + + do { + if (cpu_can_run(cpu)) { + int r; + qemu_mutex_unlock_iothread(); + r = tcg_cpus_exec(cpu); + qemu_mutex_lock_iothread(); + switch (r) { + case EXCP_DEBUG: + cpu_handle_guest_debug(cpu); + break; + case EXCP_HALTED: + /* + * during start-up the vCPU is reset and the thread is + * kicked several times. If we don't ensure we go back + * to sleep in the halted state we won't cleanly + * start-up when the vCPU is enabled. + * + * cpu->halted should ensure we sleep in wait_io_event + */ + g_assert(cpu->halted); + break; + case EXCP_ATOMIC: + qemu_mutex_unlock_iothread(); + cpu_exec_step_atomic(cpu); + qemu_mutex_lock_iothread(); + default: + /* Ignore everything else? 
*/ + break; + } + } + + qatomic_mb_set(&cpu->exit_request, 0); + qemu_wait_io_event(cpu); + } while (!cpu->unplug || cpu_can_run(cpu)); + + tcg_cpus_destroy(cpu); + qemu_mutex_unlock_iothread(); + rcu_remove_force_rcu_notifier(&force_rcu.notifier); + rcu_unregister_thread(); + return NULL; +} + +void mttcg_kick_vcpu_thread(CPUState *cpu) +{ + cpu_exit(cpu); +} + +void mttcg_start_vcpu_thread(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + g_assert(tcg_enabled()); + tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1); + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + + /* create a thread per vCPU with TCG (MTTCG) */ + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG", + cpu->cpu_index); + + qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); + +#ifdef _WIN32 + cpu->hThread = qemu_thread_get_handle(cpu->thread); +#endif +} diff --git a/accel/tcg/tcg-accel-ops-mttcg.h b/accel/tcg/tcg-accel-ops-mttcg.h new file mode 100644 index 000000000..9fdc5a2ab --- /dev/null +++ b/accel/tcg/tcg-accel-ops-mttcg.h @@ -0,0 +1,19 @@ +/* + * QEMU TCG Multi Threaded vCPUs implementation + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef TCG_CPUS_MTTCG_H +#define TCG_CPUS_MTTCG_H + +/* kick MTTCG vCPU thread */ +void mttcg_kick_vcpu_thread(CPUState *cpu); + +/* start an mttcg vCPU thread */ +void mttcg_start_vcpu_thread(CPUState *cpu); + +#endif /* TCG_CPUS_MTTCG_H */ diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c new file mode 100644 index 000000000..bf59f53db --- /dev/null +++ b/accel/tcg/tcg-accel-ops-rr.c @@ -0,0 +1,305 @@ +/* + * QEMU TCG Single Threaded vCPUs implementation + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "sysemu/tcg.h" +#include "sysemu/replay.h" +#include "qemu/main-loop.h" +#include "qemu/notify.h" +#include "qemu/guest-random.h" +#include "exec/exec-all.h" + +#include "tcg-accel-ops.h" +#include "tcg-accel-ops-rr.h" +#include "tcg-accel-ops-icount.h" + +/* Kick all RR vCPUs */ +void rr_kick_vcpu_thread(CPUState *unused) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + cpu_exit(cpu); + }; +} + +/* + * TCG vCPU kick timer + * + * The kick timer is responsible for moving single threaded vCPU + * emulation on to the next vCPU. If more than one vCPU is running a + * timer event with force a cpu->exit so the next vCPU can get + * scheduled. + * + * The timer is removed if all vCPUs are idle and restarted again once + * idleness is complete. + */ + +static QEMUTimer *rr_kick_vcpu_timer; +static CPUState *rr_current_cpu; + +static inline int64_t rr_next_kick_time(void) +{ + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD; +} + +/* Kick the currently round-robin scheduled vCPU to next */ +static void rr_kick_next_cpu(void) +{ + CPUState *cpu; + do { + cpu = qatomic_mb_read(&rr_current_cpu); + if (cpu) { + cpu_exit(cpu); + } + } while (cpu != qatomic_mb_read(&rr_current_cpu)); +} + +static void rr_kick_thread(void *opaque) +{ + timer_mod(rr_kick_vcpu_timer, rr_next_kick_time()); + rr_kick_next_cpu(); +} + +static void rr_start_kick_timer(void) +{ + if (!rr_kick_vcpu_timer && CPU_NEXT(first_cpu)) { + rr_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + rr_kick_thread, NULL); + } + if (rr_kick_vcpu_timer && !timer_pending(rr_kick_vcpu_timer)) { + timer_mod(rr_kick_vcpu_timer, rr_next_kick_time()); + } +} + +static void rr_stop_kick_timer(void) +{ + if (rr_kick_vcpu_timer && timer_pending(rr_kick_vcpu_timer)) { + timer_del(rr_kick_vcpu_timer); + } +} + +static void rr_wait_io_event(void) +{ + CPUState *cpu; + + while (all_cpu_threads_idle()) { + rr_stop_kick_timer(); + qemu_cond_wait_iothread(first_cpu->halt_cond); + } + + rr_start_kick_timer(); + + CPU_FOREACH(cpu) { + qemu_wait_io_event_common(cpu); + } +} + +/* + * Destroy any remaining vCPUs which have been unplugged and have + * finished running + */ +static void rr_deal_with_unplugged_cpus(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (cpu->unplug && !cpu_can_run(cpu)) { + tcg_cpus_destroy(cpu); + break; + } + } +} + +static void rr_force_rcu(Notifier *notify, void *data) +{ + rr_kick_next_cpu(); +} + +/* + * In the single-threaded case each vCPU is simulated in turn. If + * there is more than a single vCPU we create a simple timer to kick + * the vCPU and ensure we don't get stuck in a tight loop in one vCPU. + * This is done explicitly rather than relying on side-effects + * elsewhere. 
+ */ + +static void *rr_cpu_thread_fn(void *arg) +{ + Notifier force_rcu; + CPUState *cpu = arg; + + assert(tcg_enabled()); + rcu_register_thread(); + force_rcu.notify = rr_force_rcu; + rcu_add_force_rcu_notifier(&force_rcu); + tcg_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + + cpu->thread_id = qemu_get_thread_id(); + cpu->can_do_io = 1; + cpu_thread_signal_created(cpu); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + /* wait for initial kick-off after machine start */ + while (first_cpu->stopped) { + qemu_cond_wait_iothread(first_cpu->halt_cond); + + /* process any pending work */ + CPU_FOREACH(cpu) { + current_cpu = cpu; + qemu_wait_io_event_common(cpu); + } + } + + rr_start_kick_timer(); + + cpu = first_cpu; + + /* process any pending work */ + cpu->exit_request = 1; + + while (1) { + qemu_mutex_unlock_iothread(); + replay_mutex_lock(); + qemu_mutex_lock_iothread(); + + if (icount_enabled()) { + /* Account partial waits to QEMU_CLOCK_VIRTUAL. */ + icount_account_warp_timer(); + /* + * Run the timers here. This is much more efficient than + * waking up the I/O thread and waiting for completion. + */ + icount_handle_deadline(); + } + + replay_mutex_unlock(); + + if (!cpu) { + cpu = first_cpu; + } + + while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) { + + qatomic_mb_set(&rr_current_cpu, cpu); + current_cpu = cpu; + + qemu_clock_enable(QEMU_CLOCK_VIRTUAL, + (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0); + + if (cpu_can_run(cpu)) { + int r; + + qemu_mutex_unlock_iothread(); + if (icount_enabled()) { + icount_prepare_for_run(cpu); + } + r = tcg_cpus_exec(cpu); + if (icount_enabled()) { + icount_process_data(cpu); + } + qemu_mutex_lock_iothread(); + + if (r == EXCP_DEBUG) { + cpu_handle_guest_debug(cpu); + break; + } else if (r == EXCP_ATOMIC) { + qemu_mutex_unlock_iothread(); + cpu_exec_step_atomic(cpu); + qemu_mutex_lock_iothread(); + break; + } + } else if (cpu->stop) { + if (cpu->unplug) { + cpu = CPU_NEXT(cpu); + } + break; + } + + cpu = CPU_NEXT(cpu); + } /* while (cpu && !cpu->exit_request).. */ + + /* Does not need qatomic_mb_set because a spurious wakeup is okay. */ + qatomic_set(&rr_current_cpu, NULL); + + if (cpu && cpu->exit_request) { + qatomic_mb_set(&cpu->exit_request, 0); + } + + if (icount_enabled() && all_cpu_threads_idle()) { + /* + * When all cpus are sleeping (e.g in WFI), to avoid a deadlock + * in the main_loop, wake it up in order to start the warp timer. 
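 *
 * Editorial note, not part of this patch: qemu_notify_event() below only
 * pokes the main loop, which is the place that arms the warp timer; once
 * that timer fires, QEMU_CLOCK_VIRTUAL is advanced and the next pass
 * through the outer loop accounts for it, roughly as
 *
 *     icount_account_warp_timer();
 *     icount_handle_deadline();
 *
 * before a vCPU is scheduled again, so virtual time keeps moving even
 * while every guest CPU is idle.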
+ */ + qemu_notify_event(); + } + + rr_wait_io_event(); + rr_deal_with_unplugged_cpus(); + } + + rcu_remove_force_rcu_notifier(&force_rcu); + rcu_unregister_thread(); + return NULL; +} + +void rr_start_vcpu_thread(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + static QemuCond *single_tcg_halt_cond; + static QemuThread *single_tcg_cpu_thread; + + g_assert(tcg_enabled()); + tcg_cpu_init_cflags(cpu, false); + + if (!single_tcg_cpu_thread) { + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + + /* share a single thread for all cpus with TCG */ + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG"); + qemu_thread_create(cpu->thread, thread_name, + rr_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); + + single_tcg_halt_cond = cpu->halt_cond; + single_tcg_cpu_thread = cpu->thread; +#ifdef _WIN32 + cpu->hThread = qemu_thread_get_handle(cpu->thread); +#endif + } else { + /* we share the thread */ + cpu->thread = single_tcg_cpu_thread; + cpu->halt_cond = single_tcg_halt_cond; + cpu->thread_id = first_cpu->thread_id; + cpu->can_do_io = 1; + cpu->created = true; + } +} diff --git a/accel/tcg/tcg-accel-ops-rr.h b/accel/tcg/tcg-accel-ops-rr.h new file mode 100644 index 000000000..54f6ae6e8 --- /dev/null +++ b/accel/tcg/tcg-accel-ops-rr.h @@ -0,0 +1,21 @@ +/* + * QEMU TCG Single Threaded vCPUs implementation + * + * Copyright 2020 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef TCG_CPUS_RR_H +#define TCG_CPUS_RR_H + +#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10) + +/* Kick all RR vCPUs. */ +void rr_kick_vcpu_thread(CPUState *unused); + +/* start the round robin vcpu thread */ +void rr_start_vcpu_thread(CPUState *cpu); + +#endif /* TCG_CPUS_RR_H */ diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c new file mode 100644 index 000000000..1a8e8390b --- /dev/null +++ b/accel/tcg/tcg-accel-ops.c @@ -0,0 +1,133 @@ +/* + * QEMU TCG vCPU common functionality + * + * Functionality common to all TCG vCPU variants: mttcg, rr and icount. + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "sysemu/tcg.h" +#include "sysemu/replay.h" +#include "qemu/main-loop.h" +#include "qemu/guest-random.h" +#include "exec/exec-all.h" + +#include "tcg-accel-ops.h" +#include "tcg-accel-ops-mttcg.h" +#include "tcg-accel-ops-rr.h" +#include "tcg-accel-ops-icount.h" + +/* common functionality among all TCG variants */ + +void tcg_cpu_init_cflags(CPUState *cpu, bool parallel) +{ + uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT; + cflags |= parallel ? CF_PARALLEL : 0; + cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; + cpu->tcg_cflags = cflags; +} + +void tcg_cpus_destroy(CPUState *cpu) +{ + cpu_thread_signal_destroyed(cpu); +} + +int tcg_cpus_exec(CPUState *cpu) +{ + int ret; +#ifdef CONFIG_PROFILER + int64_t ti; +#endif + assert(tcg_enabled()); +#ifdef CONFIG_PROFILER + ti = profile_getclock(); +#endif + cpu_exec_start(cpu); + ret = cpu_exec(cpu); + cpu_exec_end(cpu); +#ifdef CONFIG_PROFILER + qatomic_set(&tcg_ctx->prof.cpu_exec_time, + tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti); +#endif + return ret; +} + +/* mask must never be zero, except for A20 change call */ +void tcg_handle_interrupt(CPUState *cpu, int mask) +{ + g_assert(qemu_mutex_iothread_locked()); + + cpu->interrupt_request |= mask; + + /* + * If called from iothread context, wake the target cpu in + * case its halted. + */ + if (!qemu_cpu_is_self(cpu)) { + qemu_cpu_kick(cpu); + } else { + qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); + } +} + +static void tcg_accel_ops_init(AccelOpsClass *ops) +{ + if (qemu_tcg_mttcg_enabled()) { + ops->create_vcpu_thread = mttcg_start_vcpu_thread; + ops->kick_vcpu_thread = mttcg_kick_vcpu_thread; + ops->handle_interrupt = tcg_handle_interrupt; + } else if (icount_enabled()) { + ops->create_vcpu_thread = rr_start_vcpu_thread; + ops->kick_vcpu_thread = rr_kick_vcpu_thread; + ops->handle_interrupt = icount_handle_interrupt; + ops->get_virtual_clock = icount_get; + ops->get_elapsed_ticks = icount_get; + } else { + ops->create_vcpu_thread = rr_start_vcpu_thread; + ops->kick_vcpu_thread = rr_kick_vcpu_thread; + ops->handle_interrupt = tcg_handle_interrupt; + } +} + +static void tcg_accel_ops_class_init(ObjectClass *oc, void *data) +{ + AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); + + ops->ops_init = tcg_accel_ops_init; +} + +static const TypeInfo tcg_accel_ops_type = { + .name = ACCEL_OPS_NAME("tcg"), + + .parent = TYPE_ACCEL_OPS, + .class_init = tcg_accel_ops_class_init, + .abstract = true, +}; +module_obj(ACCEL_OPS_NAME("tcg")); + +static void tcg_accel_ops_register_types(void) +{ + type_register_static(&tcg_accel_ops_type); +} +type_init(tcg_accel_ops_register_types); diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h new file mode 100644 index 000000000..6a5fcef88 --- /dev/null +++ b/accel/tcg/tcg-accel-ops.h @@ -0,0 +1,22 @@ +/* + * QEMU TCG vCPU common functionality + * + * Functionality common to all TCG vcpu variants: mttcg, rr and icount. + * + * Copyright 2020 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef TCG_CPUS_H +#define TCG_CPUS_H + +#include "sysemu/cpus.h" + +void tcg_cpus_destroy(CPUState *cpu); +int tcg_cpus_exec(CPUState *cpu); +void tcg_handle_interrupt(CPUState *cpu, int mask); +void tcg_cpu_init_cflags(CPUState *cpu, bool parallel); + +#endif /* TCG_CPUS_H */ diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c new file mode 100644 index 000000000..d6336a9c9 --- /dev/null +++ b/accel/tcg/tcg-all.c @@ -0,0 +1,248 @@ +/* + * QEMU System Emulator, accelerator interfaces + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "sysemu/tcg.h" +#include "sysemu/cpu-timers.h" +#include "tcg/tcg.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/accel.h" +#include "qapi/qapi-builtin-visit.h" +#include "qemu/units.h" +#if !defined(CONFIG_USER_ONLY) +#include "hw/boards.h" +#endif +#include "internal.h" + +struct TCGState { + AccelState parent_obj; + + bool mttcg_enabled; + int splitwx_enabled; + unsigned long tb_size; +}; +typedef struct TCGState TCGState; + +#define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg") + +DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE, + TYPE_TCG_ACCEL) + +/* + * We default to false if we know other options have been enabled + * which are currently incompatible with MTTCG. Otherwise when each + * guest (target) has been updated to support: + * - atomic instructions + * - memory ordering primitives (barriers) + * they can set the appropriate CONFIG flags in ${target}-softmmu.mak + * + * Once a guest architecture has been converted to the new primitives + * there are two remaining limitations to check. + * + * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host) + * - The host must have a stronger memory order than the guest + * + * It may be possible in future to support strong guests on weak hosts + * but that will require tagging all load/stores in a guest with their + * implicit memory order requirements which would likely slow things + * down a lot. 
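 *
 * Editorial sketch, not part of this patch: the check below boils down to
 * a subset test on the memory-order bits,
 *
 *     (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0
 *
 * i.e. every ordering the guest relies on implicitly must also be provided
 * implicitly by the host. For instance an x86 guest (close to TCG_MO_ALL)
 * on an AArch64 host (which advertises no implicit ordering) fails the
 * test, so that combination defaults to the single-threaded round-robin
 * loop unless thread=multi is forced explicitly.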
+ */ + +static bool check_tcg_memory_orders_compatible(void) +{ +#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO) + return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0; +#else + return false; +#endif +} + +static bool default_mttcg_enabled(void) +{ + if (icount_enabled() || TCG_OVERSIZED_GUEST) { + return false; + } else { +#ifdef TARGET_SUPPORTS_MTTCG + return check_tcg_memory_orders_compatible(); +#else + return false; +#endif + } +} + +static void tcg_accel_instance_init(Object *obj) +{ + TCGState *s = TCG_STATE(obj); + + s->mttcg_enabled = default_mttcg_enabled(); + + /* If debugging enabled, default "auto on", otherwise off. */ +#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY) + s->splitwx_enabled = -1; +#else + s->splitwx_enabled = 0; +#endif +} + +bool mttcg_enabled; + +static int tcg_init_machine(MachineState *ms) +{ + TCGState *s = TCG_STATE(current_accel()); +#ifdef CONFIG_USER_ONLY + unsigned max_cpus = 1; +#else + unsigned max_cpus = ms->smp.max_cpus; +#endif + + tcg_allowed = true; + mttcg_enabled = s->mttcg_enabled; + + page_init(); + tb_htable_init(); + tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus); + +#if defined(CONFIG_SOFTMMU) + /* + * There's no guest base to take into account, so go ahead and + * initialize the prologue now. + */ + tcg_prologue_init(tcg_ctx); +#endif + + return 0; +} + +static char *tcg_get_thread(Object *obj, Error **errp) +{ + TCGState *s = TCG_STATE(obj); + + return g_strdup(s->mttcg_enabled ? "multi" : "single"); +} + +static void tcg_set_thread(Object *obj, const char *value, Error **errp) +{ + TCGState *s = TCG_STATE(obj); + + if (strcmp(value, "multi") == 0) { + if (TCG_OVERSIZED_GUEST) { + error_setg(errp, "No MTTCG when guest word size > hosts"); + } else if (icount_enabled()) { + error_setg(errp, "No MTTCG when icount is enabled"); + } else { +#ifndef TARGET_SUPPORTS_MTTCG + warn_report("Guest not yet converted to MTTCG - " + "you may get unexpected results"); +#endif + if (!check_tcg_memory_orders_compatible()) { + warn_report("Guest expects a stronger memory ordering " + "than the host provides"); + error_printf("This may cause strange/hard to debug errors\n"); + } + s->mttcg_enabled = true; + } + } else if (strcmp(value, "single") == 0) { + s->mttcg_enabled = false; + } else { + error_setg(errp, "Invalid 'thread' setting %s", value); + } +} + +static void tcg_get_tb_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + TCGState *s = TCG_STATE(obj); + uint32_t value = s->tb_size; + + visit_type_uint32(v, name, &value, errp); +} + +static void tcg_set_tb_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + TCGState *s = TCG_STATE(obj); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + s->tb_size = value; +} + +static bool tcg_get_splitwx(Object *obj, Error **errp) +{ + TCGState *s = TCG_STATE(obj); + return s->splitwx_enabled; +} + +static void tcg_set_splitwx(Object *obj, bool value, Error **errp) +{ + TCGState *s = TCG_STATE(obj); + s->splitwx_enabled = value; +} + +static void tcg_accel_class_init(ObjectClass *oc, void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "tcg"; + ac->init_machine = tcg_init_machine; + ac->allowed = &tcg_allowed; + + object_class_property_add_str(oc, "thread", + tcg_get_thread, + tcg_set_thread); + + object_class_property_add(oc, "tb-size", "int", + tcg_get_tb_size, tcg_set_tb_size, + NULL, NULL); + object_class_property_set_description(oc, "tb-size", + 
"TCG translation block cache size"); + + object_class_property_add_bool(oc, "split-wx", + tcg_get_splitwx, tcg_set_splitwx); + object_class_property_set_description(oc, "split-wx", + "Map jit pages into separate RW and RX regions"); +} + +static const TypeInfo tcg_accel_type = { + .name = TYPE_TCG_ACCEL, + .parent = TYPE_ACCEL, + .instance_init = tcg_accel_instance_init, + .class_init = tcg_accel_class_init, + .instance_size = sizeof(TCGState), +}; +module_obj(TYPE_TCG_ACCEL); + +static void register_accel_types(void) +{ + type_register_static(&tcg_accel_type); +} + +type_init(register_accel_types); diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c new file mode 100644 index 000000000..ac7d28c25 --- /dev/null +++ b/accel/tcg/tcg-runtime-gvec.c @@ -0,0 +1,1534 @@ +/* + * Generic vectorized operation runtime + * + * Copyright (c) 2018 Linaro + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "tcg/tcg-gvec-desc.h" + + +static inline void clear_high(void *d, intptr_t oprsz, uint32_t desc) +{ + intptr_t maxsz = simd_maxsz(desc); + intptr_t i; + + if (unlikely(maxsz > oprsz)) { + for (i = oprsz; i < maxsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = 0; + } + } +} + +void HELPER(gvec_add8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) + *(uint8_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_add16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) + *(uint16_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_add32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) + *(uint32_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_add64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) + *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_adds8)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) + (uint8_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_adds16)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + 
i) = *(uint16_t *)(a + i) + (uint16_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_adds32)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) + (uint32_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_adds64)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) + b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) - *(uint8_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) - *(uint16_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) - *(uint32_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) - *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs8)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) - (uint8_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs16)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) - (uint16_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs32)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) - (uint32_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs64)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) - b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) * *(uint8_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) * *(uint16_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += 
sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) * *(uint32_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) * *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls8)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) * (uint8_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls16)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) * (uint16_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls32)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) * (uint32_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls64)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) * b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg8)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = -*(uint8_t *)(a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg16)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = -*(uint16_t *)(a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg32)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = -*(uint32_t *)(a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg64)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = -*(uint64_t *)(a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs8)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int8_t aa = *(int8_t *)(a + i); + *(int8_t *)(d + i) = aa < 0 ? -aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs16)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int16_t aa = *(int16_t *)(a + i); + *(int16_t *)(d + i) = aa < 0 ? -aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs32)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t aa = *(int32_t *)(a + i); + *(int32_t *)(d + i) = aa < 0 ? 
-aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs64)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t aa = *(int64_t *)(a + i); + *(int64_t *)(d + i) = aa < 0 ? -aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mov)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + + memcpy(d, a, oprsz); + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_dup64)(void *d, uint32_t desc, uint64_t c) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + if (c == 0) { + oprsz = 0; + } else { + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = c; + } + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_dup32)(void *d, uint32_t desc, uint32_t c) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + if (c == 0) { + oprsz = 0; + } else { + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = c; + } + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_dup16)(void *d, uint32_t desc, uint32_t c) +{ + HELPER(gvec_dup32)(d, desc, 0x00010001 * (c & 0xffff)); +} + +void HELPER(gvec_dup8)(void *d, uint32_t desc, uint32_t c) +{ + HELPER(gvec_dup32)(d, desc, 0x01010101 * (c & 0xff)); +} + +void HELPER(gvec_not)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = ~*(uint64_t *)(a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_and)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_or)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) | *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_xor)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) ^ *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_andc)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) &~ *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_orc)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) |~ *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_nand)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) & *(uint64_t *)(b + i)); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_nor)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) | *(uint64_t *)(b + i)); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_eqv)(void *d, void *a, 
void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) ^ *(uint64_t *)(b + i)); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) ^ b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ors)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) | b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl8i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl16i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl32i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl64i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr8i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr16i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr32i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr64i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar8i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + 
*(int8_t *)(d + i) = *(int8_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar16i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(int16_t *)(d + i) = *(int16_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar32i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(int32_t *)(d + i) = *(int32_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar64i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(int64_t *)(d + i) = *(int64_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl8i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = rol8(*(uint8_t *)(a + i), shift); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl16i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = rol16(*(uint16_t *)(a + i), shift); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl32i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = rol32(*(uint32_t *)(a + i), shift); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl64i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = rol64(*(uint64_t *)(a + i), shift); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl8v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t sh = *(uint8_t *)(b + i) & 7; + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint8_t sh = *(uint16_t *)(b + i) & 15; + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint8_t sh = *(uint32_t *)(b + i) & 31; + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint8_t sh = *(uint64_t *)(b + i) & 63; + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr8v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t sh = 
*(uint8_t *)(b + i) & 7; + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint8_t sh = *(uint16_t *)(b + i) & 15; + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint8_t sh = *(uint32_t *)(b + i) & 31; + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint8_t sh = *(uint64_t *)(b + i) & 63; + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar8v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + uint8_t sh = *(uint8_t *)(b + i) & 7; + *(int8_t *)(d + i) = *(int8_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + uint8_t sh = *(uint16_t *)(b + i) & 15; + *(int16_t *)(d + i) = *(int16_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + uint8_t sh = *(uint32_t *)(b + i) & 31; + *(int32_t *)(d + i) = *(int32_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + uint8_t sh = *(uint64_t *)(b + i) & 63; + *(int64_t *)(d + i) = *(int64_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl8v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t sh = *(uint8_t *)(b + i) & 7; + *(uint8_t *)(d + i) = rol8(*(uint8_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint8_t sh = *(uint16_t *)(b + i) & 15; + *(uint16_t *)(d + i) = rol16(*(uint16_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint8_t sh = *(uint32_t *)(b + i) & 31; + *(uint32_t *)(d + i) = rol32(*(uint32_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint8_t sh = *(uint64_t *)(b + i) & 63; + *(uint64_t *)(d + i) = rol64(*(uint64_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotr8v)(void *d, void *a, void *b, 
uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t sh = *(uint8_t *)(b + i) & 7; + *(uint8_t *)(d + i) = ror8(*(uint8_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotr16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint8_t sh = *(uint16_t *)(b + i) & 15; + *(uint16_t *)(d + i) = ror16(*(uint16_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotr32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint8_t sh = *(uint32_t *)(b + i) & 31; + *(uint32_t *)(d + i) = ror32(*(uint32_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotr64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint8_t sh = *(uint64_t *)(b + i) & 63; + *(uint64_t *)(d + i) = ror64(*(uint64_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +#define DO_CMP1(NAME, TYPE, OP) \ +void HELPER(NAME)(void *d, void *a, void *b, uint32_t desc) \ +{ \ + intptr_t oprsz = simd_oprsz(desc); \ + intptr_t i; \ + for (i = 0; i < oprsz; i += sizeof(TYPE)) { \ + *(TYPE *)(d + i) = -(*(TYPE *)(a + i) OP *(TYPE *)(b + i)); \ + } \ + clear_high(d, oprsz, desc); \ +} + +#define DO_CMP2(SZ) \ + DO_CMP1(gvec_eq##SZ, uint##SZ##_t, ==) \ + DO_CMP1(gvec_ne##SZ, uint##SZ##_t, !=) \ + DO_CMP1(gvec_lt##SZ, int##SZ##_t, <) \ + DO_CMP1(gvec_le##SZ, int##SZ##_t, <=) \ + DO_CMP1(gvec_ltu##SZ, uint##SZ##_t, <) \ + DO_CMP1(gvec_leu##SZ, uint##SZ##_t, <=) + +DO_CMP2(8) +DO_CMP2(16) +DO_CMP2(32) +DO_CMP2(64) + +#undef DO_CMP1 +#undef DO_CMP2 + +void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int r = *(int8_t *)(a + i) + *(int8_t *)(b + i); + if (r > INT8_MAX) { + r = INT8_MAX; + } else if (r < INT8_MIN) { + r = INT8_MIN; + } + *(int8_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ssadd16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int r = *(int16_t *)(a + i) + *(int16_t *)(b + i); + if (r > INT16_MAX) { + r = INT16_MAX; + } else if (r < INT16_MIN) { + r = INT16_MIN; + } + *(int16_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ssadd32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t ai = *(int32_t *)(a + i); + int32_t bi = *(int32_t *)(b + i); + int32_t di; + if (sadd32_overflow(ai, bi, &di)) { + di = (di < 0 ? INT32_MAX : INT32_MIN); + } + *(int32_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ssadd64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t ai = *(int64_t *)(a + i); + int64_t bi = *(int64_t *)(b + i); + int64_t di; + if (sadd64_overflow(ai, bi, &di)) { + di = (di < 0 ? 
INT64_MAX : INT64_MIN); + } + *(int64_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + int r = *(int8_t *)(a + i) - *(int8_t *)(b + i); + if (r > INT8_MAX) { + r = INT8_MAX; + } else if (r < INT8_MIN) { + r = INT8_MIN; + } + *(uint8_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int r = *(int16_t *)(a + i) - *(int16_t *)(b + i); + if (r > INT16_MAX) { + r = INT16_MAX; + } else if (r < INT16_MIN) { + r = INT16_MIN; + } + *(int16_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t ai = *(int32_t *)(a + i); + int32_t bi = *(int32_t *)(b + i); + int32_t di; + if (ssub32_overflow(ai, bi, &di)) { + di = (di < 0 ? INT32_MAX : INT32_MIN); + } + *(int32_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t ai = *(int64_t *)(a + i); + int64_t bi = *(int64_t *)(b + i); + int64_t di; + if (ssub64_overflow(ai, bi, &di)) { + di = (di < 0 ? INT64_MAX : INT64_MIN); + } + *(int64_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + unsigned r = *(uint8_t *)(a + i) + *(uint8_t *)(b + i); + if (r > UINT8_MAX) { + r = UINT8_MAX; + } + *(uint8_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + unsigned r = *(uint16_t *)(a + i) + *(uint16_t *)(b + i); + if (r > UINT16_MAX) { + r = UINT16_MAX; + } + *(uint16_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t ai = *(uint32_t *)(a + i); + uint32_t bi = *(uint32_t *)(b + i); + uint32_t di; + if (uadd32_overflow(ai, bi, &di)) { + di = UINT32_MAX; + } + *(uint32_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t ai = *(uint64_t *)(a + i); + uint64_t bi = *(uint64_t *)(b + i); + uint64_t di; + if (uadd64_overflow(ai, bi, &di)) { + di = UINT64_MAX; + } + *(uint64_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + int r = *(uint8_t *)(a + i) - *(uint8_t *)(b + i); + if (r < 0) { + r = 0; + } + *(uint8_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub16)(void *d, void *a, void *b, uint32_t desc) +{ + 
intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + int r = *(uint16_t *)(a + i) - *(uint16_t *)(b + i); + if (r < 0) { + r = 0; + } + *(uint16_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t ai = *(uint32_t *)(a + i); + uint32_t bi = *(uint32_t *)(b + i); + uint32_t di; + if (usub32_overflow(ai, bi, &di)) { + di = 0; + } + *(uint32_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t ai = *(uint64_t *)(a + i); + uint64_t bi = *(uint64_t *)(b + i); + uint64_t di; + if (usub64_overflow(ai, bi, &di)) { + di = 0; + } + *(uint64_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int8_t aa = *(int8_t *)(a + i); + int8_t bb = *(int8_t *)(b + i); + int8_t dd = aa < bb ? aa : bb; + *(int8_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int16_t aa = *(int16_t *)(a + i); + int16_t bb = *(int16_t *)(b + i); + int16_t dd = aa < bb ? aa : bb; + *(int16_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t aa = *(int32_t *)(a + i); + int32_t bb = *(int32_t *)(b + i); + int32_t dd = aa < bb ? aa : bb; + *(int32_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t aa = *(int64_t *)(a + i); + int64_t bb = *(int64_t *)(b + i); + int64_t dd = aa < bb ? aa : bb; + *(int64_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int8_t aa = *(int8_t *)(a + i); + int8_t bb = *(int8_t *)(b + i); + int8_t dd = aa > bb ? aa : bb; + *(int8_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int16_t aa = *(int16_t *)(a + i); + int16_t bb = *(int16_t *)(b + i); + int16_t dd = aa > bb ? aa : bb; + *(int16_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t aa = *(int32_t *)(a + i); + int32_t bb = *(int32_t *)(b + i); + int32_t dd = aa > bb ? 
aa : bb; + *(int32_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t aa = *(int64_t *)(a + i); + int64_t bb = *(int64_t *)(b + i); + int64_t dd = aa > bb ? aa : bb; + *(int64_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t aa = *(uint8_t *)(a + i); + uint8_t bb = *(uint8_t *)(b + i); + uint8_t dd = aa < bb ? aa : bb; + *(uint8_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint16_t aa = *(uint16_t *)(a + i); + uint16_t bb = *(uint16_t *)(b + i); + uint16_t dd = aa < bb ? aa : bb; + *(uint16_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t aa = *(uint32_t *)(a + i); + uint32_t bb = *(uint32_t *)(b + i); + uint32_t dd = aa < bb ? aa : bb; + *(uint32_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t aa = *(uint64_t *)(a + i); + uint64_t bb = *(uint64_t *)(b + i); + uint64_t dd = aa < bb ? aa : bb; + *(uint64_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t aa = *(uint8_t *)(a + i); + uint8_t bb = *(uint8_t *)(b + i); + uint8_t dd = aa > bb ? aa : bb; + *(uint8_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint16_t aa = *(uint16_t *)(a + i); + uint16_t bb = *(uint16_t *)(b + i); + uint16_t dd = aa > bb ? aa : bb; + *(uint16_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t aa = *(uint32_t *)(a + i); + uint32_t bb = *(uint32_t *)(b + i); + uint32_t dd = aa > bb ? aa : bb; + *(uint32_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t aa = *(uint64_t *)(a + i); + uint64_t bb = *(uint64_t *)(b + i); + uint64_t dd = aa > bb ? 
aa : bb; + *(uint64_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_bitsel)(void *d, void *a, void *b, void *c, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t aa = *(uint64_t *)(a + i); + uint64_t bb = *(uint64_t *)(b + i); + uint64_t cc = *(uint64_t *)(c + i); + *(uint64_t *)(d + i) = (bb & aa) | (cc & ~aa); + } + clear_high(d, oprsz, desc); +} diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c new file mode 100644 index 000000000..e4e030043 --- /dev/null +++ b/accel/tcg/tcg-runtime.c @@ -0,0 +1,150 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include "exec/exec-all.h" +#include "disas/disas.h" +#include "exec/log.h" +#include "tcg/tcg.h" + +/* 32-bit helpers */ + +int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2) +{ + return arg1 / arg2; +} + +int32_t HELPER(rem_i32)(int32_t arg1, int32_t arg2) +{ + return arg1 % arg2; +} + +uint32_t HELPER(divu_i32)(uint32_t arg1, uint32_t arg2) +{ + return arg1 / arg2; +} + +uint32_t HELPER(remu_i32)(uint32_t arg1, uint32_t arg2) +{ + return arg1 % arg2; +} + +/* 64-bit helpers */ + +uint64_t HELPER(shl_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 << arg2; +} + +uint64_t HELPER(shr_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 >> arg2; +} + +int64_t HELPER(sar_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 >> arg2; +} + +int64_t HELPER(div_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 / arg2; +} + +int64_t HELPER(rem_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 % arg2; +} + +uint64_t HELPER(divu_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 / arg2; +} + +uint64_t HELPER(remu_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 % arg2; +} + +uint64_t HELPER(muluh_i64)(uint64_t arg1, uint64_t arg2) +{ + uint64_t l, h; + mulu64(&l, &h, arg1, arg2); + return h; +} + +int64_t HELPER(mulsh_i64)(int64_t arg1, int64_t arg2) +{ + uint64_t l, h; + muls64(&l, &h, arg1, arg2); + return h; +} + +uint32_t HELPER(clz_i32)(uint32_t arg, uint32_t zero_val) +{ + return arg ? clz32(arg) : zero_val; +} + +uint32_t HELPER(ctz_i32)(uint32_t arg, uint32_t zero_val) +{ + return arg ? 
ctz32(arg) : zero_val; +} + +uint64_t HELPER(clz_i64)(uint64_t arg, uint64_t zero_val) +{ + return arg ? clz64(arg) : zero_val; +} + +uint64_t HELPER(ctz_i64)(uint64_t arg, uint64_t zero_val) +{ + return arg ? ctz64(arg) : zero_val; +} + +uint32_t HELPER(clrsb_i32)(uint32_t arg) +{ + return clrsb32(arg); +} + +uint64_t HELPER(clrsb_i64)(uint64_t arg) +{ + return clrsb64(arg); +} + +uint32_t HELPER(ctpop_i32)(uint32_t arg) +{ + return ctpop32(arg); +} + +uint64_t HELPER(ctpop_i64)(uint64_t arg) +{ + return ctpop64(arg); +} + +void HELPER(exit_atomic)(CPUArchState *env) +{ + cpu_loop_exit_atomic(env_cpu(env), GETPC()); +} diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h new file mode 100644 index 000000000..37cbd722b --- /dev/null +++ b/accel/tcg/tcg-runtime.h @@ -0,0 +1,287 @@ +DEF_HELPER_FLAGS_2(div_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) +DEF_HELPER_FLAGS_2(rem_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) +DEF_HELPER_FLAGS_2(divu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(remu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) + +DEF_HELPER_FLAGS_2(div_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(rem_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(divu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(remu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(shl_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(shr_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) + +DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(clz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(ctz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(clz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(ctz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_1(clrsb_i32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(clrsb_i64, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(ctpop_i32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(ctpop_i64, TCG_CALL_NO_RWG_SE, i64, i64) + +DEF_HELPER_FLAGS_1(lookup_tb_ptr, TCG_CALL_NO_WG_SE, cptr, env) + +DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env) + +#ifndef IN_HELPER_PROTO +/* + * Pass calls to memset directly to libc, without a thunk in qemu. + * Do not re-declare memset, especially since we fudge the type here; + * we assume sizeof(void *) == sizeof(size_t), which is true for + * all supported hosts. 
+ */ +#define helper_memset memset +DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr) +#endif /* IN_HELPER_PROTO */ + +DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +#ifdef CONFIG_ATOMIC64 +DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG, + i64, env, tl, i64, i64, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG, + i64, env, tl, i64, i64, i32) +#endif + +#ifdef CONFIG_ATOMIC64 +#define GEN_ATOMIC_HELPERS(NAME) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \ + TCG_CALL_NO_WG, i64, env, tl, i64, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \ + TCG_CALL_NO_WG, i64, env, tl, i64, i32) +#else +#define GEN_ATOMIC_HELPERS(NAME) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) +#endif /* CONFIG_ATOMIC64 */ + +GEN_ATOMIC_HELPERS(fetch_add) +GEN_ATOMIC_HELPERS(fetch_and) +GEN_ATOMIC_HELPERS(fetch_or) +GEN_ATOMIC_HELPERS(fetch_xor) +GEN_ATOMIC_HELPERS(fetch_smin) +GEN_ATOMIC_HELPERS(fetch_umin) +GEN_ATOMIC_HELPERS(fetch_smax) +GEN_ATOMIC_HELPERS(fetch_umax) + +GEN_ATOMIC_HELPERS(add_fetch) +GEN_ATOMIC_HELPERS(and_fetch) +GEN_ATOMIC_HELPERS(or_fetch) +GEN_ATOMIC_HELPERS(xor_fetch) +GEN_ATOMIC_HELPERS(smin_fetch) +GEN_ATOMIC_HELPERS(umin_fetch) +GEN_ATOMIC_HELPERS(smax_fetch) +GEN_ATOMIC_HELPERS(umax_fetch) + +GEN_ATOMIC_HELPERS(xchg) + +#undef GEN_ATOMIC_HELPERS + +DEF_HELPER_FLAGS_3(gvec_mov, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_dup8, TCG_CALL_NO_RWG, void, ptr, i32, i32) +DEF_HELPER_FLAGS_3(gvec_dup16, TCG_CALL_NO_RWG, void, ptr, i32, i32) +DEF_HELPER_FLAGS_3(gvec_dup32, TCG_CALL_NO_RWG, void, ptr, i32, i32) +DEF_HELPER_FLAGS_3(gvec_dup64, TCG_CALL_NO_RWG, void, ptr, i32, i64) + +DEF_HELPER_FLAGS_4(gvec_add8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_add16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_add32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_add64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_adds8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_adds16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_adds32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) 
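The DEF_HELPER_FLAGS_* lines above and continuing below declare, in one place, both the C prototype of a helper and the calling-convention information TCG needs to invoke it; the header is included more than once with different expansions of these macros. As a rough stand-alone sketch of the declaration-by-macro idea only, with invented names (MY_DEF_HELPER_4, my_helper_gvec_add32) and none of the real flag or type machinery:

    /* Toy version of declaration-by-macro; names are invented for
     * illustration and the real DEF_HELPER_FLAGS_* macros also record
     * flags and argument kinds for the code generator. */
    #include <stdint.h>
    #include <stdio.h>

    #define MY_DEF_HELPER_4(name, ret, t1, t2, t3, t4) \
        ret my_helper_##name(t1 a1, t2 a2, t3 a3, t4 a4)

    /* one "declaration" ... */
    MY_DEF_HELPER_4(gvec_add32, void, void *, void *, void *, uint32_t);

    /* ... and a toy implementation expanded from the same macro */
    MY_DEF_HELPER_4(gvec_add32, void, void *, void *, void *, uint32_t)
    {
        uint32_t *d = a1;
        const uint32_t *a = a2, *b = a3;

        /* a4 stands in for the size information carried by 'desc' in QEMU */
        for (uint32_t i = 0; i < a4 / sizeof(uint32_t); i++) {
            d[i] = a[i] + b[i];
        }
    }

    int main(void)
    {
        uint32_t a[4] = {1, 2, 3, 4}, b[4] = {10, 20, 30, 40}, d[4];

        my_helper_gvec_add32(d, a, b, sizeof(d));
        printf("%u %u %u %u\n", d[0], d[1], d[2], d[3]);   /* 11 22 33 44 */
        return 0;
    }

In the real tree the same list is expanded several times, so one line can yield a prototype, a generation wrapper and a registration entry without repeating the signature.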
+DEF_HELPER_FLAGS_4(gvec_adds64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(gvec_sub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_subs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_subs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_subs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_subs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(gvec_mul8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_mul16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_mul32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_mul64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_muls8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_muls16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_muls32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_muls64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(gvec_ssadd8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ssadd16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ssadd32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ssadd64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_sssub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sssub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sssub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sssub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_usadd8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_usadd16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_usadd32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_usadd64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ussub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ussub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ussub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ussub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_smin8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smin16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smin32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smin64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_smax8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smax16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smax32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smax64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_umin8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umin16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umin32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umin64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_umax8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umax16, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umax32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umax64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_neg8, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_neg16, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_neg32, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_neg64, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_abs8, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_abs16, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_abs32, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_abs64, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_not, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_and, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_or, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_xor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_andc, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_orc, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_nand, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_3(gvec_shl8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shl16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shl32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shl64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_shr8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shr16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shr32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shr64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_sar8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sar16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sar32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sar64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_rotl8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_rotl16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_rotl32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_rotl64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_shl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shl64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_shr8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shr16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shr32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shr64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_sar8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sar16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
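Every gvec helper declared in this list takes a trailing i32 'desc' operand; the implementations earlier in this patch recover the operation size from it with simd_oprsz() and zero the tail with clear_high(). A minimal stand-alone sketch of packing two sizes into such a 32-bit descriptor, using an invented bit layout (the real encoding built by simd_desc() lives in the gvec descriptor header and differs in detail):

    /* Toy descriptor packing; the field layout is made up for illustration. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t toy_desc(uint32_t oprsz, uint32_t maxsz, uint32_t data)
    {
        /* 8 bits each for the sizes in bytes, the rest for 'data' */
        return (oprsz & 0xff) | ((maxsz & 0xff) << 8) | (data << 16);
    }

    static uint32_t toy_oprsz(uint32_t desc) { return desc & 0xff; }
    static uint32_t toy_maxsz(uint32_t desc) { return (desc >> 8) & 0xff; }

    int main(void)
    {
        uint32_t desc = toy_desc(16, 32, 0);

        /* a helper would loop over toy_oprsz() bytes and then clear the
         * tail up to toy_maxsz(), as clear_high() does in the gvec code */
        printf("oprsz=%u maxsz=%u\n", toy_oprsz(desc), toy_maxsz(desc));
        return 0;
    }

Folding the sizes into one immediate keeps each helper call down to a few pointers plus a single 32-bit operand, which is cheap for generated code to pass.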
+DEF_HELPER_FLAGS_4(gvec_sar32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sar64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_rotl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotl64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_rotr8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotr16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotr32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotr64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_eq8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eq16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eq32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eq64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ne8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ne16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ne32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ne64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_lt8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_lt16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_lt32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_lt64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_le8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_le16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_le32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_le64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ltu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ltu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ltu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ltu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_leu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) diff --git a/accel/tcg/trace-events b/accel/tcg/trace-events new file mode 100644 index 000000000..59eab96f2 --- /dev/null +++ b/accel/tcg/trace-events @@ -0,0 +1,10 @@ +# See docs/devel/tracing.rst for syntax documentation. 
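Each line of this trace-events file (see the events listed just below) is turned by tracetool into a trace_<name>() helper that the C code calls, e.g. trace_exec_tb() from the cpu-exec loop. A rough stand-alone approximation of one generated trace point, assuming a plain logging backend and an invented enable flag:

    /* Approximation of a generated trace point; the real code comes from
     * tracetool and supports several backends, so names and details here
     * are illustrative only. */
    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the per-event enable state (selected with -trace in QEMU) */
    static bool trace_exec_tb_enabled = true;

    /* roughly what "exec_tb(void *tb, uintptr_t pc)" expands to */
    static inline void trace_exec_tb(void *tb, uintptr_t pc)
    {
        if (trace_exec_tb_enabled) {
            fprintf(stderr, "exec_tb tb:%p pc=0x%" PRIxPTR "\n", tb, pc);
        }
    }

    int main(void)
    {
        int fake_tb;

        trace_exec_tb(&fake_tb, (uintptr_t)0x400080);
        return 0;
    }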
+ +# TCG related tracing +# cpu-exec.c +exec_tb(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR +exec_tb_nocache(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR +exec_tb_exit(void *last_tb, unsigned int flags) "tb:%p flags=0x%x" + +# translate-all.c +translate_block(void *tb, uintptr_t pc, const void *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p" diff --git a/accel/tcg/trace.h b/accel/tcg/trace.h new file mode 100644 index 000000000..db61fad3c --- /dev/null +++ b/accel/tcg/trace.h @@ -0,0 +1 @@ +#include "trace/trace-accel_tcg.h" diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c new file mode 100644 index 000000000..bd71db59a --- /dev/null +++ b/accel/tcg/translate-all.c @@ -0,0 +1,2490 @@ +/* + * Host code generation + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#define NO_CPU_IO_DEFS +#include "trace.h" +#include "disas/disas.h" +#include "exec/exec-all.h" +#include "tcg/tcg.h" +#if defined(CONFIG_USER_ONLY) +#include "qemu.h" +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#include <sys/param.h> +#if __FreeBSD_version >= 700104 +#define HAVE_KINFO_GETVMMAP +#define sigqueue sigqueue_freebsd /* avoid redefinition */ +#include <sys/proc.h> +#include <machine/profile.h> +#define _KERNEL +#include <sys/user.h> +#undef _KERNEL +#undef sigqueue +#include <libutil.h> +#endif +#endif +#else +#include "exec/ram_addr.h" +#endif + +#include "exec/cputlb.h" +#include "exec/translate-all.h" +#include "qemu/bitmap.h" +#include "qemu/qemu-print.h" +#include "qemu/timer.h" +#include "qemu/main-loop.h" +#include "exec/log.h" +#include "sysemu/cpus.h" +#include "sysemu/cpu-timers.h" +#include "sysemu/tcg.h" +#include "qapi/error.h" +#include "hw/core/tcg-cpu-ops.h" +#include "tb-hash.h" +#include "tb-context.h" +#include "internal.h" + +/* #define DEBUG_TB_INVALIDATE */ +/* #define DEBUG_TB_FLUSH */ +/* make various TB consistency checks */ +/* #define DEBUG_TB_CHECK */ + +#ifdef DEBUG_TB_INVALIDATE +#define DEBUG_TB_INVALIDATE_GATE 1 +#else +#define DEBUG_TB_INVALIDATE_GATE 0 +#endif + +#ifdef DEBUG_TB_FLUSH +#define DEBUG_TB_FLUSH_GATE 1 +#else +#define DEBUG_TB_FLUSH_GATE 0 +#endif + +#if !defined(CONFIG_USER_ONLY) +/* TB consistency checks only implemented for usermode emulation. */ +#undef DEBUG_TB_CHECK +#endif + +#ifdef DEBUG_TB_CHECK +#define DEBUG_TB_CHECK_GATE 1 +#else +#define DEBUG_TB_CHECK_GATE 0 +#endif + +/* Access to the various translations structures need to be serialised via locks + * for consistency. + * In user-mode emulation access to the memory related structures are protected + * with mmap_lock. + * In !user-mode we use per-page locks. 
+ */ +#ifdef CONFIG_SOFTMMU +#define assert_memory_lock() +#else +#define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) +#endif + +#define SMC_BITMAP_USE_THRESHOLD 10 + +typedef struct PageDesc { + /* list of TBs intersecting this ram page */ + uintptr_t first_tb; +#ifdef CONFIG_SOFTMMU + /* in order to optimize self modifying code, we count the number + of lookups we do to a given page to use a bitmap */ + unsigned long *code_bitmap; + unsigned int code_write_count; +#else + unsigned long flags; + void *target_data; +#endif +#ifndef CONFIG_USER_ONLY + QemuSpin lock; +#endif +} PageDesc; + +/** + * struct page_entry - page descriptor entry + * @pd: pointer to the &struct PageDesc of the page this entry represents + * @index: page index of the page + * @locked: whether the page is locked + * + * This struct helps us keep track of the locked state of a page, without + * bloating &struct PageDesc. + * + * A page lock protects accesses to all fields of &struct PageDesc. + * + * See also: &struct page_collection. + */ +struct page_entry { + PageDesc *pd; + tb_page_addr_t index; + bool locked; +}; + +/** + * struct page_collection - tracks a set of pages (i.e. &struct page_entry's) + * @tree: Binary search tree (BST) of the pages, with key == page index + * @max: Pointer to the page in @tree with the highest page index + * + * To avoid deadlock we lock pages in ascending order of page index. + * When operating on a set of pages, we need to keep track of them so that + * we can lock them in order and also unlock them later. For this we collect + * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the + * @tree implementation we use does not provide an O(1) operation to obtain the + * highest-ranked element, we use @max to keep track of the inserted page + * with the highest index. This is valuable because if a page is not in + * the tree and its index is higher than @max's, then we can lock it + * without breaking the locking order rule. + * + * Note on naming: 'struct page_set' would be shorter, but we already have a few + * page_set_*() helpers, so page_collection is used instead to avoid confusion. + * + * See also: page_collection_lock(). + */ +struct page_collection { + GTree *tree; + struct page_entry *max; +}; + +/* list iterators for lists of tagged pointers in TranslationBlock */ +#define TB_FOR_EACH_TAGGED(head, tb, n, field) \ + for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \ + tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \ + tb = (TranslationBlock *)((uintptr_t)tb & ~1)) + +#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \ + TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) + +#define TB_FOR_EACH_JMP(head_tb, tb, n) \ + TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next) + +/* + * In system mode we want L1_MAP to be based on ram offsets, + * while in user mode we want it to be based on virtual addresses. + * + * TODO: For user mode, see the caveat re host vs guest virtual + * address spaces near GUEST_ADDR_MAX. + */ +#if !defined(CONFIG_USER_ONLY) +#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS +# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS +#else +# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS +#endif +#else +# define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS) +#endif + +/* Size of the L2 (and L3, etc) page tables. 
*/ +#define V_L2_BITS 10 +#define V_L2_SIZE (1 << V_L2_BITS) + +/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */ +QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > + sizeof_field(TranslationBlock, trace_vcpu_dstate) + * BITS_PER_BYTE); + +/* + * L1 Mapping properties + */ +static int v_l1_size; +static int v_l1_shift; +static int v_l2_levels; + +/* The bottom level has pointers to PageDesc, and is indexed by + * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. + */ +#define V_L1_MIN_BITS 4 +#define V_L1_MAX_BITS (V_L2_BITS + 3) +#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) + +static void *l1_map[V_L1_MAX_SIZE]; + +TBContext tb_ctx; + +static void page_table_config_init(void) +{ + uint32_t v_l1_bits; + + assert(TARGET_PAGE_BITS); + /* The bits remaining after N lower levels of page tables. */ + v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS; + if (v_l1_bits < V_L1_MIN_BITS) { + v_l1_bits += V_L2_BITS; + } + + v_l1_size = 1 << v_l1_bits; + v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits; + v_l2_levels = v_l1_shift / V_L2_BITS - 1; + + assert(v_l1_bits <= V_L1_MAX_BITS); + assert(v_l1_shift % V_L2_BITS == 0); + assert(v_l2_levels >= 0); +} + +/* Encode VAL as a signed leb128 sequence at P. + Return P incremented past the encoded value. */ +static uint8_t *encode_sleb128(uint8_t *p, target_long val) +{ + int more, byte; + + do { + byte = val & 0x7f; + val >>= 7; + more = !((val == 0 && (byte & 0x40) == 0) + || (val == -1 && (byte & 0x40) != 0)); + if (more) { + byte |= 0x80; + } + *p++ = byte; + } while (more); + + return p; +} + +/* Decode a signed leb128 sequence at *PP; increment *PP past the + decoded value. Return the decoded value. */ +static target_long decode_sleb128(const uint8_t **pp) +{ + const uint8_t *p = *pp; + target_long val = 0; + int byte, shift = 0; + + do { + byte = *p++; + val |= (target_ulong)(byte & 0x7f) << shift; + shift += 7; + } while (byte & 0x80); + if (shift < TARGET_LONG_BITS && (byte & 0x40)) { + val |= -(target_ulong)1 << shift; + } + + *pp = p; + return val; +} + +/* Encode the data collected about the instructions while compiling TB. + Place the data at BLOCK, and return the number of bytes consumed. + + The logical table consists of TARGET_INSN_START_WORDS target_ulong's, + which come from the target's insn_start data, followed by a uintptr_t + which comes from the host pc of the end of the code implementing the insn. + + Each line of the table is encoded as sleb128 deltas from the previous + line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }. + That is, the first column is seeded with the guest pc, the last column + with the host pc, and the middle columns with zeros. */ + +static int encode_search(TranslationBlock *tb, uint8_t *block) +{ + uint8_t *highwater = tcg_ctx->code_gen_highwater; + uint8_t *p = block; + int i, j, n; + + for (i = 0, n = tb->icount; i < n; ++i) { + target_ulong prev; + + for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { + if (i == 0) { + prev = (j == 0 ? tb->pc : 0); + } else { + prev = tcg_ctx->gen_insn_data[i - 1][j]; + } + p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev); + } + prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]); + p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev); + + /* Test for (pending) buffer overflow. The assumption is that any + one row beginning below the high water mark cannot overrun + the buffer completely. 
Thus we can test for overflow after + encoding a row without having to check during encoding. */ + if (unlikely(p > highwater)) { + return -1; + } + } + + return p - block; +} + +/* The cpu state corresponding to 'searched_pc' is restored. + * When reset_icount is true, current TB will be interrupted and + * icount should be recalculated. + */ +static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, + uintptr_t searched_pc, bool reset_icount) +{ + target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; + uintptr_t host_pc = (uintptr_t)tb->tc.ptr; + CPUArchState *env = cpu->env_ptr; + const uint8_t *p = tb->tc.ptr + tb->tc.size; + int i, j, num_insns = tb->icount; +#ifdef CONFIG_PROFILER + TCGProfile *prof = &tcg_ctx->prof; + int64_t ti = profile_getclock(); +#endif + + searched_pc -= GETPC_ADJ; + + if (searched_pc < host_pc) { + return -1; + } + + /* Reconstruct the stored insn data while looking for the point at + which the end of the insn exceeds the searched_pc. */ + for (i = 0; i < num_insns; ++i) { + for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { + data[j] += decode_sleb128(&p); + } + host_pc += decode_sleb128(&p); + if (host_pc > searched_pc) { + goto found; + } + } + return -1; + + found: + if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) { + assert(icount_enabled()); + /* Reset the cycle counter to the start of the block + and shift if to the number of actually executed instructions */ + cpu_neg(cpu)->icount_decr.u16.low += num_insns - i; + } + restore_state_to_opc(env, tb, data); + +#ifdef CONFIG_PROFILER + qatomic_set(&prof->restore_time, + prof->restore_time + profile_getclock() - ti); + qatomic_set(&prof->restore_count, prof->restore_count + 1); +#endif + return 0; +} + +bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) +{ + /* + * The host_pc has to be in the rx region of the code buffer. + * If it is not we will not be able to resolve it here. + * The two cases where host_pc will not be correct are: + * + * - fault during translation (instruction fetch) + * - fault from helper (not using GETPC() macro) + * + * Either way we need return early as we can't resolve it here. 
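encode_sleb128()/decode_sleb128() above implement the usual signed LEB128 varint: seven payload bits per byte, the top bit as a continuation flag, and bit 6 of the final byte supplying the sign. A stand-alone round-trip of the same scheme, using plain long rather than target_long so it builds outside QEMU, for one row of deltas:

    /* Mirrors encode_sleb128()/decode_sleb128() above with plain 'long'. */
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t *sleb128_put(uint8_t *p, long val)
    {
        int more, byte;

        do {
            byte = val & 0x7f;
            val >>= 7;
            more = !((val == 0 && (byte & 0x40) == 0) ||
                     (val == -1 && (byte & 0x40) != 0));
            if (more) {
                byte |= 0x80;
            }
            *p++ = byte;
        } while (more);
        return p;
    }

    static long sleb128_get(const uint8_t **pp)
    {
        const uint8_t *p = *pp;
        long val = 0;
        int byte, shift = 0;

        do {
            byte = *p++;
            val |= (unsigned long)(byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        if (shift < (int)(8 * sizeof(long)) && (byte & 0x40)) {
            val |= -(unsigned long)1 << shift;
        }
        *pp = p;
        return val;
    }

    int main(void)
    {
        uint8_t buf[16], *end;
        const uint8_t *rd = buf;
        long d1, d2;

        /* two deltas, one positive and one negative to exercise the sign path */
        end = sleb128_put(buf, 4);
        end = sleb128_put(end, -3);
        d1 = sleb128_get(&rd);
        d2 = sleb128_get(&rd);
        printf("%d bytes encoded, decoded %ld and %ld\n",
               (int)(end - buf), d1, d2);
        return 0;
    }

Small deltas, which dominate the per-TB search table, therefore cost a single byte each.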
+ */ + if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) { + TranslationBlock *tb = tcg_tb_lookup(host_pc); + if (tb) { + cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit); + return true; + } + } + return false; +} + +void page_init(void) +{ + page_size_init(); + page_table_config_init(); + +#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) + { +#ifdef HAVE_KINFO_GETVMMAP + struct kinfo_vmentry *freep; + int i, cnt; + + freep = kinfo_getvmmap(getpid(), &cnt); + if (freep) { + mmap_lock(); + for (i = 0; i < cnt; i++) { + unsigned long startaddr, endaddr; + + startaddr = freep[i].kve_start; + endaddr = freep[i].kve_end; + if (h2g_valid(startaddr)) { + startaddr = h2g(startaddr) & TARGET_PAGE_MASK; + + if (h2g_valid(endaddr)) { + endaddr = h2g(endaddr); + page_set_flags(startaddr, endaddr, PAGE_RESERVED); + } else { +#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS + endaddr = ~0ul; + page_set_flags(startaddr, endaddr, PAGE_RESERVED); +#endif + } + } + } + free(freep); + mmap_unlock(); + } +#else + FILE *f; + + last_brk = (unsigned long)sbrk(0); + + f = fopen("/compat/linux/proc/self/maps", "r"); + if (f) { + mmap_lock(); + + do { + unsigned long startaddr, endaddr; + int n; + + n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); + + if (n == 2 && h2g_valid(startaddr)) { + startaddr = h2g(startaddr) & TARGET_PAGE_MASK; + + if (h2g_valid(endaddr)) { + endaddr = h2g(endaddr); + } else { + endaddr = ~0ul; + } + page_set_flags(startaddr, endaddr, PAGE_RESERVED); + } + } while (!feof(f)); + + fclose(f); + mmap_unlock(); + } +#endif + } +#endif +} + +static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) +{ + PageDesc *pd; + void **lp; + int i; + + /* Level 1. Always allocated. */ + lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1)); + + /* Level 2..N-1. 
*/ + for (i = v_l2_levels; i > 0; i--) { + void **p = qatomic_rcu_read(lp); + + if (p == NULL) { + void *existing; + + if (!alloc) { + return NULL; + } + p = g_new0(void *, V_L2_SIZE); + existing = qatomic_cmpxchg(lp, NULL, p); + if (unlikely(existing)) { + g_free(p); + p = existing; + } + } + + lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); + } + + pd = qatomic_rcu_read(lp); + if (pd == NULL) { + void *existing; + + if (!alloc) { + return NULL; + } + pd = g_new0(PageDesc, V_L2_SIZE); +#ifndef CONFIG_USER_ONLY + { + int i; + + for (i = 0; i < V_L2_SIZE; i++) { + qemu_spin_init(&pd[i].lock); + } + } +#endif + existing = qatomic_cmpxchg(lp, NULL, pd); + if (unlikely(existing)) { +#ifndef CONFIG_USER_ONLY + { + int i; + + for (i = 0; i < V_L2_SIZE; i++) { + qemu_spin_destroy(&pd[i].lock); + } + } +#endif + g_free(pd); + pd = existing; + } + } + + return pd + (index & (V_L2_SIZE - 1)); +} + +static inline PageDesc *page_find(tb_page_addr_t index) +{ + return page_find_alloc(index, 0); +} + +static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, + PageDesc **ret_p2, tb_page_addr_t phys2, int alloc); + +/* In user-mode page locks aren't used; mmap_lock is enough */ +#ifdef CONFIG_USER_ONLY + +#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock()) + +static inline void page_lock(PageDesc *pd) +{ } + +static inline void page_unlock(PageDesc *pd) +{ } + +static inline void page_lock_tb(const TranslationBlock *tb) +{ } + +static inline void page_unlock_tb(const TranslationBlock *tb) +{ } + +struct page_collection * +page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) +{ + return NULL; +} + +void page_collection_unlock(struct page_collection *set) +{ } +#else /* !CONFIG_USER_ONLY */ + +#ifdef CONFIG_DEBUG_TCG + +static __thread GHashTable *ht_pages_locked_debug; + +static void ht_pages_locked_debug_init(void) +{ + if (ht_pages_locked_debug) { + return; + } + ht_pages_locked_debug = g_hash_table_new(NULL, NULL); +} + +static bool page_is_locked(const PageDesc *pd) +{ + PageDesc *found; + + ht_pages_locked_debug_init(); + found = g_hash_table_lookup(ht_pages_locked_debug, pd); + return !!found; +} + +static void page_lock__debug(PageDesc *pd) +{ + ht_pages_locked_debug_init(); + g_assert(!page_is_locked(pd)); + g_hash_table_insert(ht_pages_locked_debug, pd, pd); +} + +static void page_unlock__debug(const PageDesc *pd) +{ + bool removed; + + ht_pages_locked_debug_init(); + g_assert(page_is_locked(pd)); + removed = g_hash_table_remove(ht_pages_locked_debug, pd); + g_assert(removed); +} + +static void +do_assert_page_locked(const PageDesc *pd, const char *file, int line) +{ + if (unlikely(!page_is_locked(pd))) { + error_report("assert_page_lock: PageDesc %p not locked @ %s:%d", + pd, file, line); + abort(); + } +} + +#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__) + +void assert_no_pages_locked(void) +{ + ht_pages_locked_debug_init(); + g_assert(g_hash_table_size(ht_pages_locked_debug) == 0); +} + +#else /* !CONFIG_DEBUG_TCG */ + +#define assert_page_locked(pd) + +static inline void page_lock__debug(const PageDesc *pd) +{ +} + +static inline void page_unlock__debug(const PageDesc *pd) +{ +} + +#endif /* CONFIG_DEBUG_TCG */ + +static inline void page_lock(PageDesc *pd) +{ + page_lock__debug(pd); + qemu_spin_lock(&pd->lock); +} + +static inline void page_unlock(PageDesc *pd) +{ + qemu_spin_unlock(&pd->lock); + page_unlock__debug(pd); +} + +/* lock the page(s) of a TB in the correct acquisition order */ +static inline void 
page_lock_tb(const TranslationBlock *tb) +{ + page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0); +} + +static inline void page_unlock_tb(const TranslationBlock *tb) +{ + PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); + + page_unlock(p1); + if (unlikely(tb->page_addr[1] != -1)) { + PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); + + if (p2 != p1) { + page_unlock(p2); + } + } +} + +static inline struct page_entry * +page_entry_new(PageDesc *pd, tb_page_addr_t index) +{ + struct page_entry *pe = g_malloc(sizeof(*pe)); + + pe->index = index; + pe->pd = pd; + pe->locked = false; + return pe; +} + +static void page_entry_destroy(gpointer p) +{ + struct page_entry *pe = p; + + g_assert(pe->locked); + page_unlock(pe->pd); + g_free(pe); +} + +/* returns false on success */ +static bool page_entry_trylock(struct page_entry *pe) +{ + bool busy; + + busy = qemu_spin_trylock(&pe->pd->lock); + if (!busy) { + g_assert(!pe->locked); + pe->locked = true; + page_lock__debug(pe->pd); + } + return busy; +} + +static void do_page_entry_lock(struct page_entry *pe) +{ + page_lock(pe->pd); + g_assert(!pe->locked); + pe->locked = true; +} + +static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data) +{ + struct page_entry *pe = value; + + do_page_entry_lock(pe); + return FALSE; +} + +static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data) +{ + struct page_entry *pe = value; + + if (pe->locked) { + pe->locked = false; + page_unlock(pe->pd); + } + return FALSE; +} + +/* + * Trylock a page, and if successful, add the page to a collection. + * Returns true ("busy") if the page could not be locked; false otherwise. + */ +static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr) +{ + tb_page_addr_t index = addr >> TARGET_PAGE_BITS; + struct page_entry *pe; + PageDesc *pd; + + pe = g_tree_lookup(set->tree, &index); + if (pe) { + return false; + } + + pd = page_find(index); + if (pd == NULL) { + return false; + } + + pe = page_entry_new(pd, index); + g_tree_insert(set->tree, &pe->index, pe); + + /* + * If this is either (1) the first insertion or (2) a page whose index + * is higher than any other so far, just lock the page and move on. + */ + if (set->max == NULL || pe->index > set->max->index) { + set->max = pe; + do_page_entry_lock(pe); + return false; + } + /* + * Try to acquire out-of-order lock; if busy, return busy so that we acquire + * locks in order. + */ + return page_entry_trylock(pe); +} + +static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata) +{ + tb_page_addr_t a = *(const tb_page_addr_t *)ap; + tb_page_addr_t b = *(const tb_page_addr_t *)bp; + + if (a == b) { + return 0; + } else if (a < b) { + return -1; + } + return 1; +} + +/* + * Lock a range of pages ([@start,@end[) as well as the pages of all + * intersecting TBs. + * Locking order: acquire locks in ascending order of page index. 
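page_collection_lock() below relies on the ordering rule just stated: page locks are only ever taken in ascending page-index order, so two threads can never each hold one lock of a pair while waiting for the other. A toy illustration of the pairwise case, mirroring what page_lock_pair() further down does but without the allocation and trylock/retry handling:

    /* Toy version of the "lower page index first" rule; build with -pthread. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t page3_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t page7_lock = PTHREAD_MUTEX_INITIALIZER;

    /* lock the (at most) two pages a TB spans, lower index first,
     * whatever order the caller passes them in */
    static void toy_lock_pair(long idx1, pthread_mutex_t *l1,
                              long idx2, pthread_mutex_t *l2)
    {
        if (idx1 <= idx2) {
            pthread_mutex_lock(l1);
            if (l2 != l1) {
                pthread_mutex_lock(l2);
            }
        } else {
            pthread_mutex_lock(l2);
            pthread_mutex_lock(l1);
        }
    }

    int main(void)
    {
        /* passed "backwards", still acquired as page 3 then page 7 */
        toy_lock_pair(7, &page7_lock, 3, &page3_lock);
        printf("holding page locks 3 and 7, taken in ascending order\n");
        pthread_mutex_unlock(&page7_lock);
        pthread_mutex_unlock(&page3_lock);
        return 0;
    }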
+ */ +struct page_collection * +page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) +{ + struct page_collection *set = g_malloc(sizeof(*set)); + tb_page_addr_t index; + PageDesc *pd; + + start >>= TARGET_PAGE_BITS; + end >>= TARGET_PAGE_BITS; + g_assert(start <= end); + + set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL, + page_entry_destroy); + set->max = NULL; + assert_no_pages_locked(); + + retry: + g_tree_foreach(set->tree, page_entry_lock, NULL); + + for (index = start; index <= end; index++) { + TranslationBlock *tb; + int n; + + pd = page_find(index); + if (pd == NULL) { + continue; + } + if (page_trylock_add(set, index << TARGET_PAGE_BITS)) { + g_tree_foreach(set->tree, page_entry_unlock, NULL); + goto retry; + } + assert_page_locked(pd); + PAGE_FOR_EACH_TB(pd, tb, n) { + if (page_trylock_add(set, tb->page_addr[0]) || + (tb->page_addr[1] != -1 && + page_trylock_add(set, tb->page_addr[1]))) { + /* drop all locks, and reacquire in order */ + g_tree_foreach(set->tree, page_entry_unlock, NULL); + goto retry; + } + } + } + return set; +} + +void page_collection_unlock(struct page_collection *set) +{ + /* entries are unlocked and freed via page_entry_destroy */ + g_tree_destroy(set->tree); + g_free(set); +} + +#endif /* !CONFIG_USER_ONLY */ + +static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, + PageDesc **ret_p2, tb_page_addr_t phys2, int alloc) +{ + PageDesc *p1, *p2; + tb_page_addr_t page1; + tb_page_addr_t page2; + + assert_memory_lock(); + g_assert(phys1 != -1); + + page1 = phys1 >> TARGET_PAGE_BITS; + page2 = phys2 >> TARGET_PAGE_BITS; + + p1 = page_find_alloc(page1, alloc); + if (ret_p1) { + *ret_p1 = p1; + } + if (likely(phys2 == -1)) { + page_lock(p1); + return; + } else if (page1 == page2) { + page_lock(p1); + if (ret_p2) { + *ret_p2 = p1; + } + return; + } + p2 = page_find_alloc(page2, alloc); + if (ret_p2) { + *ret_p2 = p2; + } + if (page1 < page2) { + page_lock(p1); + page_lock(p2); + } else { + page_lock(p2); + page_lock(p1); + } +} + +static bool tb_cmp(const void *ap, const void *bp) +{ + const TranslationBlock *a = ap; + const TranslationBlock *b = bp; + + return a->pc == b->pc && + a->cs_base == b->cs_base && + a->flags == b->flags && + (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && + a->trace_vcpu_dstate == b->trace_vcpu_dstate && + a->page_addr[0] == b->page_addr[0] && + a->page_addr[1] == b->page_addr[1]; +} + +void tb_htable_init(void) +{ + unsigned int mode = QHT_MODE_AUTO_RESIZE; + + qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); +} + +/* call with @p->lock held */ +static inline void invalidate_page_bitmap(PageDesc *p) +{ + assert_page_locked(p); +#ifdef CONFIG_SOFTMMU + g_free(p->code_bitmap); + p->code_bitmap = NULL; + p->code_write_count = 0; +#endif +} + +/* Set to NULL all the 'first_tb' fields in all PageDescs. 
*/ +static void page_flush_tb_1(int level, void **lp) +{ + int i; + + if (*lp == NULL) { + return; + } + if (level == 0) { + PageDesc *pd = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_lock(&pd[i]); + pd[i].first_tb = (uintptr_t)NULL; + invalidate_page_bitmap(pd + i); + page_unlock(&pd[i]); + } + } else { + void **pp = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_flush_tb_1(level - 1, pp + i); + } + } +} + +static void page_flush_tb(void) +{ + int i, l1_sz = v_l1_size; + + for (i = 0; i < l1_sz; i++) { + page_flush_tb_1(v_l2_levels, l1_map + i); + } +} + +static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data) +{ + const TranslationBlock *tb = value; + size_t *size = data; + + *size += tb->tc.size; + return false; +} + +/* flush all the translation blocks */ +static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) +{ + bool did_flush = false; + + mmap_lock(); + /* If it is already been done on request of another CPU, + * just retry. + */ + if (tb_ctx.tb_flush_count != tb_flush_count.host_int) { + goto done; + } + did_flush = true; + + if (DEBUG_TB_FLUSH_GATE) { + size_t nb_tbs = tcg_nb_tbs(); + size_t host_size = 0; + + tcg_tb_foreach(tb_host_size_iter, &host_size); + printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n", + tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0); + } + + CPU_FOREACH(cpu) { + cpu_tb_jmp_cache_clear(cpu); + } + + qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); + page_flush_tb(); + + tcg_region_reset_all(); + /* XXX: flush processor icache at this point if cache flush is + expensive */ + qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); + +done: + mmap_unlock(); + if (did_flush) { + qemu_plugin_flush_cb(); + } +} + +void tb_flush(CPUState *cpu) +{ + if (tcg_enabled()) { + unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count); + + if (cpu_in_exclusive_context(cpu)) { + do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); + } else { + async_safe_run_on_cpu(cpu, do_tb_flush, + RUN_ON_CPU_HOST_INT(tb_flush_count)); + } + } +} + +/* + * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only, + * so in order to prevent bit rot we compile them unconditionally in user-mode, + * and let the optimizer get rid of them by wrapping their user-only callers + * with if (DEBUG_TB_CHECK_GATE). + */ +#ifdef CONFIG_USER_ONLY + +static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp) +{ + TranslationBlock *tb = p; + target_ulong addr = *(target_ulong *)userp; + + if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) { + printf("ERROR invalidate: address=" TARGET_FMT_lx + " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size); + } +} + +/* verify that all the pages have correct rights for code + * + * Called with mmap_lock held. 
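tb_flush() and do_tb_flush() above use a generation counter to collapse concurrent flush requests: the requester records tb_flush_count, and the worker only performs the flush if that count is still current when it runs. A minimal single-threaded sketch of the same idiom, with invented names:

    /* Toy flush-deduplication counter; names are invented for illustration. */
    #include <stdio.h>

    static unsigned flush_count;
    static int flushes_done;

    static void do_flush(unsigned seen_count)
    {
        if (flush_count != seen_count) {
            return;             /* someone else already flushed for us */
        }
        flushes_done++;
        flush_count++;
    }

    int main(void)
    {
        unsigned seen = flush_count;

        /* two "CPUs" request a flush having observed the same generation */
        do_flush(seen);
        do_flush(seen);
        printf("flushes performed: %d\n", flushes_done);   /* 1, not 2 */
        return 0;
    }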
+ */ +static void tb_invalidate_check(target_ulong address) +{ + address &= TARGET_PAGE_MASK; + qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address); +} + +static void do_tb_page_check(void *p, uint32_t hash, void *userp) +{ + TranslationBlock *tb = p; + int flags1, flags2; + + flags1 = page_get_flags(tb->pc); + flags2 = page_get_flags(tb->pc + tb->size - 1); + if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { + printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", + (long)tb->pc, tb->size, flags1, flags2); + } +} + +/* verify that all the pages have correct rights for code */ +static void tb_page_check(void) +{ + qht_iter(&tb_ctx.htable, do_tb_page_check, NULL); +} + +#endif /* CONFIG_USER_ONLY */ + +/* + * user-mode: call with mmap_lock held + * !user-mode: call with @pd->lock held + */ +static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) +{ + TranslationBlock *tb1; + uintptr_t *pprev; + unsigned int n1; + + assert_page_locked(pd); + pprev = &pd->first_tb; + PAGE_FOR_EACH_TB(pd, tb1, n1) { + if (tb1 == tb) { + *pprev = tb1->page_next[n1]; + return; + } + pprev = &tb1->page_next[n1]; + } + g_assert_not_reached(); +} + +/* remove @orig from its @n_orig-th jump list */ +static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) +{ + uintptr_t ptr, ptr_locked; + TranslationBlock *dest; + TranslationBlock *tb; + uintptr_t *pprev; + int n; + + /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ + ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1); + dest = (TranslationBlock *)(ptr & ~1); + if (dest == NULL) { + return; + } + + qemu_spin_lock(&dest->jmp_lock); + /* + * While acquiring the lock, the jump might have been removed if the + * destination TB was invalidated; check again. + */ + ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]); + if (ptr_locked != ptr) { + qemu_spin_unlock(&dest->jmp_lock); + /* + * The only possibility is that the jump was unlinked via + * tb_jump_unlink(dest). Seeing here another destination would be a bug, + * because we set the LSB above. + */ + g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); + return; + } + /* + * We first acquired the lock, and since the destination pointer matches, + * we know for sure that @orig is in the jmp list. + */ + pprev = &dest->jmp_list_head; + TB_FOR_EACH_JMP(dest, tb, n) { + if (tb == orig && n == n_orig) { + *pprev = tb->jmp_list_next[n]; + /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ + qemu_spin_unlock(&dest->jmp_lock); + return; + } + pprev = &tb->jmp_list_next[n]; + } + g_assert_not_reached(); +} + +/* reset the jump entry 'n' of a TB so that it is not chained to + another TB */ +static inline void tb_reset_jump(TranslationBlock *tb, int n) +{ + uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]); + tb_set_jmp_target(tb, n, addr); +} + +/* remove any jumps to the TB */ +static inline void tb_jmp_unlink(TranslationBlock *dest) +{ + TranslationBlock *tb; + int n; + + qemu_spin_lock(&dest->jmp_lock); + + TB_FOR_EACH_JMP(dest, tb, n) { + tb_reset_jump(tb, n); + qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); + /* No need to clear the list entry; setting the dest ptr is enough */ + } + dest->jmp_list_head = (uintptr_t)NULL; + + qemu_spin_unlock(&dest->jmp_lock); +} + +/* + * In user-mode, call with mmap_lock held. + * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' + * locks held. 
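tb_page_remove() and tb_remove_from_jmp_list() above walk lists whose link words carry a 0/1 slot number in the low bit of an aligned pointer, the same trick behind TB_FOR_EACH_TAGGED earlier in this file. A stand-alone sketch of that tagged-pointer list with a simplified two-entry example:

    /* Toy tagged-pointer list: the structures are at least 2-byte aligned,
     * so the low bit of a link is free to say which slot the link uses. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_tb {
        uintptr_t page_next[2];     /* next element, tagged with its slot */
        int id;
    };

    static uintptr_t tag(struct toy_tb *tb, unsigned n)
    {
        assert(((uintptr_t)tb & 1) == 0 && n <= 1);
        return (uintptr_t)tb | n;
    }

    int main(void)
    {
        struct toy_tb a = { .id = 1 }, b = { .id = 2 };
        uintptr_t head;

        /* push b (linked via slot 1), then a (slot 0) onto a per-page list */
        b.page_next[1] = 0;
        head = tag(&b, 1);
        a.page_next[0] = head;
        head = tag(&a, 0);

        /* walk the list, recovering pointer and slot from each tagged word */
        for (uintptr_t e = head; e & ~(uintptr_t)1; ) {
            struct toy_tb *tb = (struct toy_tb *)(e & ~(uintptr_t)1);
            unsigned n = e & 1;

            printf("tb %d linked via slot %u\n", tb->id, n);
            e = tb->page_next[n];
        }
        return 0;
    }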
+ */ +static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) +{ + CPUState *cpu; + PageDesc *p; + uint32_t h; + tb_page_addr_t phys_pc; + uint32_t orig_cflags = tb_cflags(tb); + + assert_memory_lock(); + + /* make sure no further incoming jumps will be chained to this TB */ + qemu_spin_lock(&tb->jmp_lock); + qatomic_set(&tb->cflags, tb->cflags | CF_INVALID); + qemu_spin_unlock(&tb->jmp_lock); + + /* remove the TB from the hash list */ + phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); + h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags, + tb->trace_vcpu_dstate); + if (!qht_remove(&tb_ctx.htable, tb, h)) { + return; + } + + /* remove the TB from the page list */ + if (rm_from_page_list) { + p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); + tb_page_remove(p, tb); + invalidate_page_bitmap(p); + if (tb->page_addr[1] != -1) { + p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); + tb_page_remove(p, tb); + invalidate_page_bitmap(p); + } + } + + /* remove the TB from the hash list */ + h = tb_jmp_cache_hash_func(tb->pc); + CPU_FOREACH(cpu) { + if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) { + qatomic_set(&cpu->tb_jmp_cache[h], NULL); + } + } + + /* suppress this TB from the two jump lists */ + tb_remove_from_jmp_list(tb, 0); + tb_remove_from_jmp_list(tb, 1); + + /* suppress any remaining jumps to this TB */ + tb_jmp_unlink(tb); + + qatomic_set(&tb_ctx.tb_phys_invalidate_count, + tb_ctx.tb_phys_invalidate_count + 1); +} + +static void tb_phys_invalidate__locked(TranslationBlock *tb) +{ + qemu_thread_jit_write(); + do_tb_phys_invalidate(tb, true); + qemu_thread_jit_execute(); +} + +/* invalidate one TB + * + * Called with mmap_lock held in user-mode. + */ +void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) +{ + if (page_addr == -1 && tb->page_addr[0] != -1) { + page_lock_tb(tb); + do_tb_phys_invalidate(tb, true); + page_unlock_tb(tb); + } else { + do_tb_phys_invalidate(tb, false); + } +} + +#ifdef CONFIG_SOFTMMU +/* call with @p->lock held */ +static void build_page_bitmap(PageDesc *p) +{ + int n, tb_start, tb_end; + TranslationBlock *tb; + + assert_page_locked(p); + p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); + + PAGE_FOR_EACH_TB(p, tb, n) { + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb->pc & ~TARGET_PAGE_MASK; + tb_end = tb_start + tb->size; + if (tb_end > TARGET_PAGE_SIZE) { + tb_end = TARGET_PAGE_SIZE; + } + } else { + tb_start = 0; + tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); + } + bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); + } +} +#endif + +/* add the tb in the target page and protect it if necessary + * + * Called with mmap_lock held for user-mode emulation. + * Called with @p->lock held in !user-mode. + */ +static inline void tb_page_add(PageDesc *p, TranslationBlock *tb, + unsigned int n, tb_page_addr_t page_addr) +{ +#ifndef CONFIG_USER_ONLY + bool page_already_protected; +#endif + + assert_page_locked(p); + + tb->page_addr[n] = page_addr; + tb->page_next[n] = p->first_tb; +#ifndef CONFIG_USER_ONLY + page_already_protected = p->first_tb != (uintptr_t)NULL; +#endif + p->first_tb = (uintptr_t)tb | n; + invalidate_page_bitmap(p); + +#if defined(CONFIG_USER_ONLY) + /* translator_loop() must have made all TB pages non-writable */ + assert(!(p->flags & PAGE_WRITE)); +#else + /* if some code is already present, then the pages are already + protected. 
So we handle the case where only the first TB is + allocated in a physical page */ + if (!page_already_protected) { + tlb_protect_code(page_addr); + } +#endif +} + +/* + * Add a new TB and link it to the physical page tables. phys_page2 is + * (-1) to indicate that only one page contains the TB. + * + * Called with mmap_lock held for user-mode emulation. + * + * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. + * Note that in !user-mode, another thread might have already added a TB + * for the same block of guest code that @tb corresponds to. In that case, + * the caller should discard the original @tb, and use instead the returned TB. + */ +static TranslationBlock * +tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, + tb_page_addr_t phys_page2) +{ + PageDesc *p; + PageDesc *p2 = NULL; + void *existing_tb = NULL; + uint32_t h; + + assert_memory_lock(); + tcg_debug_assert(!(tb->cflags & CF_INVALID)); + + /* + * Add the TB to the page list, acquiring first the pages's locks. + * We keep the locks held until after inserting the TB in the hash table, + * so that if the insertion fails we know for sure that the TBs are still + * in the page descriptors. + * Note that inserting into the hash table first isn't an option, since + * we can only insert TBs that are fully initialized. + */ + page_lock_pair(&p, phys_pc, &p2, phys_page2, 1); + tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK); + if (p2) { + tb_page_add(p2, tb, 1, phys_page2); + } else { + tb->page_addr[1] = -1; + } + + /* add in the hash table */ + h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags, + tb->trace_vcpu_dstate); + qht_insert(&tb_ctx.htable, tb, h, &existing_tb); + + /* remove TB from the page(s) if we couldn't insert it */ + if (unlikely(existing_tb)) { + tb_page_remove(p, tb); + invalidate_page_bitmap(p); + if (p2) { + tb_page_remove(p2, tb); + invalidate_page_bitmap(p2); + } + tb = existing_tb; + } + + if (p2 && p2 != p) { + page_unlock(p2); + } + page_unlock(p); + +#ifdef CONFIG_USER_ONLY + if (DEBUG_TB_CHECK_GATE) { + tb_page_check(); + } +#endif + return tb; +} + +/* Called with mmap_lock held for user mode emulation. */ +TranslationBlock *tb_gen_code(CPUState *cpu, + target_ulong pc, target_ulong cs_base, + uint32_t flags, int cflags) +{ + CPUArchState *env = cpu->env_ptr; + TranslationBlock *tb, *existing_tb; + tb_page_addr_t phys_pc, phys_page2; + target_ulong virt_page2; + tcg_insn_unit *gen_code_buf; + int gen_code_size, search_size, max_insns; +#ifdef CONFIG_PROFILER + TCGProfile *prof = &tcg_ctx->prof; + int64_t ti; +#endif + + assert_memory_lock(); + qemu_thread_jit_write(); + + phys_pc = get_page_addr_code(env, pc); + + if (phys_pc == -1) { + /* Generate a one-shot TB with 1 insn in it */ + cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1; + } + + max_insns = cflags & CF_COUNT_MASK; + if (max_insns == 0) { + max_insns = TCG_MAX_INSNS; + } + QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS); + + buffer_overflow: + tb = tcg_tb_alloc(tcg_ctx); + if (unlikely(!tb)) { + /* flush must be done */ + tb_flush(cpu); + mmap_unlock(); + /* Make the execution loop process the flush as soon as possible. 
*/ + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit(cpu); + } + + gen_code_buf = tcg_ctx->code_gen_ptr; + tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf); + tb->pc = pc; + tb->cs_base = cs_base; + tb->flags = flags; + tb->cflags = cflags; + tb->trace_vcpu_dstate = *cpu->trace_dstate; + tcg_ctx->tb_cflags = cflags; + tb_overflow: + +#ifdef CONFIG_PROFILER + /* includes aborted translations because of exceptions */ + qatomic_set(&prof->tb_count1, prof->tb_count1 + 1); + ti = profile_getclock(); +#endif + + gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0); + if (unlikely(gen_code_size != 0)) { + goto error_return; + } + + tcg_func_start(tcg_ctx); + + tcg_ctx->cpu = env_cpu(env); + gen_intermediate_code(cpu, tb, max_insns); + assert(tb->size != 0); + tcg_ctx->cpu = NULL; + max_insns = tb->icount; + + trace_translate_block(tb, tb->pc, tb->tc.ptr); + + /* generate machine code */ + tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; + tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID; + tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset; + if (TCG_TARGET_HAS_direct_jump) { + tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg; + tcg_ctx->tb_jmp_target_addr = NULL; + } else { + tcg_ctx->tb_jmp_insn_offset = NULL; + tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg; + } + +#ifdef CONFIG_PROFILER + qatomic_set(&prof->tb_count, prof->tb_count + 1); + qatomic_set(&prof->interm_time, + prof->interm_time + profile_getclock() - ti); + ti = profile_getclock(); +#endif + + gen_code_size = tcg_gen_code(tcg_ctx, tb); + if (unlikely(gen_code_size < 0)) { + error_return: + switch (gen_code_size) { + case -1: + /* + * Overflow of code_gen_buffer, or the current slice of it. + * + * TODO: We don't need to re-do gen_intermediate_code, nor + * should we re-do the tcg optimization currently hidden + * inside tcg_gen_code. All that should be required is to + * flush the TBs, allocate a new TB, re-initialize it per + * above, and re-do the actual code generation. + */ + qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT, + "Restarting code generation for " + "code_gen_buffer overflow\n"); + goto buffer_overflow; + + case -2: + /* + * The code generated for the TranslationBlock is too large. + * The maximum size allowed by the unwind info is 64k. + * There may be stricter constraints from relocations + * in the tcg backend. + * + * Try again with half as many insns as we attempted this time. + * If a single insn overflows, there's a bug somewhere... 
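The error path here is reached through sigsetjmp()/siglongjmp(): deep inside code generation, tcg_gen_code() can jump back to the sigsetjmp() in tb_gen_code() with a negative code, and the caller then restarts translation with adjusted limits. A toy stand-alone version of that control flow, with invented names and a fake size limit:

    /* Toy bail-out-and-retry loop; names and the limit are invented. */
    #include <setjmp.h>
    #include <stdio.h>

    static sigjmp_buf jmp_trans;

    static void deep_codegen_step(int insns)
    {
        if (insns > 4) {
            siglongjmp(jmp_trans, -2);  /* "translation block too large" */
        }
    }

    static int translate(int max_insns)
    {
        int ret = sigsetjmp(jmp_trans, 0);

        if (ret != 0) {
            return ret;                 /* arrived here via siglongjmp() */
        }
        deep_codegen_step(max_insns);
        return max_insns;               /* success: "generated" size */
    }

    int main(void)
    {
        int max_insns = 16, ret;

        while ((ret = translate(max_insns)) == -2) {
            max_insns /= 2;             /* retry with half as many insns */
        }
        printf("succeeded with max_insns=%d (ret=%d)\n", max_insns, ret);
        return 0;
    }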
+ */ + assert(max_insns > 1); + max_insns /= 2; + qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT, + "Restarting code generation with " + "smaller translation block (max %d insns)\n", + max_insns); + goto tb_overflow; + + default: + g_assert_not_reached(); + } + } + search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size); + if (unlikely(search_size < 0)) { + goto buffer_overflow; + } + tb->tc.size = gen_code_size; + +#ifdef CONFIG_PROFILER + qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti); + qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size); + qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size); + qatomic_set(&prof->search_out_len, prof->search_out_len + search_size); +#endif + +#ifdef DEBUG_DISAS + if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && + qemu_log_in_addr_range(tb->pc)) { + FILE *logfile = qemu_log_lock(); + int code_size, data_size; + const tcg_target_ulong *rx_data_gen_ptr; + size_t chunk_start; + int insn = 0; + + if (tcg_ctx->data_gen_ptr) { + rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr); + code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr; + data_size = gen_code_size - code_size; + } else { + rx_data_gen_ptr = 0; + code_size = gen_code_size; + data_size = 0; + } + + /* Dump header and the first instruction */ + qemu_log("OUT: [size=%d]\n", gen_code_size); + qemu_log(" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n", + tcg_ctx->gen_insn_data[insn][0]); + chunk_start = tcg_ctx->gen_insn_end_off[insn]; + log_disas(tb->tc.ptr, chunk_start); + + /* + * Dump each instruction chunk, wrapping up empty chunks into + * the next instruction. The whole array is offset so the + * first entry is the beginning of the 2nd instruction. + */ + while (insn < tb->icount) { + size_t chunk_end = tcg_ctx->gen_insn_end_off[insn]; + if (chunk_end > chunk_start) { + qemu_log(" -- guest addr 0x" TARGET_FMT_lx "\n", + tcg_ctx->gen_insn_data[insn][0]); + log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start); + chunk_start = chunk_end; + } + insn++; + } + + if (chunk_start < code_size) { + qemu_log(" -- tb slow paths + alignment\n"); + log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start); + } + + /* Finally dump any data we may have after the block */ + if (data_size) { + int i; + qemu_log(" data: [size=%d]\n", data_size); + for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) { + if (sizeof(tcg_target_ulong) == 8) { + qemu_log("0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n", + (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]); + } else if (sizeof(tcg_target_ulong) == 4) { + qemu_log("0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n", + (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]); + } else { + qemu_build_not_reached(); + } + } + } + qemu_log("\n"); + qemu_log_flush(); + qemu_log_unlock(logfile); + } +#endif + + qatomic_set(&tcg_ctx->code_gen_ptr, (void *) + ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, + CODE_GEN_ALIGN)); + + /* init jump list */ + qemu_spin_init(&tb->jmp_lock); + tb->jmp_list_head = (uintptr_t)NULL; + tb->jmp_list_next[0] = (uintptr_t)NULL; + tb->jmp_list_next[1] = (uintptr_t)NULL; + tb->jmp_dest[0] = (uintptr_t)NULL; + tb->jmp_dest[1] = (uintptr_t)NULL; + + /* init original jump addresses which have been set during tcg_gen_code() */ + if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { + tb_reset_jump(tb, 0); + } + if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { + tb_reset_jump(tb, 1); + } + + /* + * If the TB is not associated 
with a physical RAM page then + * it must be a temporary one-insn TB, and we have nothing to do + * except fill in the page_addr[] fields. Return early before + * attempting to link to other TBs or add to the lookup table. + */ + if (phys_pc == -1) { + tb->page_addr[0] = tb->page_addr[1] = -1; + return tb; + } + + /* + * Insert TB into the corresponding region tree before publishing it + * through QHT. Otherwise rewinding happened in the TB might fail to + * lookup itself using host PC. + */ + tcg_tb_insert(tb); + + /* check next page if needed */ + virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; + phys_page2 = -1; + if ((pc & TARGET_PAGE_MASK) != virt_page2) { + phys_page2 = get_page_addr_code(env, virt_page2); + } + /* + * No explicit memory barrier is required -- tb_link_page() makes the + * TB visible in a consistent state. + */ + existing_tb = tb_link_page(tb, phys_pc, phys_page2); + /* if the TB already exists, discard what we just translated */ + if (unlikely(existing_tb != tb)) { + uintptr_t orig_aligned = (uintptr_t)gen_code_buf; + + orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize); + qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned); + tcg_tb_remove(tb); + return existing_tb; + } + return tb; +} + +/* + * @p must be non-NULL. + * user-mode: call with mmap_lock held. + * !user-mode: call with all @pages locked. + */ +static void +tb_invalidate_phys_page_range__locked(struct page_collection *pages, + PageDesc *p, tb_page_addr_t start, + tb_page_addr_t end, + uintptr_t retaddr) +{ + TranslationBlock *tb; + tb_page_addr_t tb_start, tb_end; + int n; +#ifdef TARGET_HAS_PRECISE_SMC + CPUState *cpu = current_cpu; + CPUArchState *env = NULL; + bool current_tb_not_found = retaddr != 0; + bool current_tb_modified = false; + TranslationBlock *current_tb = NULL; + target_ulong current_pc = 0; + target_ulong current_cs_base = 0; + uint32_t current_flags = 0; +#endif /* TARGET_HAS_PRECISE_SMC */ + + assert_page_locked(p); + +#if defined(TARGET_HAS_PRECISE_SMC) + if (cpu != NULL) { + env = cpu->env_ptr; + } +#endif + + /* we remove all the TBs in the range [start, end[ */ + /* XXX: see if in some cases it could be faster to invalidate all + the code */ + PAGE_FOR_EACH_TB(p, tb, n) { + assert_page_locked(p); + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); + tb_end = tb_start + tb->size; + } else { + tb_start = tb->page_addr[1]; + tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); + } + if (!(tb_end <= start || tb_start >= end)) { +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_not_found) { + current_tb_not_found = false; + /* now we have a real cpu fault */ + current_tb = tcg_tb_lookup(retaddr); + } + if (current_tb == tb && + (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { + /* + * If we are modifying the current TB, we must stop + * its execution. We could be more precise by checking + * that the modification is after the current PC, but it + * would require a specialized function to partially + * restore the CPU state. 
+ */
+ current_tb_modified = true;
+ cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
+ cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
+ &current_flags);
+ }
+#endif /* TARGET_HAS_PRECISE_SMC */
+ tb_phys_invalidate__locked(tb);
+ }
+ }
+#if !defined(CONFIG_USER_ONLY)
+ /* if no code remaining, no need to continue to use slow writes */
+ if (!p->first_tb) {
+ invalidate_page_bitmap(p);
+ tlb_unprotect_code(start);
+ }
+#endif
+#ifdef TARGET_HAS_PRECISE_SMC
+ if (current_tb_modified) {
+ page_collection_unlock(pages);
+ /* Force execution of one insn next time. */
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
+ mmap_unlock();
+ cpu_loop_exit_noexc(cpu);
+ }
+#endif
+}
+
+/*
+ * Invalidate all TBs which intersect with the target physical address range
+ * [start;end[. NOTE: start and end must refer to the *same* physical page.
+ * 'is_cpu_write_access' should be true if called from a real cpu write
+ * access: the virtual CPU will exit the current TB if code is modified inside
+ * this TB.
+ *
+ * Called with mmap_lock held for user-mode emulation
+ */
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
+{
+ struct page_collection *pages;
+ PageDesc *p;
+
+ assert_memory_lock();
+
+ p = page_find(start >> TARGET_PAGE_BITS);
+ if (p == NULL) {
+ return;
+ }
+ pages = page_collection_lock(start, end);
+ tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
+ page_collection_unlock(pages);
+}
+
+/*
+ * Invalidate all TBs which intersect with the target physical address range
+ * [start;end[. NOTE: start and end may refer to *different* physical pages.
+ * 'is_cpu_write_access' should be true if called from a real cpu write
+ * access: the virtual CPU will exit the current TB if code is modified inside
+ * this TB.
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
+#ifdef CONFIG_SOFTMMU
+void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
+#else
+void tb_invalidate_phys_range(target_ulong start, target_ulong end)
+#endif
+{
+ struct page_collection *pages;
+ tb_page_addr_t next;
+
+ assert_memory_lock();
+
+ pages = page_collection_lock(start, end);
+ for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ start < end;
+ start = next, next += TARGET_PAGE_SIZE) {
+ PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
+ tb_page_addr_t bound = MIN(next, end);
+
+ if (pd == NULL) {
+ continue;
+ }
+ tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
+ }
+ page_collection_unlock(pages);
+}
+
+#ifdef CONFIG_SOFTMMU
+/* len must be <= 8 and start must be a multiple of len.
+ * Called via softmmu_template.h when code areas are written to with
+ * iothread mutex not held.
+ *
+ * Call with all @pages in the range [@start, @start + len[ locked.
+ */
+void tb_invalidate_phys_page_fast(struct page_collection *pages,
+ tb_page_addr_t start, int len,
+ uintptr_t retaddr)
+{
+ PageDesc *p;
+
+ assert_memory_lock();
+
+ p = page_find(start >> TARGET_PAGE_BITS);
+ if (!p) {
+ return;
+ }
+
+ assert_page_locked(p);
+ if (!p->code_bitmap &&
+ ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
+ build_page_bitmap(p);
+ }
+ if (p->code_bitmap) {
+ unsigned int nr;
+ unsigned long b;
+
+ nr = start & ~TARGET_PAGE_MASK;
+ b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
+ if (b & ((1 << len) - 1)) {
+ goto do_invalidate;
+ }
+ } else {
+ do_invalidate:
+ tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
+ retaddr);
+ }
+}
+#else
+/* Called with mmap_lock held.
If pc is not 0 then it indicates the
+ * host PC of the faulting store instruction that caused this invalidate.
+ * Returns true if the caller needs to abort execution of the current
+ * TB (because it was modified by this store and the guest CPU has
+ * precise-SMC semantics).
+ */
+static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
+{
+ TranslationBlock *tb;
+ PageDesc *p;
+ int n;
+#ifdef TARGET_HAS_PRECISE_SMC
+ TranslationBlock *current_tb = NULL;
+ CPUState *cpu = current_cpu;
+ CPUArchState *env = NULL;
+ int current_tb_modified = 0;
+ target_ulong current_pc = 0;
+ target_ulong current_cs_base = 0;
+ uint32_t current_flags = 0;
+#endif
+
+ assert_memory_lock();
+
+ addr &= TARGET_PAGE_MASK;
+ p = page_find(addr >> TARGET_PAGE_BITS);
+ if (!p) {
+ return false;
+ }
+
+#ifdef TARGET_HAS_PRECISE_SMC
+ if (p->first_tb && pc != 0) {
+ current_tb = tcg_tb_lookup(pc);
+ }
+ if (cpu != NULL) {
+ env = cpu->env_ptr;
+ }
+#endif
+ assert_page_locked(p);
+ PAGE_FOR_EACH_TB(p, tb, n) {
+#ifdef TARGET_HAS_PRECISE_SMC
+ if (current_tb == tb &&
+ (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
+ /* If we are modifying the current TB, we must stop
+ its execution. We could be more precise by checking
+ that the modification is after the current PC, but it
+ would require a specialized function to partially
+ restore the CPU state */
+
+ current_tb_modified = 1;
+ cpu_restore_state_from_tb(cpu, current_tb, pc, true);
+ cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
+ &current_flags);
+ }
+#endif /* TARGET_HAS_PRECISE_SMC */
+ tb_phys_invalidate(tb, addr);
+ }
+ p->first_tb = (uintptr_t)NULL;
+#ifdef TARGET_HAS_PRECISE_SMC
+ if (current_tb_modified) {
+ /* Force execution of one insn next time. */
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
+ return true;
+ }
+#endif
+
+ return false;
+}
+#endif
+
+/* user-mode: call with mmap_lock held */
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
+{
+ TranslationBlock *tb;
+
+ assert_memory_lock();
+
+ tb = tcg_tb_lookup(retaddr);
+ if (tb) {
+ /* We can use retranslation to find the PC. */
+ cpu_restore_state_from_tb(cpu, tb, retaddr, true);
+ tb_phys_invalidate(tb, -1);
+ } else {
+ /* The exception probably happened in a helper. The CPU state should
+ have been saved before calling it. Fetch the PC from there. */
+ CPUArchState *env = cpu->env_ptr;
+ target_ulong pc, cs_base;
+ tb_page_addr_t addr;
+ uint32_t flags;
+
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+ addr = get_page_addr_code(env, pc);
+ if (addr != -1) {
+ tb_invalidate_phys_range(addr, addr + 1);
+ }
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+/*
+ * In deterministic execution mode, instructions doing device I/Os
+ * must be at the end of the TB.
+ *
+ * Called by softmmu_template.h, with iothread mutex not held.
+ */
+void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
+{
+ TranslationBlock *tb;
+ CPUClass *cc;
+ uint32_t n;
+
+ tb = tcg_tb_lookup(retaddr);
+ if (!tb) {
+ cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
+ (void *)retaddr);
+ }
+ cpu_restore_state_from_tb(cpu, tb, retaddr, true);
+
+ /*
+ * Some guests must re-execute the branch when re-executing a delay
+ * slot instruction. When this is the case, adjust icount and N
+ * to account for the re-execution of the branch.
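+ *
+ * For example (illustrative only; the target decides via the
+ * io_recompile_replay_branch hook below): on a guest with branch
+ * delay slots, an I/O insn sitting in the delay slot means the
+ * branch is replayed as well, so icount is credited one extra insn
+ * and N becomes 2 instead of 1.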
+ */ + n = 1; + cc = CPU_GET_CLASS(cpu); + if (cc->tcg_ops->io_recompile_replay_branch && + cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) { + cpu_neg(cpu)->icount_decr.u16.low++; + n = 2; + } + + /* + * Exit the loop and potentially generate a new TB executing the + * just the I/O insns. We also limit instrumentation to memory + * operations only (which execute after completion) so we don't + * double instrument the instruction. + */ + cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n; + + qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, + "cpu_io_recompile: rewound execution of TB to " + TARGET_FMT_lx "\n", tb->pc); + + cpu_loop_exit_noexc(cpu); +} + +static void print_qht_statistics(struct qht_stats hst, GString *buf) +{ + uint32_t hgram_opts; + size_t hgram_bins; + char *hgram; + + if (!hst.head_buckets) { + return; + } + g_string_append_printf(buf, "TB hash buckets %zu/%zu " + "(%0.2f%% head buckets used)\n", + hst.used_head_buckets, hst.head_buckets, + (double)hst.used_head_buckets / + hst.head_buckets * 100); + + hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; + hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT; + if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) { + hgram_opts |= QDIST_PR_NODECIMAL; + } + hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); + g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. " + "Histogram: %s\n", + qdist_avg(&hst.occupancy) * 100, hgram); + g_free(hgram); + + hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; + hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain); + if (hgram_bins > 10) { + hgram_bins = 10; + } else { + hgram_bins = 0; + hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; + } + hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); + g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. " + "Histogram: %s\n", + qdist_avg(&hst.chain), hgram); + g_free(hgram); +} + +struct tb_tree_stats { + size_t nb_tbs; + size_t host_size; + size_t target_size; + size_t max_target_size; + size_t direct_jmp_count; + size_t direct_jmp2_count; + size_t cross_page; +}; + +static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) +{ + const TranslationBlock *tb = value; + struct tb_tree_stats *tst = data; + + tst->nb_tbs++; + tst->host_size += tb->tc.size; + tst->target_size += tb->size; + if (tb->size > tst->max_target_size) { + tst->max_target_size = tb->size; + } + if (tb->page_addr[1] != -1) { + tst->cross_page++; + } + if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { + tst->direct_jmp_count++; + if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { + tst->direct_jmp2_count++; + } + } + return false; +} + +void dump_exec_info(GString *buf) +{ + struct tb_tree_stats tst = {}; + struct qht_stats hst; + size_t nb_tbs, flush_full, flush_part, flush_elide; + + tcg_tb_foreach(tb_tree_stats_iter, &tst); + nb_tbs = tst.nb_tbs; + /* XXX: avoid using doubles ? */ + g_string_append_printf(buf, "Translation buffer state:\n"); + /* + * Report total code size including the padding and TB structs; + * otherwise users might think "-accel tcg,tb-size" is not honoured. + * For avg host size we use the precise numbers from tb_tree_stats though. + */ + g_string_append_printf(buf, "gen code size %zu/%zu\n", + tcg_code_size(), tcg_code_capacity()); + g_string_append_printf(buf, "TB count %zu\n", nb_tbs); + g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n", + nb_tbs ? 
tst.target_size / nb_tbs : 0, + tst.max_target_size); + g_string_append_printf(buf, "TB avg host size %zu bytes " + "(expansion ratio: %0.1f)\n", + nb_tbs ? tst.host_size / nb_tbs : 0, + tst.target_size ? + (double)tst.host_size / tst.target_size : 0); + g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n", + tst.cross_page, + nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); + g_string_append_printf(buf, "direct jump count %zu (%zu%%) " + "(2 jumps=%zu %zu%%)\n", + tst.direct_jmp_count, + nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, + tst.direct_jmp2_count, + nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0); + + qht_statistics_init(&tb_ctx.htable, &hst); + print_qht_statistics(hst, buf); + qht_statistics_destroy(&hst); + + g_string_append_printf(buf, "\nStatistics:\n"); + g_string_append_printf(buf, "TB flush count %u\n", + qatomic_read(&tb_ctx.tb_flush_count)); + g_string_append_printf(buf, "TB invalidate count %u\n", + qatomic_read(&tb_ctx.tb_phys_invalidate_count)); + + tlb_flush_counts(&flush_full, &flush_part, &flush_elide); + g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full); + g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part); + g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide); + tcg_dump_info(buf); +} + +void dump_opcount_info(GString *buf) +{ + tcg_dump_op_count(buf); +} + +#else /* CONFIG_USER_ONLY */ + +void cpu_interrupt(CPUState *cpu, int mask) +{ + g_assert(qemu_mutex_iothread_locked()); + cpu->interrupt_request |= mask; + qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); +} + +/* + * Walks guest process memory "regions" one by one + * and calls callback function 'fn' for each region. + */ +struct walk_memory_regions_data { + walk_memory_regions_fn fn; + void *priv; + target_ulong start; + int prot; +}; + +static int walk_memory_regions_end(struct walk_memory_regions_data *data, + target_ulong end, int new_prot) +{ + if (data->start != -1u) { + int rc = data->fn(data->priv, data->start, end, data->prot); + if (rc != 0) { + return rc; + } + } + + data->start = (new_prot ? 
end : -1u); + data->prot = new_prot; + + return 0; +} + +static int walk_memory_regions_1(struct walk_memory_regions_data *data, + target_ulong base, int level, void **lp) +{ + target_ulong pa; + int i, rc; + + if (*lp == NULL) { + return walk_memory_regions_end(data, base, 0); + } + + if (level == 0) { + PageDesc *pd = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + int prot = pd[i].flags; + + pa = base | (i << TARGET_PAGE_BITS); + if (prot != data->prot) { + rc = walk_memory_regions_end(data, pa, prot); + if (rc != 0) { + return rc; + } + } + } + } else { + void **pp = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + pa = base | ((target_ulong)i << + (TARGET_PAGE_BITS + V_L2_BITS * level)); + rc = walk_memory_regions_1(data, pa, level - 1, pp + i); + if (rc != 0) { + return rc; + } + } + } + + return 0; +} + +int walk_memory_regions(void *priv, walk_memory_regions_fn fn) +{ + struct walk_memory_regions_data data; + uintptr_t i, l1_sz = v_l1_size; + + data.fn = fn; + data.priv = priv; + data.start = -1u; + data.prot = 0; + + for (i = 0; i < l1_sz; i++) { + target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS); + int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i); + if (rc != 0) { + return rc; + } + } + + return walk_memory_regions_end(&data, 0, 0); +} + +static int dump_region(void *priv, target_ulong start, + target_ulong end, unsigned long prot) +{ + FILE *f = (FILE *)priv; + + (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx + " "TARGET_FMT_lx" %c%c%c\n", + start, end, end - start, + ((prot & PAGE_READ) ? 'r' : '-'), + ((prot & PAGE_WRITE) ? 'w' : '-'), + ((prot & PAGE_EXEC) ? 'x' : '-')); + + return 0; +} + +/* dump memory mappings */ +void page_dump(FILE *f) +{ + const int length = sizeof(target_ulong) * 2; + (void) fprintf(f, "%-*s %-*s %-*s %s\n", + length, "start", length, "end", length, "size", "prot"); + walk_memory_regions(f, dump_region); +} + +int page_get_flags(target_ulong address) +{ + PageDesc *p; + + p = page_find(address >> TARGET_PAGE_BITS); + if (!p) { + return 0; + } + return p->flags; +} + +/* Modify the flags of a page and invalidate the code if necessary. + The flag PAGE_WRITE_ORG is positioned automatically depending + on PAGE_WRITE. The mmap_lock should already be held. */ +void page_set_flags(target_ulong start, target_ulong end, int flags) +{ + target_ulong addr, len; + bool reset_target_data; + + /* This function should never be called with addresses outside the + guest address space. If this assert fires, it probably indicates + a missing call to h2g_valid. */ + assert(end - 1 <= GUEST_ADDR_MAX); + assert(start < end); + /* Only set PAGE_ANON with new mappings. */ + assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET)); + assert_memory_lock(); + + start = start & TARGET_PAGE_MASK; + end = TARGET_PAGE_ALIGN(end); + + if (flags & PAGE_WRITE) { + flags |= PAGE_WRITE_ORG; + } + reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET); + flags &= ~PAGE_RESET; + + for (addr = start, len = end - start; + len != 0; + len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { + PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); + + /* If the write protection bit is set, then we invalidate + the code inside. */ + if (!(p->flags & PAGE_WRITE) && + (flags & PAGE_WRITE) && + p->first_tb) { + tb_invalidate_phys_page(addr, 0); + } + if (reset_target_data) { + g_free(p->target_data); + p->target_data = NULL; + p->flags = flags; + } else { + /* Using mprotect on a page does not change MAP_ANON. 
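+ For example, a page originally created with PAGE_ANON that is later
+ mprotect()ed read-only keeps PAGE_ANON here, while every other flag
+ bit is taken from the new 'flags' value.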
*/ + p->flags = (p->flags & PAGE_ANON) | flags; + } + } +} + +void *page_get_target_data(target_ulong address) +{ + PageDesc *p = page_find(address >> TARGET_PAGE_BITS); + return p ? p->target_data : NULL; +} + +void *page_alloc_target_data(target_ulong address, size_t size) +{ + PageDesc *p = page_find(address >> TARGET_PAGE_BITS); + void *ret = NULL; + + if (p->flags & PAGE_VALID) { + ret = p->target_data; + if (!ret) { + p->target_data = ret = g_malloc0(size); + } + } + return ret; +} + +int page_check_range(target_ulong start, target_ulong len, int flags) +{ + PageDesc *p; + target_ulong end; + target_ulong addr; + + /* This function should never be called with addresses outside the + guest address space. If this assert fires, it probably indicates + a missing call to h2g_valid. */ + if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) { + assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); + } + + if (len == 0) { + return 0; + } + if (start + len - 1 < start) { + /* We've wrapped around. */ + return -1; + } + + /* must do before we loose bits in the next step */ + end = TARGET_PAGE_ALIGN(start + len); + start = start & TARGET_PAGE_MASK; + + for (addr = start, len = end - start; + len != 0; + len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { + p = page_find(addr >> TARGET_PAGE_BITS); + if (!p) { + return -1; + } + if (!(p->flags & PAGE_VALID)) { + return -1; + } + + if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) { + return -1; + } + if (flags & PAGE_WRITE) { + if (!(p->flags & PAGE_WRITE_ORG)) { + return -1; + } + /* unprotect the page if it was put read-only because it + contains translated code */ + if (!(p->flags & PAGE_WRITE)) { + if (!page_unprotect(addr, 0)) { + return -1; + } + } + } + } + return 0; +} + +void page_protect(tb_page_addr_t page_addr) +{ + target_ulong addr; + PageDesc *p; + int prot; + + p = page_find(page_addr >> TARGET_PAGE_BITS); + if (p && (p->flags & PAGE_WRITE)) { + /* + * Force the host page as non writable (writes will have a page fault + + * mprotect overhead). + */ + page_addr &= qemu_host_page_mask; + prot = 0; + for (addr = page_addr; addr < page_addr + qemu_host_page_size; + addr += TARGET_PAGE_SIZE) { + + p = page_find(addr >> TARGET_PAGE_BITS); + if (!p) { + continue; + } + prot |= p->flags; + p->flags &= ~PAGE_WRITE; + } + mprotect(g2h_untagged(page_addr), qemu_host_page_size, + (prot & PAGE_BITS) & ~PAGE_WRITE); + if (DEBUG_TB_INVALIDATE_GATE) { + printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr); + } + } +} + +/* called from signal handler: invalidate the code and unprotect the + * page. Return 0 if the fault was not handled, 1 if it was handled, + * and 2 if it was handled but the caller must cause the TB to be + * immediately exited. (We can only return 2 if the 'pc' argument is + * non-zero.) + */ +int page_unprotect(target_ulong address, uintptr_t pc) +{ + unsigned int prot; + bool current_tb_invalidated; + PageDesc *p; + target_ulong host_start, host_end, addr; + + /* Technically this isn't safe inside a signal handler. However we + know this only ever happens in a synchronous SEGV handler, so in + practice it seems to be ok. 
*/ + mmap_lock(); + + p = page_find(address >> TARGET_PAGE_BITS); + if (!p) { + mmap_unlock(); + return 0; + } + + /* if the page was really writable, then we change its + protection back to writable */ + if (p->flags & PAGE_WRITE_ORG) { + current_tb_invalidated = false; + if (p->flags & PAGE_WRITE) { + /* If the page is actually marked WRITE then assume this is because + * this thread raced with another one which got here first and + * set the page to PAGE_WRITE and did the TB invalidate for us. + */ +#ifdef TARGET_HAS_PRECISE_SMC + TranslationBlock *current_tb = tcg_tb_lookup(pc); + if (current_tb) { + current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID; + } +#endif + } else { + host_start = address & qemu_host_page_mask; + host_end = host_start + qemu_host_page_size; + + prot = 0; + for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) { + p = page_find(addr >> TARGET_PAGE_BITS); + p->flags |= PAGE_WRITE; + prot |= p->flags; + + /* and since the content will be modified, we must invalidate + the corresponding translated code. */ + current_tb_invalidated |= tb_invalidate_phys_page(addr, pc); +#ifdef CONFIG_USER_ONLY + if (DEBUG_TB_CHECK_GATE) { + tb_invalidate_check(addr); + } +#endif + } + mprotect((void *)g2h_untagged(host_start), qemu_host_page_size, + prot & PAGE_BITS); + } + mmap_unlock(); + /* If current TB was invalidated return to main loop */ + return current_tb_invalidated ? 2 : 1; + } + mmap_unlock(); + return 0; +} +#endif /* CONFIG_USER_ONLY */ + +/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */ +void tcg_flush_softmmu_tlb(CPUState *cs) +{ +#ifdef CONFIG_SOFTMMU + tlb_flush(cs); +#endif +} diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c new file mode 100644 index 000000000..f06c31426 --- /dev/null +++ b/accel/tcg/translator.c @@ -0,0 +1,178 @@ +/* + * Generic intermediate code generation. + * + * Copyright (C) 2016-2017 Lluís Vilanova <vilanova@ac.upc.edu> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "tcg/tcg.h" +#include "tcg/tcg-op.h" +#include "exec/exec-all.h" +#include "exec/gen-icount.h" +#include "exec/log.h" +#include "exec/translator.h" +#include "exec/plugin-gen.h" +#include "sysemu/replay.h" + +/* Pairs with tcg_clear_temp_count. + To be called by #TranslatorOps.{translate_insn,tb_stop} if + (1) the target is sufficiently clean to support reporting, + (2) as and when all temporaries are known to be consumed. + For most targets, (2) is at the end of translate_insn. */ +void translator_loop_temp_check(DisasContextBase *db) +{ + if (tcg_check_temp_count()) { + qemu_log("warning: TCG temporary leaks before " + TARGET_FMT_lx "\n", db->pc_next); + } +} + +bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest) +{ + /* Suppress goto_tb if requested. */ + if (tb_cflags(db->tb) & CF_NO_GOTO_TB) { + return false; + } + + /* Check for the dest on the same page as the start of the TB. 
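+ As a worked example (assuming 4 KiB target pages, i.e.
+ TARGET_PAGE_MASK == ~0xfff): pc_first = 0x401010 and dest = 0x401ffc
+ differ only in the low 12 bits, so the XOR below masks to zero and
+ goto_tb is allowed; dest = 0x402000 leaves non-zero bits above the
+ page offset and is rejected.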
*/ + return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0; +} + +static inline void translator_page_protect(DisasContextBase *dcbase, + target_ulong pc) +{ +#ifdef CONFIG_USER_ONLY + dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK; + page_protect(pc); +#endif +} + +void translator_loop(const TranslatorOps *ops, DisasContextBase *db, + CPUState *cpu, TranslationBlock *tb, int max_insns) +{ + uint32_t cflags = tb_cflags(tb); + bool plugin_enabled; + + /* Initialize DisasContext */ + db->tb = tb; + db->pc_first = tb->pc; + db->pc_next = db->pc_first; + db->is_jmp = DISAS_NEXT; + db->num_insns = 0; + db->max_insns = max_insns; + db->singlestep_enabled = cflags & CF_SINGLE_STEP; + translator_page_protect(db, db->pc_next); + + ops->init_disas_context(db, cpu); + tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ + + /* Reset the temp count so that we can identify leaks */ + tcg_clear_temp_count(); + + /* Start translating. */ + gen_tb_start(db->tb); + ops->tb_start(db, cpu); + tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ + + plugin_enabled = plugin_gen_tb_start(cpu, tb, cflags & CF_MEMI_ONLY); + + while (true) { + db->num_insns++; + ops->insn_start(db, cpu); + tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ + + if (plugin_enabled) { + plugin_gen_insn_start(cpu, db); + } + + /* Disassemble one instruction. The translate_insn hook should + update db->pc_next and db->is_jmp to indicate what should be + done next -- either exiting this loop or locate the start of + the next instruction. */ + if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) { + /* Accept I/O on the last instruction. */ + gen_io_start(); + ops->translate_insn(db, cpu); + } else { + /* we should only see CF_MEMI_ONLY for io_recompile */ + tcg_debug_assert(!(cflags & CF_MEMI_ONLY)); + ops->translate_insn(db, cpu); + } + + /* Stop translation if translate_insn so indicated. */ + if (db->is_jmp != DISAS_NEXT) { + break; + } + + /* + * We can't instrument after instructions that change control + * flow although this only really affects post-load operations. + */ + if (plugin_enabled) { + plugin_gen_insn_end(); + } + + /* Stop translation if the output buffer is full, + or we have executed all of the allowed instructions. */ + if (tcg_op_buf_full() || db->num_insns >= db->max_insns) { + db->is_jmp = DISAS_TOO_MANY; + break; + } + } + + /* Emit code to exit the TB, as indicated by db->is_jmp. */ + ops->tb_stop(db, cpu); + gen_tb_end(db->tb, db->num_insns); + + if (plugin_enabled) { + plugin_gen_tb_end(cpu); + } + + /* The disas_log hook may use these values rather than recompute. 
*/ + tb->size = db->pc_next - db->pc_first; + tb->icount = db->num_insns; + +#ifdef DEBUG_DISAS + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) + && qemu_log_in_addr_range(db->pc_first)) { + FILE *logfile = qemu_log_lock(); + qemu_log("----------------\n"); + ops->disas_log(db, cpu); + qemu_log("\n"); + qemu_log_unlock(logfile); + } +#endif +} + +static inline void translator_maybe_page_protect(DisasContextBase *dcbase, + target_ulong pc, size_t len) +{ +#ifdef CONFIG_USER_ONLY + target_ulong end = pc + len - 1; + + if (end > dcbase->page_protect_end) { + translator_page_protect(dcbase, end); + } +#endif +} + +#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \ + type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \ + abi_ptr pc, bool do_swap) \ + { \ + translator_maybe_page_protect(dcbase, pc, sizeof(type)); \ + type ret = load_fn(env, pc); \ + if (do_swap) { \ + ret = swap_fn(ret); \ + } \ + plugin_insn_append(pc, &ret, sizeof(ret)); \ + return ret; \ + } + +FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD) + +#undef GEN_TRANSLATOR_LD diff --git a/accel/tcg/user-exec-stub.c b/accel/tcg/user-exec-stub.c new file mode 100644 index 000000000..968cd3ca6 --- /dev/null +++ b/accel/tcg/user-exec-stub.c @@ -0,0 +1,39 @@ +#include "qemu/osdep.h" +#include "hw/core/cpu.h" +#include "sysemu/replay.h" + +bool enable_cpu_pm = false; + +void cpu_resume(CPUState *cpu) +{ +} + +void cpu_remove_sync(CPUState *cpu) +{ +} + +void qemu_init_vcpu(CPUState *cpu) +{ +} + +/* User mode emulation does not support record/replay yet. */ + +bool replay_exception(void) +{ + return true; +} + +bool replay_has_exception(void) +{ + return false; +} + +bool replay_interrupt(void) +{ + return true; +} + +bool replay_has_interrupt(void) +{ + return false; +} diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c new file mode 100644 index 000000000..1528a21fa --- /dev/null +++ b/accel/tcg/user-exec.c @@ -0,0 +1,542 @@ +/* + * User emulator execution + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#include "qemu/osdep.h" +#include "hw/core/tcg-cpu-ops.h" +#include "disas/disas.h" +#include "exec/exec-all.h" +#include "tcg/tcg.h" +#include "qemu/bitops.h" +#include "exec/cpu_ldst.h" +#include "exec/translate-all.h" +#include "exec/helper-proto.h" +#include "qemu/atomic128.h" +#include "trace/trace-root.h" +#include "tcg/tcg-ldst.h" +#include "internal.h" + +__thread uintptr_t helper_retaddr; + +//#define DEBUG_SIGNAL + +/* + * Adjust the pc to pass to cpu_restore_state; return the memop type. + */ +MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write) +{ + switch (helper_retaddr) { + default: + /* + * Fault during host memory operation within a helper function. + * The helper's host return address, saved here, gives us a + * pointer into the generated code that will unwind to the + * correct guest pc. 
+ */ + *pc = helper_retaddr; + break; + + case 0: + /* + * Fault during host memory operation within generated code. + * (Or, a unrelated bug within qemu, but we can't tell from here). + * + * We take the host pc from the signal frame. However, we cannot + * use that value directly. Within cpu_restore_state_from_tb, we + * assume PC comes from GETPC(), as used by the helper functions, + * so we adjust the address by -GETPC_ADJ to form an address that + * is within the call insn, so that the address does not accidentally + * match the beginning of the next guest insn. However, when the + * pc comes from the signal frame it points to the actual faulting + * host memory insn and not the return from a call insn. + * + * Therefore, adjust to compensate for what will be done later + * by cpu_restore_state_from_tb. + */ + *pc += GETPC_ADJ; + break; + + case 1: + /* + * Fault during host read for translation, or loosely, "execution". + * + * The guest pc is already pointing to the start of the TB for which + * code is being generated. If the guest translator manages the + * page crossings correctly, this is exactly the correct address + * (and if the translator doesn't handle page boundaries correctly + * there's little we can do about that here). Therefore, do not + * trigger the unwinder. + * + * Like tb_gen_code, release the memory lock before cpu_loop_exit. + */ + mmap_unlock(); + *pc = 0; + return MMU_INST_FETCH; + } + + return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD; +} + +/** + * handle_sigsegv_accerr_write: + * @cpu: the cpu context + * @old_set: the sigset_t from the signal ucontext_t + * @host_pc: the host pc, adjusted for the signal + * @guest_addr: the guest address of the fault + * + * Return true if the write fault has been handled, and should be re-tried. + * + * Note that it is important that we don't call page_unprotect() unless + * this is really a "write to nonwriteable page" fault, because + * page_unprotect() assumes that if it is called for an access to + * a page that's writeable this means we had two threads racing and + * another thread got there first and already made the page writeable; + * so we will retry the access. If we were to call page_unprotect() + * for some other kind of fault that should really be passed to the + * guest, we'd end up in an infinite loop of retrying the faulting access. + */ +bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set, + uintptr_t host_pc, abi_ptr guest_addr) +{ + switch (page_unprotect(guest_addr, host_pc)) { + case 0: + /* + * Fault not caused by a page marked unwritable to protect + * cached translations, must be the guest binary's problem. + */ + return false; + case 1: + /* + * Fault caused by protection of cached translation; TBs + * invalidated, so resume execution. + */ + return true; + case 2: + /* + * Fault caused by protection of cached translation, and the + * currently executing TB was modified and must be exited immediately. 
+ */ + sigprocmask(SIG_SETMASK, old_set, NULL); + cpu_loop_exit_noexc(cpu); + /* NORETURN */ + default: + g_assert_not_reached(); + } +} + +static int probe_access_internal(CPUArchState *env, target_ulong addr, + int fault_size, MMUAccessType access_type, + bool nonfault, uintptr_t ra) +{ + int acc_flag; + bool maperr; + + switch (access_type) { + case MMU_DATA_STORE: + acc_flag = PAGE_WRITE_ORG; + break; + case MMU_DATA_LOAD: + acc_flag = PAGE_READ; + break; + case MMU_INST_FETCH: + acc_flag = PAGE_EXEC; + break; + default: + g_assert_not_reached(); + } + + if (guest_addr_valid_untagged(addr)) { + int page_flags = page_get_flags(addr); + if (page_flags & acc_flag) { + return 0; /* success */ + } + maperr = !(page_flags & PAGE_VALID); + } else { + maperr = true; + } + + if (nonfault) { + return TLB_INVALID_MASK; + } + + cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra); +} + +int probe_access_flags(CPUArchState *env, target_ulong addr, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, uintptr_t ra) +{ + int flags; + + flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra); + *phost = flags ? NULL : g2h(env_cpu(env), addr); + return flags; +} + +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t ra) +{ + int flags; + + g_assert(-(addr | TARGET_PAGE_MASK) >= size); + flags = probe_access_internal(env, addr, size, access_type, false, ra); + g_assert(flags == 0); + + return size ? g2h(env_cpu(env), addr) : NULL; +} + +/* The softmmu versions of these helpers are in cputlb.c. */ + +/* + * Verify that we have passed the correct MemOp to the correct function. + * + * We could present one function to target code, and dispatch based on + * the MemOp, but so far we have worked hard to avoid an indirect function + * call along the memory path. + */ +static void validate_memop(MemOpIdx oi, MemOp expected) +{ +#ifdef CONFIG_DEBUG_TCG + MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); + assert(have == expected); +#endif +} + +void helper_unaligned_ld(CPUArchState *env, target_ulong addr) +{ + cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC()); +} + +void helper_unaligned_st(CPUArchState *env, target_ulong addr) +{ + cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC()); +} + +static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra, MMUAccessType type) +{ + MemOp mop = get_memop(oi); + int a_bits = get_alignment_bits(mop); + void *ret; + + /* Enforce guest required alignment. 
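+ For instance, a MemOp that requires 4-byte alignment yields
+ a_bits == 2, so any address with either of the low two bits set
+ (e.g. a hypothetical 0x1002) takes the SIGBUS path below.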
*/ + if (unlikely(addr & ((1 << a_bits) - 1))) { + cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra); + } + + ret = g2h(env_cpu(env), addr); + set_helper_retaddr(ra); + return ret; +} + +uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint8_t ret; + + validate_memop(oi, MO_UB); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldub_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint16_t ret; + + validate_memop(oi, MO_BEUW); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = lduw_be_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint32_t ret; + + validate_memop(oi, MO_BEUL); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldl_be_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint64_t ret; + + validate_memop(oi, MO_BEQ); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldq_be_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint16_t ret; + + validate_memop(oi, MO_LEUW); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = lduw_le_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint32_t ret; + + validate_memop(oi, MO_LEUL); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldl_le_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint64_t ret; + + validate_memop(oi, MO_LEQ); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldq_le_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + + validate_memop(oi, MO_UB); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stb_p(haddr, val); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + + validate_memop(oi, 
MO_BEUW); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stw_be_p(haddr, val); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + + validate_memop(oi, MO_BEUL); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stl_be_p(haddr, val); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + + validate_memop(oi, MO_BEQ); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stq_be_p(haddr, val); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + + validate_memop(oi, MO_LEUW); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stw_le_p(haddr, val); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + + validate_memop(oi, MO_LEUL); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stl_le_p(haddr, val); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + + validate_memop(oi, MO_LEQ); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stq_le_p(haddr, val); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr) +{ + uint32_t ret; + + set_helper_retaddr(1); + ret = ldub_p(g2h_untagged(ptr)); + clear_helper_retaddr(); + return ret; +} + +uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr) +{ + uint32_t ret; + + set_helper_retaddr(1); + ret = lduw_p(g2h_untagged(ptr)); + clear_helper_retaddr(); + return ret; +} + +uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr) +{ + uint32_t ret; + + set_helper_retaddr(1); + ret = ldl_p(g2h_untagged(ptr)); + clear_helper_retaddr(); + return ret; +} + +uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr) +{ + uint64_t ret; + + set_helper_retaddr(1); + ret = ldq_p(g2h_untagged(ptr)); + clear_helper_retaddr(); + return ret; +} + +#include "ldst_common.c.inc" + +/* + * Do not allow unaligned operations to proceed. Return the host address. + * + * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. + */ +static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, + MemOpIdx oi, int size, int prot, + uintptr_t retaddr) +{ + MemOp mop = get_memop(oi); + int a_bits = get_alignment_bits(mop); + void *ret; + + /* Enforce guest required alignment. */ + if (unlikely(addr & ((1 << a_bits) - 1))) { + MMUAccessType t = prot == PAGE_READ ? 
MMU_DATA_LOAD : MMU_DATA_STORE;
+ cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
+ }
+
+ /* Enforce qemu required alignment. */
+ if (unlikely(addr & (size - 1))) {
+ cpu_loop_exit_atomic(env_cpu(env), retaddr);
+ }
+
+ ret = g2h(env_cpu(env), addr);
+ set_helper_retaddr(retaddr);
+ return ret;
+}
+
+#include "atomic_common.c.inc"
+
+/*
+ * First set of functions passes in OI and RETADDR.
+ * This makes them callable from other helpers.
+ */
+
+#define ATOMIC_NAME(X) \
+ glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
+#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
+#define ATOMIC_MMU_IDX MMU_USER_IDX
+
+#define DATA_SIZE 1
+#include "atomic_template.h"
+
+#define DATA_SIZE 2
+#include "atomic_template.h"
+
+#define DATA_SIZE 4
+#include "atomic_template.h"
+
+#ifdef CONFIG_ATOMIC64
+#define DATA_SIZE 8
+#include "atomic_template.h"
+#endif
+
+#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
+#define DATA_SIZE 16
+#include "atomic_template.h"
+#endif
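The ATOMIC_NAME/DATA_SIZE pattern above stamps out one family of cpu_atomic_* helpers per access size purely by token pasting. A minimal standalone sketch of the same technique (ordinary C, independent of QEMU's headers; the demo_* names and the deliberately non-atomic body are invented for illustration):

#include <inttypes.h>
#include <stdio.h>

/* Two-level paste so SUFFIX and END are macro-expanded before ## is applied. */
#define glue_(a, b) a##b
#define glue(a, b)  glue_(a, b)

#define SUFFIX l    /* size tag, chosen by analogy with a 32-bit access */
#define END    _le  /* endianness tag */
#define ATOMIC_NAME(X) glue(glue(glue(demo_atomic_ ## X, SUFFIX), END), _mmu)

/* Expands to demo_atomic_cmpxchgl_le_mmu (plain C here, not actually atomic). */
static uint32_t ATOMIC_NAME(cmpxchg)(uint32_t *addr, uint32_t oldv, uint32_t newv)
{
    uint32_t cur = *addr;

    if (cur == oldv) {
        *addr = newv;
    }
    return cur;
}

int main(void)
{
    uint32_t x = 1;
    uint32_t prev = demo_atomic_cmpxchgl_le_mmu(&x, 1, 2);

    printf("prev=%" PRIu32 " now=%" PRIu32 "\n", prev, x); /* prev=1 now=2 */
    return 0;
}

In the file above, atomic_template.h plays the role of the function body, providing the real implementation once per DATA_SIZE, and ATOMIC_MMU_CLEANUP (clear_helper_retaddr()) undoes the set_helper_retaddr() performed in atomic_mmu_lookup() after each operation.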