author     Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>  2023-10-10 11:40:56 +0000
committer  Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>  2023-10-10 11:40:56 +0000
commit     e02cda008591317b1625707ff8e115a4841aa889 (patch)
tree       aee302e3cf8b59ec2d32ec481be3d1afddfc8968 /include/exec
parent     cc668e6b7e0ffd8c9d130513d12053cf5eda1d3b (diff)
Introduce Virtio-loopback epsilon release:
The epsilon release introduces a new compatibility layer which makes the virtio-loopback design work with QEMU and the rust-vmm vhost-user backend without requiring any changes.

Signed-off-by: Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
Change-Id: I52e57563e08a7d0bdc002f8e928ee61ba0c53dd9
Diffstat (limited to 'include/exec')
-rw-r--r--  include/exec/address-spaces.h  41
-rw-r--r--  include/exec/confidential-guest-support.h  62
-rw-r--r--  include/exec/cpu-all.h  520
-rw-r--r--  include/exec/cpu-common.h  113
-rw-r--r--  include/exec/cpu-defs.h  245
-rw-r--r--  include/exec/cpu_ldst.h  476
-rw-r--r--  include/exec/cputlb.h  31
-rw-r--r--  include/exec/exec-all.h  763
-rw-r--r--  include/exec/gdbstub.h  191
-rw-r--r--  include/exec/gen-icount.h  94
-rw-r--r--  include/exec/helper-gen.h  97
-rw-r--r--  include/exec/helper-head.h  137
-rw-r--r--  include/exec/helper-proto.h  56
-rw-r--r--  include/exec/helper-tcg.h  76
-rw-r--r--  include/exec/hwaddr.h  26
-rw-r--r--  include/exec/ioport.h  75
-rw-r--r--  include/exec/log.h  84
-rw-r--r--  include/exec/memattrs.h  71
-rw-r--r--  include/exec/memop.h  138
-rw-r--r--  include/exec/memopidx.h  55
-rw-r--r--  include/exec/memory-internal.h  53
-rw-r--r--  include/exec/memory.h  2986
-rw-r--r--  include/exec/memory_ldst.h.inc  71
-rw-r--r--  include/exec/memory_ldst_cached.h.inc  111
-rw-r--r--  include/exec/memory_ldst_phys.h.inc  147
-rw-r--r--  include/exec/page-vary.h  34
-rw-r--r--  include/exec/plugin-gen.h  79
-rw-r--r--  include/exec/poison.h  96
-rw-r--r--  include/exec/ram_addr.h  523
-rw-r--r--  include/exec/ramblock.h  74
-rw-r--r--  include/exec/ramlist.h  85
-rw-r--r--  include/exec/softmmu-semi.h  101
-rw-r--r--  include/exec/target_page.h  21
-rw-r--r--  include/exec/translate-all.h  40
-rw-r--r--  include/exec/translator.h  190
-rw-r--r--  include/exec/user/abitypes.h  71
-rw-r--r--  include/exec/user/thunk.h  203
37 files changed, 8236 insertions, 0 deletions
diff --git a/include/exec/address-spaces.h b/include/exec/address-spaces.h
new file mode 100644
index 000000000..db8bfa9a9
--- /dev/null
+++ b/include/exec/address-spaces.h
@@ -0,0 +1,41 @@
+/*
+ * Internal memory management interfaces
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef EXEC_ADDRESS_SPACES_H
+#define EXEC_ADDRESS_SPACES_H
+
+/*
+ * Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless
+ * you're one of them.
+ */
+
+#include "exec/memory.h"
+
+#ifndef CONFIG_USER_ONLY
+
+/* Get the root memory region. This interface should only be used temporarily
+ * until a proper bus interface is available.
+ */
+MemoryRegion *get_system_memory(void);
+
+/* Get the root I/O port region. This interface should only be used
+ * temporarily until a proper bus interface is available.
+ */
+MemoryRegion *get_system_io(void);
+
+extern AddressSpace address_space_memory;
+extern AddressSpace address_space_io;
+
+#endif
+
+#endif
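A minimal sketch of how board-level code typically consumes these interfaces, included here only for orientation; the function name, RAM size, and mapping address are illustrative, and the usual QEMU headers (hw/boards.h, qemu/units.h, qapi/error.h) are assumed:

    static void example_board_init(MachineState *machine)
    {
        MemoryRegion *sysmem = get_system_memory();
        MemoryRegion *ram = g_new(MemoryRegion, 1);

        /* Create a RAM region and map it at guest physical address 0. */
        memory_region_init_ram(ram, NULL, "example.ram", 128 * MiB, &error_fatal);
        memory_region_add_subregion(sysmem, 0, ram);
    }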
diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h
new file mode 100644
index 000000000..ba2dd4b5d
--- /dev/null
+++ b/include/exec/confidential-guest-support.h
@@ -0,0 +1,62 @@
+/*
+ * QEMU Confidential Guest support
+ * This interface describes the common pieces between various
+ * schemes for protecting guest memory or other state against a
+ * compromised hypervisor. This includes memory encryption (AMD's
+ * SEV and Intel's MKTME) or special protection modes (PEF on POWER,
+ * or PV on s390x).
+ *
+ * Copyright Red Hat.
+ *
+ * Authors:
+ * David Gibson <david@gibson.dropbear.id.au>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef QEMU_CONFIDENTIAL_GUEST_SUPPORT_H
+#define QEMU_CONFIDENTIAL_GUEST_SUPPORT_H
+
+#ifndef CONFIG_USER_ONLY
+
+#include "qom/object.h"
+
+#define TYPE_CONFIDENTIAL_GUEST_SUPPORT "confidential-guest-support"
+OBJECT_DECLARE_SIMPLE_TYPE(ConfidentialGuestSupport, CONFIDENTIAL_GUEST_SUPPORT)
+
+struct ConfidentialGuestSupport {
+ Object parent;
+
+ /*
+ * ready: flag set by CGS initialization code once it's ready to
+ * start executing instructions in a potentially-secure
+ * guest
+ *
+ * The definition here is a bit fuzzy, because this is essentially
+ * part of a self-sanity-check, rather than a strict mechanism.
+ *
+ * It's not feasible to have a single point in the common machine
+ * init path to configure confidential guest support, because
+ * different mechanisms have different interdependencies requiring
+ * initialization in different places, often in arch or machine
+ * type specific code. It's also usually not possible to check
+ * for invalid configurations until that initialization code.
+ * That means it would be very easy to have a bug allowing CGS
+ * init to be bypassed entirely in certain configurations.
+ *
+ * Silently ignoring a requested security feature would be bad, so
+ * to avoid that we check late in init that this 'ready' flag is
+ * set if CGS was requested. If the CGS init hasn't happened, and
+ * so 'ready' is not set, we'll abort.
+ */
+ bool ready;
+};
+
+typedef struct ConfidentialGuestSupportClass {
+ ObjectClass parent;
+} ConfidentialGuestSupportClass;
+
+#endif /* !CONFIG_USER_ONLY */
+
+#endif /* QEMU_CONFIDENTIAL_GUEST_SUPPORT_H */
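A hedged sketch of the late "ready" check described in the comment above, as common machine-init code might perform it; the function name and error wording are illustrative and not part of this header (MachineState's cgs pointer and error_setg() are assumed from hw/boards.h and qapi/error.h):

    static void example_check_cgs_ready(MachineState *ms, Error **errp)
    {
        /* CGS was requested on the command line but no accelerator or
         * arch-specific code ever set 'ready': refuse to start the guest. */
        if (ms->cgs && !ms->cgs->ready) {
            error_setg(errp, "confidential guest support requested, "
                       "but not initialized for this configuration");
        }
    }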
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
new file mode 100644
index 000000000..3c8e24292
--- /dev/null
+++ b/include/exec/cpu-all.h
@@ -0,0 +1,520 @@
+/*
+ * defines common to all virtual CPUs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_ALL_H
+#define CPU_ALL_H
+
+#include "exec/cpu-common.h"
+#include "exec/memory.h"
+#include "qemu/thread.h"
+#include "hw/core/cpu.h"
+#include "qemu/rcu.h"
+
+#define EXCP_INTERRUPT 0x10000 /* async interruption */
+#define EXCP_HLT 0x10001 /* hlt instruction reached */
+#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
+#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
+#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
+#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
+
+/* some important defines:
+ *
+ * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
+ * otherwise little endian.
+ *
+ * TARGET_WORDS_BIGENDIAN : same for target cpu
+ */
+
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#define BSWAP_NEEDED
+#endif
+
+#ifdef BSWAP_NEEDED
+
+static inline uint16_t tswap16(uint16_t s)
+{
+ return bswap16(s);
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+ return bswap32(s);
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+ return bswap64(s);
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+ *s = bswap16(*s);
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+ *s = bswap32(*s);
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+ *s = bswap64(*s);
+}
+
+#else
+
+static inline uint16_t tswap16(uint16_t s)
+{
+ return s;
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+ return s;
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+ return s;
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+}
+
+#endif
+
+#if TARGET_LONG_SIZE == 4
+#define tswapl(s) tswap32(s)
+#define tswapls(s) tswap32s((uint32_t *)(s))
+#define bswaptls(s) bswap32s(s)
+#else
+#define tswapl(s) tswap64(s)
+#define tswapls(s) tswap64s((uint64_t *)(s))
+#define bswaptls(s) bswap64s(s)
+#endif
+
+/* Target-endianness CPU memory access functions. These fit into the
+ * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
+ */
+#if defined(TARGET_WORDS_BIGENDIAN)
+#define lduw_p(p) lduw_be_p(p)
+#define ldsw_p(p) ldsw_be_p(p)
+#define ldl_p(p) ldl_be_p(p)
+#define ldq_p(p) ldq_be_p(p)
+#define stw_p(p, v) stw_be_p(p, v)
+#define stl_p(p, v) stl_be_p(p, v)
+#define stq_p(p, v) stq_be_p(p, v)
+#define ldn_p(p, sz) ldn_be_p(p, sz)
+#define stn_p(p, sz, v) stn_be_p(p, sz, v)
+#else
+#define lduw_p(p) lduw_le_p(p)
+#define ldsw_p(p) ldsw_le_p(p)
+#define ldl_p(p) ldl_le_p(p)
+#define ldq_p(p) ldq_le_p(p)
+#define stw_p(p, v) stw_le_p(p, v)
+#define stl_p(p, v) stl_le_p(p, v)
+#define stq_p(p, v) stq_le_p(p, v)
+#define ldn_p(p, sz) ldn_le_p(p, sz)
+#define stn_p(p, sz, v) stn_le_p(p, sz, v)
+#endif
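For orientation, a small sketch (not part of this header) of the target-endian accessors selected above; a store followed by a load through the same host pointer round-trips regardless of host byte order (qemu/osdep.h is assumed for assert()):

    static void example_target_endian_roundtrip(void)
    {
        uint8_t buf[4];
        uint32_t v;

        stl_p(buf, 0x12345678);   /* stored in target byte order */
        v = ldl_p(buf);           /* converted back to host order */
        assert(v == 0x12345678);  /* holds on any host/target combination */
    }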
+
+/* MMU memory access macros */
+
+#if defined(CONFIG_USER_ONLY)
+#include "exec/user/abitypes.h"
+
+/* On some host systems the guest address space is reserved on the host.
+ * This allows the guest address space to be offset to a convenient location.
+ */
+extern uintptr_t guest_base;
+extern bool have_guest_base;
+extern unsigned long reserved_va;
+
+/*
+ * Limit the guest addresses as best we can.
+ *
+ * When not using -R reserved_va, we cannot really limit the guest
+ * to less address space than the host. For 32-bit guests, this
+ * acts as a sanity check that we're not giving the guest an address
+ * that it cannot even represent. For 64-bit guests... the address
+ * might not be what the real kernel would give, but it is at least
+ * representable in the guest.
+ *
+ * TODO: Improve address allocation to avoid this problem, and to
+ * avoid setting bits at the top of guest addresses that might need
+ * to be used for tags.
+ */
+#define GUEST_ADDR_MAX_ \
+ ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
+ UINT32_MAX : ~0ul)
+#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
+
+#else
+
+#include "exec/hwaddr.h"
+
+#define SUFFIX
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#define TARGET_ENDIANNESS
+#include "exec/memory_ldst.h.inc"
+
+#define SUFFIX _cached_slow
+#define ARG1 cache
+#define ARG1_DECL MemoryRegionCache *cache
+#define TARGET_ENDIANNESS
+#include "exec/memory_ldst.h.inc"
+
+static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
+{
+ address_space_stl_notdirty(as, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+#define SUFFIX
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#define TARGET_ENDIANNESS
+#include "exec/memory_ldst_phys.h.inc"
+
+/* Inline fast path for direct RAM access. */
+#define ENDIANNESS
+#include "exec/memory_ldst_cached.h.inc"
+
+#define SUFFIX _cached
+#define ARG1 cache
+#define ARG1_DECL MemoryRegionCache *cache
+#define TARGET_ENDIANNESS
+#include "exec/memory_ldst_phys.h.inc"
+#endif
+
+/* page related stuff */
+
+#ifdef TARGET_PAGE_BITS_VARY
+# include "exec/page-vary.h"
+extern const TargetPageBits target_page;
+#ifdef CONFIG_DEBUG_TCG
+#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
+#define TARGET_PAGE_MASK ({ assert(target_page.decided); \
+ (target_long)target_page.mask; })
+#else
+#define TARGET_PAGE_BITS target_page.bits
+#define TARGET_PAGE_MASK ((target_long)target_page.mask)
+#endif
+#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
+#else
+#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
+#endif
+
+#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
+
+/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
+ * when intptr_t is 32-bit and we are aligning a long long.
+ */
+extern uintptr_t qemu_host_page_size;
+extern intptr_t qemu_host_page_mask;
+
+#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
+#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)
+
+/* same as PROT_xxx */
+#define PAGE_READ 0x0001
+#define PAGE_WRITE 0x0002
+#define PAGE_EXEC 0x0004
+#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
+#define PAGE_VALID 0x0008
+/*
+ * Original state of the write flag (used when tracking self-modifying code)
+ */
+#define PAGE_WRITE_ORG 0x0010
+/*
+ * Invalidate the TLB entry immediately, helpful for s390x
+ * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
+ */
+#define PAGE_WRITE_INV 0x0020
+/* For use with page_set_flags: page is being replaced; target_data cleared. */
+#define PAGE_RESET 0x0040
+/* For linux-user, indicates that the page is MAP_ANON. */
+#define PAGE_ANON 0x0080
+
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
+/* FIXME: Code that sets/uses this is broken and needs to go away. */
+#define PAGE_RESERVED 0x0100
+#endif
+/* Target-specific bits that will be used via page_get_flags(). */
+#define PAGE_TARGET_1 0x0200
+#define PAGE_TARGET_2 0x0400
+
+#if defined(CONFIG_USER_ONLY)
+void page_dump(FILE *f);
+
+typedef int (*walk_memory_regions_fn)(void *, target_ulong,
+ target_ulong, unsigned long);
+int walk_memory_regions(void *, walk_memory_regions_fn);
+
+int page_get_flags(target_ulong address);
+void page_set_flags(target_ulong start, target_ulong end, int flags);
+int page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_alloc_target_data(address, size)
+ * @address: guest virtual address
+ * @size: size of data to allocate
+ *
+ * Allocate @size bytes of out-of-band data to associate with the
+ * guest page at @address. If the page is not mapped, NULL will
+ * be returned. If there is existing data associated with @address,
+ * no new memory will be allocated.
+ *
+ * The memory will be freed when the guest page is deallocated,
+ * e.g. with the munmap system call.
+ */
+void *page_alloc_target_data(target_ulong address, size_t size);
+
+/**
+ * page_get_target_data(address)
+ * @address: guest virtual address
+ *
+ * Return any out-of-band memory associated with the guest page
+ * at @address, as per page_alloc_target_data.
+ */
+void *page_get_target_data(target_ulong address);
+#endif
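A hedged sketch of how linux-user style code might use the page flag helpers declared above before touching guest memory; the wrapper name and the -TARGET_EFAULT errno convention are assumptions borrowed from linux-user, not part of this header:

    static abi_long example_check_user_buffer(abi_ulong guest_addr, abi_ulong len)
    {
        /* Reject buffers the guest cannot actually read and write. */
        if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
            return -TARGET_EFAULT;   /* illustrative error convention */
        }
        return 0;
    }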
+
+CPUArchState *cpu_copy(CPUArchState *env);
+
+/* Flags for use in ENV->INTERRUPT_PENDING.
+
+ The numbers assigned here are non-sequential in order to preserve
+ binary compatibility with the vmstate dump. Bit 0 (0x0001) was
+ previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
+ the vmstate dump. */
+
+/* External hardware interrupt pending. This is typically used for
+ interrupts from devices. */
+#define CPU_INTERRUPT_HARD 0x0002
+
+/* Exit the current TB. This is typically used when some system-level device
+ makes some change to the memory mapping. E.g. the a20 line change. */
+#define CPU_INTERRUPT_EXITTB 0x0004
+
+/* Halt the CPU. */
+#define CPU_INTERRUPT_HALT 0x0020
+
+/* Debug event pending. */
+#define CPU_INTERRUPT_DEBUG 0x0080
+
+/* Reset signal. */
+#define CPU_INTERRUPT_RESET 0x0400
+
+/* Several target-specific external hardware interrupts. Each target/cpu.h
+ should define proper names based on these defines. */
+#define CPU_INTERRUPT_TGT_EXT_0 0x0008
+#define CPU_INTERRUPT_TGT_EXT_1 0x0010
+#define CPU_INTERRUPT_TGT_EXT_2 0x0040
+#define CPU_INTERRUPT_TGT_EXT_3 0x0200
+#define CPU_INTERRUPT_TGT_EXT_4 0x1000
+
+/* Several target-specific internal interrupts. These differ from the
+ preceding target-specific interrupts in that they are intended to
+ originate from within the cpu itself, typically in response to some
+ instruction being executed. These, therefore, are not masked while
+ single-stepping within the debugger. */
+#define CPU_INTERRUPT_TGT_INT_0 0x0100
+#define CPU_INTERRUPT_TGT_INT_1 0x0800
+#define CPU_INTERRUPT_TGT_INT_2 0x2000
+
+/* First unused bit: 0x4000. */
+
+/* The set of all bits that should be masked when single-stepping. */
+#define CPU_INTERRUPT_SSTEP_MASK \
+ (CPU_INTERRUPT_HARD \
+ | CPU_INTERRUPT_TGT_EXT_0 \
+ | CPU_INTERRUPT_TGT_EXT_1 \
+ | CPU_INTERRUPT_TGT_EXT_2 \
+ | CPU_INTERRUPT_TGT_EXT_3 \
+ | CPU_INTERRUPT_TGT_EXT_4)
+
+#ifdef CONFIG_USER_ONLY
+
+/*
+ * Allow some level of source compatibility with softmmu. We do not
+ * support any of the more exotic features, so only invalid pages may
+ * be signaled by probe_access_flags().
+ */
+#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
+#define TLB_MMIO 0
+#define TLB_WATCHPOINT 0
+
+#else
+
+/*
+ * Flags stored in the low bits of the TLB virtual address.
+ * These are defined so that fast path ram access is all zeros.
+ * The flags all must be between TARGET_PAGE_BITS and
+ * maximum address alignment bit.
+ *
+ * Use TARGET_PAGE_BITS_MIN so that these bits are constant
+ * when TARGET_PAGE_BITS_VARY is in effect.
+ */
+/* Zero if TLB entry is valid. */
+#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
+/* Set if TLB entry references a clean RAM page. The iotlb entry will
+ contain the page physical address. */
+#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
+/* Set if TLB entry is an IO callback. */
+#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
+/* Set if TLB entry contains a watchpoint. */
+#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
+/* Set if TLB entry requires byte swap. */
+#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
+/* Set if TLB entry writes ignored. */
+#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))
+
+/* Use this mask to check interception with an alignment mask
+ * in a TCG backend.
+ */
+#define TLB_FLAGS_MASK \
+ (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
+ | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
+
+/**
+ * tlb_hit_page: return true if page aligned @addr is a hit against the
+ * TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (must be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
+{
+ return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
+}
+
+/**
+ * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (need not be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
+{
+ return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
+}
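To make the comparison concrete, a sketch of how fast-path TLB code might combine these predicates with the lookup helpers that appear later in this patch (tlb_entry() and tlb_addr_write() from cpu_ldst.h); the function itself is illustrative:

    static void *example_probe_write_fast(CPUArchState *env, target_ulong addr,
                                          int mmu_idx)
    {
        CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

        if (tlb_hit(tlb_addr_write(entry), addr)) {
            /* Hit: the host pointer is the guest address plus the addend. */
            return (void *)((uintptr_t)addr + entry->addend);
        }
        return NULL;   /* miss: the caller would fall back to the slow path */
    }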
+
+#ifdef CONFIG_TCG
+/* accel/tcg/cpu-exec.c */
+void dump_drift_info(GString *buf);
+/* accel/tcg/translate-all.c */
+void dump_exec_info(GString *buf);
+void dump_opcount_info(GString *buf);
+#endif /* CONFIG_TCG */
+
+#endif /* !CONFIG_USER_ONLY */
+
+#ifdef CONFIG_TCG
+/* accel/tcg/cpu-exec.c */
+int cpu_exec(CPUState *cpu);
+void tcg_exec_realizefn(CPUState *cpu, Error **errp);
+void tcg_exec_unrealizefn(CPUState *cpu);
+#endif /* CONFIG_TCG */
+
+/* Returns: 0 on success, -1 on error */
+int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
+ void *ptr, target_ulong len, bool is_write);
+
+/**
+ * cpu_set_cpustate_pointers(cpu)
+ * @cpu: The cpu object
+ *
+ * Set the generic pointers in CPUState into the outer object.
+ */
+static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
+{
+ cpu->parent_obj.env_ptr = &cpu->env;
+ cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
+}
+
+/**
+ * env_archcpu(env)
+ * @env: The architecture environment
+ *
+ * Return the ArchCPU associated with the environment.
+ */
+static inline ArchCPU *env_archcpu(CPUArchState *env)
+{
+ return container_of(env, ArchCPU, env);
+}
+
+/**
+ * env_cpu(env)
+ * @env: The architecture environment
+ *
+ * Return the CPUState associated with the environment.
+ */
+static inline CPUState *env_cpu(CPUArchState *env)
+{
+ return &env_archcpu(env)->parent_obj;
+}
+
+/**
+ * env_neg(env)
+ * @env: The architecture environment
+ *
+ * Return the CPUNegativeOffsetState associated with the environment.
+ */
+static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
+{
+ ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
+ return &arch_cpu->neg;
+}
+
+/**
+ * cpu_neg(cpu)
+ * @cpu: The generic CPUState
+ *
+ * Return the CPUNegativeOffsetState associated with the cpu.
+ */
+static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
+{
+ ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
+ return &arch_cpu->neg;
+}
+
+/**
+ * env_tlb(env)
+ * @env: The architecture environment
+ *
+ * Return the CPUTLB state associated with the environment.
+ */
+static inline CPUTLB *env_tlb(CPUArchState *env)
+{
+ return &env_neg(env)->tlb;
+}
+
+#endif /* CPU_ALL_H */
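A short sketch of how target helper code typically uses the env <-> CPU accessors defined above; the helper itself is hypothetical, and cpu_interrupt() is assumed from hw/core/cpu.h:

    static void example_raise_hard_interrupt(CPUArchState *env)
    {
        CPUState *cs = env_cpu(env);

        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    }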
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
new file mode 100644
index 000000000..039d422bf
--- /dev/null
+++ b/include/exec/cpu-common.h
@@ -0,0 +1,113 @@
+#ifndef CPU_COMMON_H
+#define CPU_COMMON_H
+
+/* CPU interfaces that are target independent. */
+
+#ifndef CONFIG_USER_ONLY
+#include "exec/hwaddr.h"
+#endif
+
+/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
+void qemu_init_cpu_list(void);
+void cpu_list_lock(void);
+void cpu_list_unlock(void);
+
+void tcg_flush_softmmu_tlb(CPUState *cs);
+
+void tcg_iommu_init_notifier_list(CPUState *cpu);
+void tcg_iommu_free_notifier_list(CPUState *cpu);
+
+#if !defined(CONFIG_USER_ONLY)
+
+enum device_endian {
+ DEVICE_NATIVE_ENDIAN,
+ DEVICE_BIG_ENDIAN,
+ DEVICE_LITTLE_ENDIAN,
+};
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
+#else
+#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
+#endif
+
+/* address in the RAM (different from a physical address) */
+#if defined(CONFIG_XEN_BACKEND)
+typedef uint64_t ram_addr_t;
+# define RAM_ADDR_MAX UINT64_MAX
+# define RAM_ADDR_FMT "%" PRIx64
+#else
+typedef uintptr_t ram_addr_t;
+# define RAM_ADDR_MAX UINTPTR_MAX
+# define RAM_ADDR_FMT "%" PRIxPTR
+#endif
+
+/* memory API */
+
+void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
+/* This should not be used by devices. */
+ram_addr_t qemu_ram_addr_from_host(void *ptr);
+RAMBlock *qemu_ram_block_by_name(const char *name);
+RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
+ ram_addr_t *offset);
+ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
+void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
+void qemu_ram_unset_idstr(RAMBlock *block);
+const char *qemu_ram_get_idstr(RAMBlock *rb);
+void *qemu_ram_get_host_addr(RAMBlock *rb);
+ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
+ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
+ram_addr_t qemu_ram_get_max_length(RAMBlock *rb);
+bool qemu_ram_is_shared(RAMBlock *rb);
+bool qemu_ram_is_noreserve(RAMBlock *rb);
+bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
+void qemu_ram_set_uf_zeroable(RAMBlock *rb);
+bool qemu_ram_is_migratable(RAMBlock *rb);
+void qemu_ram_set_migratable(RAMBlock *rb);
+void qemu_ram_unset_migratable(RAMBlock *rb);
+
+size_t qemu_ram_pagesize(RAMBlock *block);
+size_t qemu_ram_pagesize_largest(void);
+
+void cpu_physical_memory_rw(hwaddr addr, void *buf,
+ hwaddr len, bool is_write);
+static inline void cpu_physical_memory_read(hwaddr addr,
+ void *buf, hwaddr len)
+{
+ cpu_physical_memory_rw(addr, buf, len, false);
+}
+static inline void cpu_physical_memory_write(hwaddr addr,
+ const void *buf, hwaddr len)
+{
+ cpu_physical_memory_rw(addr, (void *)buf, len, true);
+}
+void *cpu_physical_memory_map(hwaddr addr,
+ hwaddr *plen,
+ bool is_write);
+void cpu_physical_memory_unmap(void *buffer, hwaddr len,
+ bool is_write, hwaddr access_len);
+void cpu_register_map_client(QEMUBH *bh);
+void cpu_unregister_map_client(QEMUBH *bh);
+
+bool cpu_physical_memory_is_io(hwaddr phys_addr);
+
+/* Coalesced MMIO regions are areas where write operations can be reordered.
+ * This usually implies that write operations are side-effect free. This allows
+ * batching which can make a major impact on performance when using
+ * virtualization.
+ */
+void qemu_flush_coalesced_mmio_buffer(void);
+
+void cpu_flush_icache_range(hwaddr start, hwaddr len);
+
+typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
+
+int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
+int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
+
+#endif
+
+/* vl.c */
+extern int singlestep;
+
+#endif /* CPU_COMMON_H */
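A minimal sketch of the physical-memory helpers declared above, as target-independent code might use them; the function name is illustrative and 'paddr' is an assumed guest physical address:

    static uint8_t example_swap_guest_byte(hwaddr paddr, uint8_t val)
    {
        uint8_t old;

        cpu_physical_memory_read(paddr, &old, 1);
        cpu_physical_memory_write(paddr, &val, 1);
        return old;
    }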
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
new file mode 100644
index 000000000..ba3cd32a1
--- /dev/null
+++ b/include/exec/cpu-defs.h
@@ -0,0 +1,245 @@
+/*
+ * common defines for all CPUs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_DEFS_H
+#define CPU_DEFS_H
+
+#ifndef NEED_CPU_H
+#error cpu.h included from common code
+#endif
+
+#include "qemu/host-utils.h"
+#include "qemu/thread.h"
+#ifndef CONFIG_USER_ONLY
+#include "exec/hwaddr.h"
+#endif
+#include "exec/memattrs.h"
+#include "hw/core/cpu.h"
+
+#include "cpu-param.h"
+
+#ifndef TARGET_LONG_BITS
+# error TARGET_LONG_BITS must be defined in cpu-param.h
+#endif
+#ifndef NB_MMU_MODES
+# error NB_MMU_MODES must be defined in cpu-param.h
+#endif
+#ifndef TARGET_PHYS_ADDR_SPACE_BITS
+# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h
+#endif
+#ifndef TARGET_VIRT_ADDR_SPACE_BITS
+# error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h
+#endif
+#ifndef TARGET_PAGE_BITS
+# ifdef TARGET_PAGE_BITS_VARY
+# ifndef TARGET_PAGE_BITS_MIN
+# error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h
+# endif
+# else
+# error TARGET_PAGE_BITS must be defined in cpu-param.h
+# endif
+#endif
+
+#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+/* target_ulong is the type of a virtual address */
+#if TARGET_LONG_SIZE == 4
+typedef int32_t target_long;
+typedef uint32_t target_ulong;
+#define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
+#elif TARGET_LONG_SIZE == 8
+typedef int64_t target_long;
+typedef uint64_t target_ulong;
+#define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
+#else
+#error TARGET_LONG_SIZE undefined
+#endif
+
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
+
+/* use a fully associative victim tlb of 8 entries */
+#define CPU_VTLB_SIZE 8
+
+#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
+#define CPU_TLB_ENTRY_BITS 4
+#else
+#define CPU_TLB_ENTRY_BITS 5
+#endif
+
+#define CPU_TLB_DYN_MIN_BITS 6
+#define CPU_TLB_DYN_DEFAULT_BITS 8
+
+# if HOST_LONG_BITS == 32
+/* Make sure we do not require a double-word shift for the TLB load */
+# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
+# else /* HOST_LONG_BITS == 64 */
+/*
+ * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
+ * 2**34 == 16G of address space. This is roughly what one would expect a
+ * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
+ * Skylake's Level-2 STLB has 16 1G entries.
+ * Also, make sure we do not size the TLB past the guest's address space.
+ */
+# ifdef TARGET_PAGE_BITS_VARY
+# define CPU_TLB_DYN_MAX_BITS \
+ MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
+# else
+# define CPU_TLB_DYN_MAX_BITS \
+ MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
+# endif
+# endif
+
+typedef struct CPUTLBEntry {
+ /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
+ bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
+ go directly to ram.
+ bit 3 : indicates that the entry is invalid
+ bit 2..0 : zero
+ */
+ union {
+ struct {
+ target_ulong addr_read;
+ target_ulong addr_write;
+ target_ulong addr_code;
+ /* Addend to virtual address to get host address. IO accesses
+ use the corresponding iotlb value. */
+ uintptr_t addend;
+ };
+ /* padding to get a power of two size */
+ uint8_t dummy[1 << CPU_TLB_ENTRY_BITS];
+ };
+} CPUTLBEntry;
+
+QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
+
+/* The IOTLB is not accessed directly inline by generated TCG code,
+ * so the CPUIOTLBEntry layout is not as critical as that of the
+ * CPUTLBEntry. (This is also why we don't want to combine the two
+ * structs into one.)
+ */
+typedef struct CPUIOTLBEntry {
+ /*
+ * @addr contains:
+ * - in the lower TARGET_PAGE_BITS, a physical section number
+ * - with the lower TARGET_PAGE_BITS masked off, an offset which
+ * must be added to the virtual address to obtain:
+ * + the ram_addr_t of the target RAM (if the physical section
+ * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
+ * + the offset within the target MemoryRegion (otherwise)
+ */
+ hwaddr addr;
+ MemTxAttrs attrs;
+} CPUIOTLBEntry;
+
+/*
+ * Data elements that are per MMU mode, minus the bits accessed by
+ * the TCG fast path.
+ */
+typedef struct CPUTLBDesc {
+ /*
+ * Describe a region covering all of the large pages allocated
+ * into the tlb. When any page within this region is flushed,
+ * we must flush the entire tlb. The region is matched if
+ * (addr & large_page_mask) == large_page_addr.
+ */
+ target_ulong large_page_addr;
+ target_ulong large_page_mask;
+ /* host time (in ns) at the beginning of the time window */
+ int64_t window_begin_ns;
+ /* maximum number of entries observed in the window */
+ size_t window_max_entries;
+ size_t n_used_entries;
+ /* The next index to use in the tlb victim table. */
+ size_t vindex;
+ /* The tlb victim table, in two parts. */
+ CPUTLBEntry vtable[CPU_VTLB_SIZE];
+ CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
+ /* The iotlb. */
+ CPUIOTLBEntry *iotlb;
+} CPUTLBDesc;
+
+/*
+ * Data elements that are per MMU mode, accessed by the fast path.
+ * The structure is aligned to aid loading the pair with one insn.
+ */
+typedef struct CPUTLBDescFast {
+ /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
+ uintptr_t mask;
+ /* The array of tlb entries itself. */
+ CPUTLBEntry *table;
+} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
+
+/*
+ * Data elements that are shared between all MMU modes.
+ */
+typedef struct CPUTLBCommon {
+ /* Serialize updates to f.table and d.vtable, and others as noted. */
+ QemuSpin lock;
+ /*
+ * Within dirty, for each bit N, modifications have been made to
+ * mmu_idx N since the last time that mmu_idx was flushed.
+ * Protected by tlb_c.lock.
+ */
+ uint16_t dirty;
+ /*
+ * Statistics. These are not lock protected, but are read and
+ * written atomically. This allows the monitor to print a snapshot
+ * of the stats without interfering with the cpu.
+ */
+ size_t full_flush_count;
+ size_t part_flush_count;
+ size_t elide_flush_count;
+} CPUTLBCommon;
+
+/*
+ * The entire softmmu tlb, for all MMU modes.
+ * The meaning of each of the MMU modes is defined in the target code.
+ * Since this is placed within CPUNegativeOffsetState, the smallest
+ * negative offsets are at the end of the struct.
+ */
+
+typedef struct CPUTLB {
+ CPUTLBCommon c;
+ CPUTLBDesc d[NB_MMU_MODES];
+ CPUTLBDescFast f[NB_MMU_MODES];
+} CPUTLB;
+
+/* This will be used by TCG backends to compute offsets. */
+#define TLB_MASK_TABLE_OFS(IDX) \
+ ((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env))
+
+#else
+
+typedef struct CPUTLB { } CPUTLB;
+
+#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
+
+/*
+ * This structure must be placed in ArchCPU immediately
+ * before CPUArchState, as a field named "neg".
+ */
+typedef struct CPUNegativeOffsetState {
+ CPUTLB tlb;
+ IcountDecr icount_decr;
+} CPUNegativeOffsetState;
+
+#endif
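For reference, a sketch of the definitions a target's cpu-param.h is expected to provide to satisfy the checks above; the values shown are illustrative and do not correspond to any particular target:

    /* target/example/cpu-param.h (hypothetical) */
    #define TARGET_LONG_BITS             64
    #define TARGET_PAGE_BITS             12
    #define TARGET_PHYS_ADDR_SPACE_BITS  48
    #define TARGET_VIRT_ADDR_SPACE_BITS  48
    #define NB_MMU_MODES                 4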
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
new file mode 100644
index 000000000..a878fd010
--- /dev/null
+++ b/include/exec/cpu_ldst.h
@@ -0,0 +1,476 @@
+/*
+ * Software MMU support
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+/*
+ * Generate inline load/store functions for all MMU modes (typically
+ * at least _user and _kernel) as well as _data versions, for all data
+ * sizes.
+ *
+ * Used by target op helpers.
+ *
+ * The syntax for the accessors is:
+ *
+ * load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
+ * cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
+ * cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
+ * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
+ *
+ * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
+ * cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
+ * cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
+ * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
+ *
+ * sign is:
+ * (empty): for 32 and 64 bit sizes
+ * u : unsigned
+ * s : signed
+ *
+ * size is:
+ * b: 8 bits
+ * w: 16 bits
+ * l: 32 bits
+ * q: 64 bits
+ *
+ * end is:
+ * (empty): for target native endian, or for 8 bit access
+ * _be: for forced big endian
+ * _le: for forced little endian
+ *
+ * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
+ * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
+ * the index to use; the "data" and "code" suffixes take the index from
+ * cpu_mmu_index().
+ *
+ * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
+ * MemOp including alignment requirements. The alignment will be enforced.
+ */
+#ifndef CPU_LDST_H
+#define CPU_LDST_H
+
+#include "exec/memopidx.h"
+#include "qemu/int128.h"
+
+#if defined(CONFIG_USER_ONLY)
+/* sparc32plus has a 64-bit long but a 32-bit address space;
+ * this can produce bad results with g2h() and h2g().
+ */
+#if TARGET_VIRT_ADDR_SPACE_BITS <= 32
+typedef uint32_t abi_ptr;
+#define TARGET_ABI_FMT_ptr "%x"
+#else
+typedef uint64_t abi_ptr;
+#define TARGET_ABI_FMT_ptr "%"PRIx64
+#endif
+
+#ifndef TARGET_TAGGED_ADDRESSES
+static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
+{
+ return x;
+}
+#endif
+
+/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
+static inline void *g2h_untagged(abi_ptr x)
+{
+ return (void *)((uintptr_t)(x) + guest_base);
+}
+
+static inline void *g2h(CPUState *cs, abi_ptr x)
+{
+ return g2h_untagged(cpu_untagged_addr(cs, x));
+}
+
+static inline bool guest_addr_valid_untagged(abi_ulong x)
+{
+ return x <= GUEST_ADDR_MAX;
+}
+
+static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
+{
+ return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
+}
+
+#define h2g_valid(x) \
+ (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
+ (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)
+
+#define h2g_nocheck(x) ({ \
+ uintptr_t __ret = (uintptr_t)(x) - guest_base; \
+ (abi_ptr)__ret; \
+})
+
+#define h2g(x) ({ \
+ /* Check if given address fits target address space */ \
+ assert(h2g_valid(x)); \
+ h2g_nocheck(x); \
+})
+#else
+typedef target_ulong abi_ptr;
+#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
+#endif
+
+uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
+int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
+uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
+int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
+uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
+uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
+uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
+int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
+uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
+uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);
+
+uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
+
+void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
+void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
+void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
+void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
+void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
+void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
+void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
+
+void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint32_t val, uintptr_t ra);
+void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint32_t val, uintptr_t ra);
+void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint32_t val, uintptr_t ra);
+void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint64_t val, uintptr_t ra);
+void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint32_t val, uintptr_t ra);
+void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint32_t val, uintptr_t ra);
+void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
+ uint64_t val, uintptr_t ra);
+
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ int mmu_idx, uintptr_t ra);
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+
+void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stw_be_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stl_be_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stq_be_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stw_le_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra);
+
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
+TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
+ (CPUArchState *env, target_ulong addr, TYPE val, \
+ MemOpIdx oi, uintptr_t retaddr);
+
+#ifdef CONFIG_ATOMIC64
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
+#else
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
+#endif
+
+GEN_ATOMIC_HELPER_ALL(fetch_add)
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
+GEN_ATOMIC_HELPER_ALL(fetch_and)
+GEN_ATOMIC_HELPER_ALL(fetch_or)
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
+
+GEN_ATOMIC_HELPER_ALL(add_fetch)
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
+GEN_ATOMIC_HELPER_ALL(and_fetch)
+GEN_ATOMIC_HELPER_ALL(or_fetch)
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
+
+GEN_ATOMIC_HELPER_ALL(xchg)
+
+#undef GEN_ATOMIC_HELPER_ALL
+#undef GEN_ATOMIC_HELPER
+
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
+
+Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr);
+void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+ MemOpIdx oi, uintptr_t retaddr);
+void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+ MemOpIdx oi, uintptr_t retaddr);
+
+#if defined(CONFIG_USER_ONLY)
+
+extern __thread uintptr_t helper_retaddr;
+
+static inline void set_helper_retaddr(uintptr_t ra)
+{
+ helper_retaddr = ra;
+ /*
+ * Ensure that this write is visible to the SIGSEGV handler that
+ * may be invoked due to a subsequent invalid memory operation.
+ */
+ signal_barrier();
+}
+
+static inline void clear_helper_retaddr(void)
+{
+ /*
+ * Ensure that previous memory operations have succeeded before
+ * removing the data visible to the signal handler.
+ */
+ signal_barrier();
+ helper_retaddr = 0;
+}
+
+#else
+
+/* Needed for TCG_OVERSIZED_GUEST */
+#include "tcg/tcg.h"
+
+static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
+{
+#if TCG_OVERSIZED_GUEST
+ return entry->addr_write;
+#else
+ return qatomic_read(&entry->addr_write);
+#endif
+}
+
+/* Find the TLB index corresponding to the mmu_idx + address pair. */
+static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
+ target_ulong addr)
+{
+ uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
+
+ return (addr >> TARGET_PAGE_BITS) & size_mask;
+}
+
+/* Find the TLB entry corresponding to the mmu_idx + address pair. */
+static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
+ target_ulong addr)
+{
+ return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
+}
+
+#endif /* defined(CONFIG_USER_ONLY) */
+
+#ifdef TARGET_WORDS_BIGENDIAN
+# define cpu_lduw_data cpu_lduw_be_data
+# define cpu_ldsw_data cpu_ldsw_be_data
+# define cpu_ldl_data cpu_ldl_be_data
+# define cpu_ldq_data cpu_ldq_be_data
+# define cpu_lduw_data_ra cpu_lduw_be_data_ra
+# define cpu_ldsw_data_ra cpu_ldsw_be_data_ra
+# define cpu_ldl_data_ra cpu_ldl_be_data_ra
+# define cpu_ldq_data_ra cpu_ldq_be_data_ra
+# define cpu_lduw_mmuidx_ra cpu_lduw_be_mmuidx_ra
+# define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra
+# define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra
+# define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra
+# define cpu_ldw_mmu cpu_ldw_be_mmu
+# define cpu_ldl_mmu cpu_ldl_be_mmu
+# define cpu_ldq_mmu cpu_ldq_be_mmu
+# define cpu_stw_data cpu_stw_be_data
+# define cpu_stl_data cpu_stl_be_data
+# define cpu_stq_data cpu_stq_be_data
+# define cpu_stw_data_ra cpu_stw_be_data_ra
+# define cpu_stl_data_ra cpu_stl_be_data_ra
+# define cpu_stq_data_ra cpu_stq_be_data_ra
+# define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra
+# define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra
+# define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra
+# define cpu_stw_mmu cpu_stw_be_mmu
+# define cpu_stl_mmu cpu_stl_be_mmu
+# define cpu_stq_mmu cpu_stq_be_mmu
+#else
+# define cpu_lduw_data cpu_lduw_le_data
+# define cpu_ldsw_data cpu_ldsw_le_data
+# define cpu_ldl_data cpu_ldl_le_data
+# define cpu_ldq_data cpu_ldq_le_data
+# define cpu_lduw_data_ra cpu_lduw_le_data_ra
+# define cpu_ldsw_data_ra cpu_ldsw_le_data_ra
+# define cpu_ldl_data_ra cpu_ldl_le_data_ra
+# define cpu_ldq_data_ra cpu_ldq_le_data_ra
+# define cpu_lduw_mmuidx_ra cpu_lduw_le_mmuidx_ra
+# define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra
+# define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra
+# define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra
+# define cpu_ldw_mmu cpu_ldw_le_mmu
+# define cpu_ldl_mmu cpu_ldl_le_mmu
+# define cpu_ldq_mmu cpu_ldq_le_mmu
+# define cpu_stw_data cpu_stw_le_data
+# define cpu_stl_data cpu_stl_le_data
+# define cpu_stq_data cpu_stq_le_data
+# define cpu_stw_data_ra cpu_stw_le_data_ra
+# define cpu_stl_data_ra cpu_stl_le_data_ra
+# define cpu_stq_data_ra cpu_stq_le_data_ra
+# define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra
+# define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra
+# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra
+# define cpu_stw_mmu cpu_stw_le_mmu
+# define cpu_stl_mmu cpu_stl_le_mmu
+# define cpu_stq_mmu cpu_stq_le_mmu
+#endif
+
+uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
+uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
+uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
+uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);
+
+static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
+{
+ return (int8_t)cpu_ldub_code(env, addr);
+}
+
+static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
+{
+ return (int16_t)cpu_lduw_code(env, addr);
+}
+
+/**
+ * tlb_vaddr_to_host:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index to use for lookup
+ *
+ * Look up the specified guest virtual index in the TCG softmmu TLB.
+ * If we can translate a host virtual address suitable for direct RAM
+ * access, without causing a guest exception, then return it.
+ * Otherwise (TLB entry is for an I/O access, guest software
+ * TLB fill required, etc) return NULL.
+ */
+#ifdef CONFIG_USER_ONLY
+static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+ MMUAccessType access_type, int mmu_idx)
+{
+ return g2h(env_cpu(env), addr);
+}
+#else
+void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+ MMUAccessType access_type, int mmu_idx);
+#endif
+
+#endif /* CPU_LDST_H */
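A hedged sketch of a target op helper built on the accessors declared above; the helper name and semantics are made up, while GETPC() comes from exec/exec-all.h and lets a faulting access unwind to the correct guest state:

    uint64_t helper_example_ldpair(CPUArchState *env, target_ulong addr)
    {
        uintptr_t ra = GETPC();

        uint32_t lo = cpu_ldl_data_ra(env, addr, ra);
        uint32_t hi = cpu_ldl_data_ra(env, addr + 4, ra);

        return ((uint64_t)hi << 32) | lo;
    }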
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
new file mode 100644
index 000000000..19b16e58f
--- /dev/null
+++ b/include/exec/cputlb.h
@@ -0,0 +1,31 @@
+/*
+ * Common CPU TLB handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CPUTLB_H
+#define CPUTLB_H
+
+#include "exec/cpu-common.h"
+
+#if !defined(CONFIG_USER_ONLY)
+/* cputlb.c */
+void tlb_protect_code(ram_addr_t ram_addr);
+void tlb_unprotect_code(ram_addr_t ram_addr);
+void tlb_flush_counts(size_t *full, size_t *part, size_t *elide);
+#endif
+#endif
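A small sketch of a statistics consumer for tlb_flush_counts(), e.g. behind a monitor command; qemu_printf() is assumed from qemu/qemu-print.h and the function name is illustrative:

    static void example_dump_tlb_flush_counts(void)
    {
        size_t full, part, elide;

        tlb_flush_counts(&full, &part, &elide);
        qemu_printf("TLB: %zu full, %zu partial, %zu elided flushes\n",
                    full, part, elide);
    }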
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
new file mode 100644
index 000000000..35d8e9397
--- /dev/null
+++ b/include/exec/exec-all.h
@@ -0,0 +1,763 @@
+/*
+ * internal execution defines for qemu
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef EXEC_ALL_H
+#define EXEC_ALL_H
+
+#include "cpu.h"
+#ifdef CONFIG_TCG
+#include "exec/cpu_ldst.h"
+#endif
+#include "sysemu/cpu-timers.h"
+
+/* allow to see translation results - the slowdown should be negligible, so we leave it */
+#define DEBUG_DISAS
+
+/* Page tracking code uses ram addresses in system mode, and virtual
+ addresses in userspace mode. Define tb_page_addr_t to be an appropriate
+ type. */
+#if defined(CONFIG_USER_ONLY)
+typedef abi_ulong tb_page_addr_t;
+#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
+#else
+typedef ram_addr_t tb_page_addr_t;
+#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
+#endif
+
+#include "qemu/log.h"
+
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
+void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
+ target_ulong *data);
+
+/**
+ * cpu_restore_state:
+ * @cpu: the vCPU whose state is to be restored
+ * @searched_pc: the host PC the fault occurred at
+ * @will_exit: true if the TB executed will be interrupted after some
+ *             cpu adjustments. Required for maintaining the correct
+ *             icount values
+ * @return: true if state was restored, false otherwise
+ *
+ * Attempt to restore the state for a fault occurring in translated
+ * code. If the searched_pc is not in translated code no state is
+ * restored and the function returns false.
+ */
+bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
+
+void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
+void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
+void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
+void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
+
+/**
+ * cpu_loop_exit_requested:
+ * @cpu: The CPU state to be tested
+ *
+ * Indicate if somebody asked for a return of the CPU to the main loop
+ * (e.g., via cpu_exit() or cpu_interrupt()).
+ *
+ * This is helpful for architectures that support interruptible
+ * instructions. After writing back all state to registers/memory, this
+ * call can be used to check if it makes sense to return to the main loop
+ * or to continue executing the interruptible instruction.
+ */
+static inline bool cpu_loop_exit_requested(CPUState *cpu)
+{
+ return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
+}
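A sketch of the usage pattern this predicate enables for interruptible, long-running instructions; everything except cpu_loop_exit_requested(), cpu_loop_exit_restore(), env_cpu() and GETPC() is illustrative:

    static void example_interruptible_op(CPUArchState *env, uint64_t units_left)
    {
        while (units_left--) {
            example_do_one_unit(env);   /* assumed per-unit work; state written back */

            if (cpu_loop_exit_requested(env_cpu(env))) {
                /* Safe to return to the main loop; the instruction can be
                 * restarted or continued later. */
                cpu_loop_exit_restore(env_cpu(env), GETPC());
            }
        }
    }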
+
+#if !defined(CONFIG_USER_ONLY)
+void cpu_reloading_memory_map(void);
+/**
+ * cpu_address_space_init:
+ * @cpu: CPU to add this address space to
+ * @asidx: integer index of this address space
+ * @prefix: prefix to be used as name of address space
+ * @mr: the root memory region of address space
+ *
+ * Add the specified address space to the CPU's cpu_ases list.
+ * The address space added with @asidx 0 is the one used for the
+ * convenience pointer cpu->as.
+ * The target-specific code which registers ASes is responsible
+ * for defining what semantics address space 0, 1, 2, etc have.
+ *
+ * Before the first call to this function, the caller must set
+ * cpu->num_ases to the total number of address spaces it needs
+ * to support.
+ *
+ * Note that with KVM only one address space is supported.
+ */
+void cpu_address_space_init(CPUState *cpu, int asidx,
+ const char *prefix, MemoryRegion *mr);
+#endif
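A brief sketch, modelled on how a target's realize code might register two address spaces with the function above; 'secure_mr' and the space names are assumptions for illustration:

    static void example_register_address_spaces(CPUState *cs, MemoryRegion *secure_mr)
    {
        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", get_system_memory());
        cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mr);
    }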
+
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
+/* cputlb.c */
+/**
+ * tlb_init - initialize a CPU's TLB
+ * @cpu: CPU whose TLB should be initialized
+ */
+void tlb_init(CPUState *cpu);
+/**
+ * tlb_destroy - destroy a CPU's TLB
+ * @cpu: CPU whose TLB should be destroyed
+ */
+void tlb_destroy(CPUState *cpu);
+/**
+ * tlb_flush_page:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of the specified CPU, for all
+ * MMU indexes.
+ */
+void tlb_flush_page(CPUState *cpu, target_ulong addr);
+/**
+ * tlb_flush_page_all_cpus:
+ * @cpu: src CPU of the flush
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of the specified CPU, for all
+ * MMU indexes.
+ */
+void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
+/**
+ * tlb_flush_page_all_cpus_synced:
+ * @src: source CPU of the flush
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLBs of all CPUs, for all MMU indexes,
+ * like tlb_flush_page_all_cpus, except that the source vCPU's work
+ * is scheduled as safe work, so all flushes are complete once the
+ * source vCPU's safe work has run. When that happens depends on
+ * when the guest's translation ends the current TB.
+ */
+void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
+/**
+ * tlb_flush:
+ * @cpu: CPU whose TLB should be flushed
+ *
+ * Flush the entire TLB for the specified CPU. Most CPU architectures
+ * allow the implementation to drop entries from the TLB at any time
+ * so this is generally safe. If more selective flushing is required
+ * use one of the other functions for efficiency.
+ */
+void tlb_flush(CPUState *cpu);
+/**
+ * tlb_flush_all_cpus:
+ * @src_cpu: source CPU of the flush
+ *
+ * Flush the entire TLB of every CPU.
+ */
+void tlb_flush_all_cpus(CPUState *src_cpu);
+/**
+ * tlb_flush_all_cpus_synced:
+ * @src_cpu: source CPU of the flush
+ *
+ * Like tlb_flush_all_cpus, except that the source vCPU's work is
+ * scheduled as safe work, so all flushes are complete once the
+ * source vCPU's safe work has run. When that happens depends on
+ * when the guest's translation ends the current TB.
+ */
+void tlb_flush_all_cpus_synced(CPUState *src_cpu);
+/**
+ * tlb_flush_page_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
+ uint16_t idxmap);
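+/*
+ * Example (illustrative only, index names hypothetical): @idxmap is a
+ * bitmap of MMU index numbers, so flushing a page from two indexes at
+ * once would look like
+ *
+ *     tlb_flush_page_by_mmuidx(cs, addr,
+ *                              (1 << MY_MMU_KERNEL_IDX) |
+ *                              (1 << MY_MMU_USER_IDX));
+ */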
+/**
+ * tlb_flush_page_by_mmuidx_all_cpus:
+ * @cpu: Originating CPU of the flush
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of all CPUs, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+ uint16_t idxmap);
+/**
+ * tlb_flush_page_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of all CPUs, for the specified MMU
+ * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
+ * source vCPU's work is scheduled as safe work, so all flushes are
+ * complete once the source vCPU's safe work has run. When that
+ * happens depends on when the guest's translation ends the current TB.
+ */
+void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
+ uint16_t idxmap);
+/**
+ * tlb_flush_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
+/**
+ * tlb_flush_by_mmuidx_all_cpus:
+ * @cpu: Originating CPU of the flush
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from all TLBs of all CPUs, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
+/**
+ * tlb_flush_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from all TLBs of all CPUs, for the specified
+ * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
+ * source vCPU's work is scheduled as safe work, so all flushes are
+ * complete once the source vCPU's safe work has run. When that
+ * happens depends on when the guest's translation ends the current TB.
+ */
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+
+/**
+ * tlb_flush_page_bits_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * Similar to tlb_flush_page_by_mmuidx, but comparing only the low
+ * @bits worth of each virtual page.
+ */
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+ uint16_t idxmap, unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+ uint16_t idxmap, unsigned bits);
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
+ (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);
+
+/**
+ * tlb_flush_range_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of the start of the range to be flushed
+ * @len: length of range to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
+ * comparing only the low @bits worth of each virtual page.
+ */
+void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+ target_ulong len, uint16_t idxmap,
+ unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+ target_ulong len, uint16_t idxmap,
+ unsigned bits);
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ target_ulong addr,
+ target_ulong len,
+ uint16_t idxmap,
+ unsigned bits);
+
+/**
+ * tlb_set_page_with_attrs:
+ * @cpu: CPU to add this TLB entry for
+ * @vaddr: virtual address of page to add entry for
+ * @paddr: physical address of the page
+ * @attrs: memory transaction attributes
+ * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
+ * @mmu_idx: MMU index to insert TLB entry for
+ * @size: size of the page in bytes
+ *
+ * Add an entry to this CPU's TLB (a mapping from virtual address
+ * @vaddr to physical address @paddr) with the specified memory
+ * transaction attributes. This is generally called by the target CPU
+ * specific code after it has been called through the tlb_fill()
+ * entry point and performed a successful page table walk to find
+ * the physical address and attributes for the virtual address
+ * which provoked the TLB miss.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+ hwaddr paddr, MemTxAttrs attrs,
+ int prot, int mmu_idx, target_ulong size);
+/* tlb_set_page:
+ *
+ * This function is equivalent to calling tlb_set_page_with_attrs()
+ * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
+ * as a convenience for CPUs which don't use memory transaction attributes.
+ */
+void tlb_set_page(CPUState *cpu, target_ulong vaddr,
+ hwaddr paddr, int prot,
+ int mmu_idx, target_ulong size);
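+/*
+ * Illustrative sketch (assumed target code): a typical tlb_fill hook ends
+ * a successful page-table walk with something like
+ *
+ *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
+ *                             paddr & TARGET_PAGE_MASK, attrs,
+ *                             prot, mmu_idx, TARGET_PAGE_SIZE);
+ *     return true;
+ *
+ * where @paddr, @attrs and @prot come from the target's own walk.
+ */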
+#else
+static inline void tlb_init(CPUState *cpu)
+{
+}
+static inline void tlb_destroy(CPUState *cpu)
+{
+}
+static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
+{
+}
+static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+{
+}
+static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
+ target_ulong addr)
+{
+}
+static inline void tlb_flush(CPUState *cpu)
+{
+}
+static inline void tlb_flush_all_cpus(CPUState *src_cpu)
+{
+}
+static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
+{
+}
+static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
+ target_ulong addr, uint16_t idxmap)
+{
+}
+
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
+ target_ulong addr,
+ uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ target_ulong addr,
+ uint16_t idxmap)
+{
+}
+static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
+{
+}
+
+static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
+ target_ulong addr,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
+ target_ulong addr,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void
+tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
+ uint16_t idxmap, unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+ target_ulong len, uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
+ target_ulong addr,
+ target_ulong len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ target_ulong addr,
+                                                              target_ulong len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+#endif
+/**
+ * probe_access:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @size: size of the access
+ * @access_type: read, write or execute permission
+ * @mmu_idx: MMU index to use for lookup
+ * @retaddr: return address for unwinding
+ *
+ * Look up the guest virtual address @addr. Raise an exception if the
+ * page does not satisfy @access_type. Raise an exception if the
+ * access (@addr, @size) hits a watchpoint. For writes, mark a clean
+ * page as dirty.
+ *
+ * Finally, return the host address for a page that is backed by RAM,
+ * or NULL if the page requires I/O.
+ */
+void *probe_access(CPUArchState *env, target_ulong addr, int size,
+ MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
+
+static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
+ int mmu_idx, uintptr_t retaddr)
+{
+ return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
+}
+
+static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
+ int mmu_idx, uintptr_t retaddr)
+{
+ return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+}
+
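+/*
+ * Illustrative sketch (assumed target code): a helper that must not fault
+ * part-way through a multi-byte store can probe the whole access first:
+ *
+ *     void *host = probe_write(env, addr, 16, mmu_idx, GETPC());
+ *     if (host) {
+ *         memcpy(host, bytes, 16);    // RAM fast path
+ *     } else {
+ *         // page requires I/O; fall back to smaller accesses
+ *     }
+ */
+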
+/**
+ * probe_access_flags:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @access_type: read, write or execute permission
+ * @mmu_idx: MMU index to use for lookup
+ * @nonfault: suppress the fault
+ * @phost: return value for host address
+ * @retaddr: return address for unwinding
+ *
+ * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
+ * the page, and storing the host address for RAM in @phost.
+ *
+ * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
+ * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
+ * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
+ * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
+ */
+int probe_access_flags(CPUArchState *env, target_ulong addr,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, uintptr_t retaddr);
+
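+/*
+ * Illustrative sketch (assumed target code): a no-fault probe, e.g. for a
+ * first-fault or non-faulting vector load, might look like
+ *
+ *     void *host;
+ *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
+ *                                    true, &host, GETPC());
+ *     if (flags & TLB_INVALID_MASK) {
+ *         // page not accessible: suppress the element instead of faulting
+ *     }
+ */
+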
+#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */
+
+/* Estimated block size for TB allocation. */
+/* ??? The following is based on a 2015 survey of x86_64 host output.
+ Better would seem to be some sort of dynamically sized TB array,
+ adapting to the block sizes actually being produced. */
+#if defined(CONFIG_SOFTMMU)
+#define CODE_GEN_AVG_BLOCK_SIZE 400
+#else
+#define CODE_GEN_AVG_BLOCK_SIZE 150
+#endif
+
+/*
+ * Translation Cache-related fields of a TB.
+ * This struct exists just for convenience; we keep track of TBs in a binary
+ * search tree, and the only fields needed to compare TBs in the tree are
+ * @ptr and @size.
+ * Note: the address of search data can be obtained by adding @size to @ptr.
+ */
+struct tb_tc {
+ const void *ptr; /* pointer to the translated code */
+ size_t size;
+};
+
+struct TranslationBlock {
+ target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
+ target_ulong cs_base; /* CS base for this block */
+ uint32_t flags; /* flags defining in which context the code was generated */
+ uint32_t cflags; /* compile flags */
+
+/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
+#define CF_COUNT_MASK 0x000001ff
+#define CF_NO_GOTO_TB 0x00000200 /* Do not chain with goto_tb */
+#define CF_NO_GOTO_PTR 0x00000400 /* Do not chain with goto_ptr */
+#define CF_SINGLE_STEP 0x00000800 /* gdbstub single-step in effect */
+#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
+#define CF_MEMI_ONLY 0x00010000 /* Only instrument memory ops */
+#define CF_USE_ICOUNT 0x00020000
+#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
+#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
+#define CF_NOIRQ 0x00100000 /* Generate an uninterruptible TB */
+#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
+#define CF_CLUSTER_SHIFT 24
+
+ /* Per-vCPU dynamic tracing state used to generate this TB */
+ uint32_t trace_vcpu_dstate;
+
+ /*
+ * Above fields used for comparing
+ */
+
+ /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
+ uint16_t size;
+ uint16_t icount;
+
+ struct tb_tc tc;
+
+ /* first and second physical page containing code. The lower bit
+ of the pointer tells the index in page_next[].
+ The list is protected by the TB's page('s) lock(s) */
+ uintptr_t page_next[2];
+ tb_page_addr_t page_addr[2];
+
+ /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
+ QemuSpin jmp_lock;
+
+ /* The following data are used to directly call another TB from
+ * the code of this one. This can be done either by emitting direct or
+ * indirect native jump instructions. These jumps are reset so that the TB
+ * just continues its execution. The TB can be linked to another one by
+ * setting one of the jump targets (or patching the jump instruction). Only
+ * two of such jumps are supported.
+ */
+ uint16_t jmp_reset_offset[2]; /* offset of original jump target */
+#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
+ uintptr_t jmp_target_arg[2]; /* target address or offset */
+
+ /*
+ * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
+ * Each TB can have two outgoing jumps, and therefore can participate
+ * in two lists. The list entries are kept in jmp_list_next[2]. The least
+ * significant bit (LSB) of the pointers in these lists is used to encode
+ * which of the two list entries is to be used in the pointed TB.
+ *
+ * List traversals are protected by jmp_lock. The destination TB of each
+ * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
+ * can be acquired from any origin TB.
+ *
+ * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
+ * being invalidated, so that no further outgoing jumps from it can be set.
+ *
+ * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
+ * to a destination TB that has CF_INVALID set.
+ */
+ uintptr_t jmp_list_head;
+ uintptr_t jmp_list_next[2];
+ uintptr_t jmp_dest[2];
+};
+
+/* Hide the qatomic_read to make code a little easier on the eyes */
+static inline uint32_t tb_cflags(const TranslationBlock *tb)
+{
+ return qatomic_read(&tb->cflags);
+}
+
+/* current cflags for hashing/comparison */
+uint32_t curr_cflags(CPUState *cpu);
+
+/* TranslationBlock invalidate API */
+#if defined(CONFIG_USER_ONLY)
+void tb_invalidate_phys_addr(target_ulong addr);
+void tb_invalidate_phys_range(target_ulong start, target_ulong end);
+#else
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
+#endif
+void tb_flush(CPUState *cpu);
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
+ target_ulong cs_base, uint32_t flags,
+ uint32_t cflags);
+void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
+
+/* GETPC is the true target of the return instruction that we'll execute. */
+#if defined(CONFIG_TCG_INTERPRETER)
+extern __thread uintptr_t tci_tb_ptr;
+# define GETPC() tci_tb_ptr
+#else
+# define GETPC() \
+ ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
+#endif
+
+/* The true return address will often point to a host insn that is part of
+ the next translated guest insn. Adjust the address backward to point to
+ the middle of the call insn. Subtracting one would do the job except for
+ several compressed mode architectures (arm, mips) which set the low bit
+ to indicate the compressed mode; subtracting two works around that. It
+ is also the case that there are no host isas that contain a call insn
+ smaller than 4 bytes, so we don't worry about special-casing this. */
+#define GETPC_ADJ 2
+
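+/*
+ * Illustrative sketch (helper name hypothetical): GETPC() must be evaluated
+ * in the helper that was called directly from generated code, before any
+ * other call, and is then passed down as the unwind "retaddr":
+ *
+ *     uint64_t helper_my_load_quad(CPUArchState *env, target_ulong addr)
+ *     {
+ *         uintptr_t ra = GETPC();
+ *         return cpu_ldq_data_ra(env, addr, ra);
+ *     }
+ */
+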
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
+void assert_no_pages_locked(void);
+#else
+static inline void assert_no_pages_locked(void)
+{
+}
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+
+/**
+ * iotlb_to_section:
+ * @cpu: CPU performing the access
+ * @index: TCG CPU IOTLB entry
+ *
+ * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
+ * it refers to. @index will have been initially created and returned
+ * by memory_region_section_get_iotlb().
+ */
+struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
+ hwaddr index, MemTxAttrs attrs);
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+void mmap_lock(void);
+void mmap_unlock(void);
+bool have_mmap_lock(void);
+
+/**
+ * get_page_addr_code() - user-mode version
+ * @env: CPUArchState
+ * @addr: guest virtual address of guest code
+ *
+ * Returns @addr.
+ */
+static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
+ target_ulong addr)
+{
+ return addr;
+}
+
+/**
+ * get_page_addr_code_hostp() - user-mode version
+ * @env: CPUArchState
+ * @addr: guest virtual address of guest code
+ * @hostp: optional output for the host address where @addr's content is kept
+ *
+ * Returns @addr.
+ *
+ * If @hostp is non-NULL, sets *@hostp to the host address where @addr's content
+ * is kept.
+ */
+static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
+ target_ulong addr,
+ void **hostp)
+{
+ if (hostp) {
+ *hostp = g2h_untagged(addr);
+ }
+ return addr;
+}
+
+/**
+ * adjust_signal_pc:
+ * @pc: raw pc from the host signal ucontext_t.
+ * @is_write: host memory operation was write, or read-modify-write.
+ *
+ * Alter @pc as required for unwinding. Return the type of the
+ * guest memory access -- host reads may be for guest execution.
+ */
+MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);
+
+/**
+ * handle_sigsegv_accerr_write:
+ * @cpu: the cpu context
+ * @old_set: the sigset_t from the signal ucontext_t
+ * @host_pc: the host pc, adjusted for the signal
+ * @host_addr: the host address of the fault
+ *
+ * Return true if the write fault has been handled, and should be re-tried.
+ */
+bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
+ uintptr_t host_pc, abi_ptr guest_addr);
+
+/**
+ * cpu_loop_exit_sigsegv:
+ * @cpu: the cpu context
+ * @addr: the guest address of the fault
+ * @access_type: access was read/write/execute
+ * @maperr: true for invalid page, false for permission fault
+ * @ra: host pc for unwinding
+ *
+ * Use the TCGCPUOps hook to record cpu state, do guest operating system
+ * specific things to raise SIGSEGV, and jump to the main cpu loop.
+ */
+void QEMU_NORETURN cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+
+/**
+ * cpu_loop_exit_sigbus:
+ * @cpu: the cpu context
+ * @addr: the guest address of the alignment fault
+ * @access_type: access was read/write/execute
+ * @ra: host pc for unwinding
+ *
+ * Use the TCGCPUOps hook to record cpu state, do guest operating system
+ * specific things to raise SIGBUS, and jump to the main cpu loop.
+ */
+void QEMU_NORETURN cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type,
+ uintptr_t ra);
+
+#else
+static inline void mmap_lock(void) {}
+static inline void mmap_unlock(void) {}
+
+/**
+ * get_page_addr_code() - full-system version
+ * @env: CPUArchState
+ * @addr: guest virtual address of guest code
+ *
+ * If we cannot translate and execute from the entire RAM page, or if
+ * the region is not backed by RAM, returns -1. Otherwise, returns the
+ * ram_addr_t corresponding to the guest code at @addr.
+ *
+ * Note: this function can trigger an exception.
+ */
+tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);
+
+/**
+ * get_page_addr_code_hostp() - full-system version
+ * @env: CPUArchState
+ * @addr: guest virtual address of guest code
+ *
+ * See get_page_addr_code() (full-system version) for documentation on the
+ * return value.
+ *
+ * Sets *@hostp (when @hostp is non-NULL) as follows.
+ * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
+ * to the host address where @addr's content is kept.
+ *
+ * Note: this function can trigger an exception.
+ */
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+ void **hostp);
+
+void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
+void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
+
+MemoryRegionSection *
+address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
+ hwaddr *xlat, hwaddr *plen,
+ MemTxAttrs attrs, int *prot);
+hwaddr memory_region_section_get_iotlb(CPUState *cpu,
+ MemoryRegionSection *section);
+#endif
+
+#endif
diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h
new file mode 100644
index 000000000..a024a0350
--- /dev/null
+++ b/include/exec/gdbstub.h
@@ -0,0 +1,191 @@
+#ifndef GDBSTUB_H
+#define GDBSTUB_H
+
+#define DEFAULT_GDBSTUB_PORT "1234"
+
+/* GDB breakpoint/watchpoint types */
+#define GDB_BREAKPOINT_SW 0
+#define GDB_BREAKPOINT_HW 1
+#define GDB_WATCHPOINT_WRITE 2
+#define GDB_WATCHPOINT_READ 3
+#define GDB_WATCHPOINT_ACCESS 4
+
+#ifdef NEED_CPU_H
+#include "cpu.h"
+
+typedef void (*gdb_syscall_complete_cb)(CPUState *cpu,
+ target_ulong ret, target_ulong err);
+
+/**
+ * gdb_do_syscall:
+ * @cb: function to call when the system call has completed
+ * @fmt: gdb syscall format string
+ * ...: list of arguments to interpolate into @fmt
+ *
+ * Send a GDB syscall request. This function will return immediately;
+ * the callback function will be called later when the remote system
+ * call has completed.
+ *
+ * @fmt should be in the 'call-id,parameter,parameter...' format documented
+ * for the F request packet in the GDB remote protocol. A limited set of
+ * printf-style format specifiers is supported:
+ * %x - target_ulong argument printed in hex
+ * %lx - 64-bit argument printed in hex
+ * %s - string pointer (target_ulong) and length (int) pair
+ */
+void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
+/**
+ * gdb_do_syscallv:
+ * @cb: function to call when the system call has completed
+ * @fmt: gdb syscall format string
+ * @va: arguments to interpolate into @fmt
+ *
+ * As gdb_do_syscall, but taking a va_list rather than a variable
+ * argument list.
+ */
+void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va);
+int use_gdb_syscalls(void);
+void gdb_set_stop_cpu(CPUState *cpu);
+
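+/*
+ * Illustrative example (based on the format described above, names
+ * hypothetical): a semihosting-style write request could be issued as
+ *
+ *     gdb_do_syscall(my_write_done_cb, "write,%x,%x,%x",
+ *                    (target_ulong)fd, (target_ulong)buf_addr,
+ *                    (target_ulong)len);
+ *
+ * my_write_done_cb() is invoked later with the remote call's result.
+ */
+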
+/**
+ * gdb_exit: exit gdb session, reporting inferior status
+ * @code: exit code reported
+ *
+ * This closes the session and sends a final packet to GDB reporting
+ * the exit status of the program. It also cleans up any connections
+ * detritus before returning.
+ */
+void gdb_exit(int code);
+
+#ifdef CONFIG_USER_ONLY
+/**
+ * gdb_handlesig: yield control to gdb
+ * @cpu: CPU
+ * @sig: if non-zero, the signal number which caused us to stop
+ *
+ * This function yields control to gdb, when a user-mode-only target
+ * needs to stop execution. If @sig is non-zero, then we will send a
+ * stop packet to tell gdb that we have stopped because of this signal.
+ *
+ * This function will block (handling protocol requests from gdb)
+ * until gdb tells us to continue target execution. When it does
+ * return, the return value is a signal to deliver to the target,
+ * or 0 if no signal should be delivered, i.e. the signal that caused
+ * us to stop should be ignored.
+ */
+int gdb_handlesig(CPUState *, int);
+void gdb_signalled(CPUArchState *, int);
+void gdbserver_fork(CPUState *);
+#endif
+/* Get or set a register. Returns the size of the register. */
+typedef int (*gdb_get_reg_cb)(CPUArchState *env, GByteArray *buf, int reg);
+typedef int (*gdb_set_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
+void gdb_register_coprocessor(CPUState *cpu,
+ gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg,
+ int num_regs, const char *xml, int g_pos);
+
+/*
+ * The GDB remote protocol transfers values in target byte order. As
+ * the gdbstub may be batching up several register values we always
+ * append to the array.
+ */
+
+static inline int gdb_get_reg8(GByteArray *buf, uint8_t val)
+{
+ g_byte_array_append(buf, &val, 1);
+ return 1;
+}
+
+static inline int gdb_get_reg16(GByteArray *buf, uint16_t val)
+{
+ uint16_t to_word = tswap16(val);
+ g_byte_array_append(buf, (uint8_t *) &to_word, 2);
+ return 2;
+}
+
+static inline int gdb_get_reg32(GByteArray *buf, uint32_t val)
+{
+ uint32_t to_long = tswap32(val);
+ g_byte_array_append(buf, (uint8_t *) &to_long, 4);
+ return 4;
+}
+
+static inline int gdb_get_reg64(GByteArray *buf, uint64_t val)
+{
+ uint64_t to_quad = tswap64(val);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ return 8;
+}
+
+static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi,
+ uint64_t val_lo)
+{
+ uint64_t to_quad;
+#ifdef TARGET_WORDS_BIGENDIAN
+ to_quad = tswap64(val_hi);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ to_quad = tswap64(val_lo);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+#else
+ to_quad = tswap64(val_lo);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ to_quad = tswap64(val_hi);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+#endif
+ return 16;
+}
+
+static inline int gdb_get_zeroes(GByteArray *array, size_t len)
+{
+ guint oldlen = array->len;
+ g_byte_array_set_size(array, oldlen + len);
+ memset(array->data + oldlen, 0, len);
+
+ return len;
+}
+
+/**
+ * gdb_get_reg_ptr: get pointer to start of last element
+ * @len: length of element
+ *
+ * This is a helper function to extract the pointer to the last
+ * element for additional processing. Some front-ends do additional
+ * dynamic swapping of the elements based on CPU state.
+ */
+static inline uint8_t * gdb_get_reg_ptr(GByteArray *buf, int len)
+{
+ return buf->data + buf->len - len;
+}
+
+#if TARGET_LONG_BITS == 64
+#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val)
+#define ldtul_p(addr) ldq_p(addr)
+#else
+#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val)
+#define ldtul_p(addr) ldl_p(addr)
+#endif
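+
+/*
+ * Illustrative sketch (hypothetical target, register file name made up):
+ * a gdb_get_reg_cb built on the helpers above might look like
+ *
+ *     static int my_cpu_gdb_read_register(CPUArchState *env,
+ *                                         GByteArray *buf, int n)
+ *     {
+ *         if (n < 16) {
+ *             return gdb_get_regl(buf, env->regs[n]);
+ *         }
+ *         return 0;   // unknown register
+ *     }
+ */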
+
+#endif
+
+/**
+ * gdbserver_start: start the gdb server
+ * @port_or_device: connection spec for gdb
+ *
+ * For CONFIG_USER_ONLY builds this is either a TCP port or a path to a
+ * fifo. For system emulation you can use a full chardev spec for your
+ * gdbserver port.
+ */
+int gdbserver_start(const char *port_or_device);
+
+/**
+ * gdb_has_xml:
+ * This is an ugly hack to cope with both new and old gdb.
+ * If gdb sends qXfer:features:read then assume we're talking to a newish
+ * gdb that understands target descriptions.
+ */
+extern bool gdb_has_xml;
+
+/* in gdbstub-xml.c, generated by scripts/feature_to_c.sh */
+extern const char *const xml_builtin[][2];
+
+#endif
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
new file mode 100644
index 000000000..c57204dda
--- /dev/null
+++ b/include/exec/gen-icount.h
@@ -0,0 +1,94 @@
+#ifndef GEN_ICOUNT_H
+#define GEN_ICOUNT_H
+
+#include "exec/exec-all.h"
+#include "qemu/timer.h"
+
+/* Helpers for instruction counting code generation. */
+
+static TCGOp *icount_start_insn;
+
+static inline void gen_io_start(void)
+{
+ TCGv_i32 tmp = tcg_const_i32(1);
+ tcg_gen_st_i32(tmp, cpu_env,
+ offsetof(ArchCPU, parent_obj.can_do_io) -
+ offsetof(ArchCPU, env));
+ tcg_temp_free_i32(tmp);
+}
+
+static inline void gen_tb_start(const TranslationBlock *tb)
+{
+ TCGv_i32 count;
+
+ if (tb_cflags(tb) & CF_USE_ICOUNT) {
+ count = tcg_temp_local_new_i32();
+ } else {
+ count = tcg_temp_new_i32();
+ }
+
+ tcg_gen_ld_i32(count, cpu_env,
+ offsetof(ArchCPU, neg.icount_decr.u32) -
+ offsetof(ArchCPU, env));
+
+ if (tb_cflags(tb) & CF_USE_ICOUNT) {
+ /*
+ * We emit a sub with a dummy immediate argument. Keep the insn index
+ * of the sub so that we later (when we know the actual insn count)
+ * can update the argument with the actual insn count.
+ */
+ tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
+ icount_start_insn = tcg_last_op();
+ }
+
+ /*
+ * Emit the check against icount_decr.u32 to see if we should exit
+ * unless we suppress the check with CF_NOIRQ. If we are using
+ * icount and have suppressed interruption the higher level code
+ * should have ensured we don't run more instructions than the
+ * budget.
+ */
+ if (tb_cflags(tb) & CF_NOIRQ) {
+ tcg_ctx->exitreq_label = NULL;
+ } else {
+ tcg_ctx->exitreq_label = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
+ }
+
+ if (tb_cflags(tb) & CF_USE_ICOUNT) {
+ tcg_gen_st16_i32(count, cpu_env,
+ offsetof(ArchCPU, neg.icount_decr.u16.low) -
+ offsetof(ArchCPU, env));
+ /*
+ * cpu->can_do_io is cleared automatically here at the beginning of
+ * each translation block. The cost is minimal and only paid for
+ * -icount, plus it would be very easy to forget doing it in the
+ * translator. Doing it here means we don't need a gen_io_end() to
+ * go with gen_io_start().
+ */
+ tcg_gen_st_i32(tcg_constant_i32(0), cpu_env,
+ offsetof(ArchCPU, parent_obj.can_do_io) -
+ offsetof(ArchCPU, env));
+ }
+
+ tcg_temp_free_i32(count);
+}
+
+static inline void gen_tb_end(const TranslationBlock *tb, int num_insns)
+{
+ if (tb_cflags(tb) & CF_USE_ICOUNT) {
+ /*
+ * Update the num_insn immediate parameter now that we know
+ * the actual insn count.
+ */
+ tcg_set_insn_param(icount_start_insn, 2,
+ tcgv_i32_arg(tcg_constant_i32(num_insns)));
+ }
+
+ if (tcg_ctx->exitreq_label) {
+ gen_set_label(tcg_ctx->exitreq_label);
+ tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
+ }
+}
+
+#endif
diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h
new file mode 100644
index 000000000..1c2e7a8ed
--- /dev/null
+++ b/include/exec/helper-gen.h
@@ -0,0 +1,97 @@
+/* Helper file for declaring TCG helper functions.
+ This one expands generation functions for tcg opcodes. */
+
+#ifndef HELPER_GEN_H
+#define HELPER_GEN_H
+
+#include "exec/helper-head.h"
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
+{ \
+ tcg_gen_callN(HELPER(name), dh_retvar(ret), 0, NULL); \
+}
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1)) \
+{ \
+ TCGTemp *args[1] = { dh_arg(t1, 1) }; \
+ tcg_gen_callN(HELPER(name), dh_retvar(ret), 1, args); \
+}
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
+{ \
+ TCGTemp *args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \
+ tcg_gen_callN(HELPER(name), dh_retvar(ret), 2, args); \
+}
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
+{ \
+ TCGTemp *args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \
+ tcg_gen_callN(HELPER(name), dh_retvar(ret), 3, args); \
+}
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
+ dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
+{ \
+ TCGTemp *args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \
+ dh_arg(t3, 3), dh_arg(t4, 4) }; \
+ tcg_gen_callN(HELPER(name), dh_retvar(ret), 4, args); \
+}
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
+{ \
+ TCGTemp *args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5) }; \
+ tcg_gen_callN(HELPER(name), dh_retvar(ret), 5, args); \
+}
+
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
+{ \
+ TCGTemp *args[6] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6) }; \
+ tcg_gen_callN(HELPER(name), dh_retvar(ret), 6, args); \
+}
+
+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
+ dh_arg_decl(t7, 7)) \
+{ \
+ TCGTemp *args[7] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
+ dh_arg(t7, 7) }; \
+ tcg_gen_callN(HELPER(name), dh_retvar(ret), 7, args); \
+}
+
+#include "helper.h"
+#include "trace/generated-helpers.h"
+#include "trace/generated-helpers-wrappers.h"
+#include "accel/tcg/tcg-runtime.h"
+#include "accel/tcg/plugin-helpers.h"
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
+#undef GEN_HELPER
+
+#endif /* HELPER_GEN_H */
diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h
new file mode 100644
index 000000000..b974eb394
--- /dev/null
+++ b/include/exec/helper-head.h
@@ -0,0 +1,137 @@
+/* Helper file for declaring TCG helper functions.
+ Used by other helper files.
+
+ Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
+ functions. Names should be specified without the helper_ prefix, and
+ the return and argument types specified. 3 basic types are understood
+ (i32, i64 and ptr). Additional aliases are provided for convenience and
+ to match the types used by the C helper implementation.
+
+ The target helper.h should be included in all files that use/define
+   helper functions. This will ensure that function prototypes are
+   consistent. In addition it should be included two extra times by
+   helper.c, defining:
+ GEN_HELPER 1 to produce op generation functions (gen_helper_*)
+ GEN_HELPER 2 to do runtime registration helper functions.
+ */
+
+#ifndef EXEC_HELPER_HEAD_H
+#define EXEC_HELPER_HEAD_H
+
+#define HELPER(name) glue(helper_, name)
+
+/* Some types that make sense in C, but not for TCG. */
+#define dh_alias_i32 i32
+#define dh_alias_s32 i32
+#define dh_alias_int i32
+#define dh_alias_i64 i64
+#define dh_alias_s64 i64
+#define dh_alias_f16 i32
+#define dh_alias_f32 i32
+#define dh_alias_f64 i64
+#define dh_alias_ptr ptr
+#define dh_alias_cptr ptr
+#define dh_alias_void void
+#define dh_alias_noreturn noreturn
+#define dh_alias(t) glue(dh_alias_, t)
+
+#define dh_ctype_i32 uint32_t
+#define dh_ctype_s32 int32_t
+#define dh_ctype_int int
+#define dh_ctype_i64 uint64_t
+#define dh_ctype_s64 int64_t
+#define dh_ctype_f16 uint32_t
+#define dh_ctype_f32 float32
+#define dh_ctype_f64 float64
+#define dh_ctype_ptr void *
+#define dh_ctype_cptr const void *
+#define dh_ctype_void void
+#define dh_ctype_noreturn void QEMU_NORETURN
+#define dh_ctype(t) dh_ctype_##t
+
+#ifdef NEED_CPU_H
+# ifdef TARGET_LONG_BITS
+# if TARGET_LONG_BITS == 32
+# define dh_alias_tl i32
+# else
+# define dh_alias_tl i64
+# endif
+# endif
+# define dh_alias_env ptr
+# define dh_ctype_tl target_ulong
+# define dh_ctype_env CPUArchState *
+#endif
+
+/* We can't use glue() here because it falls foul of C preprocessor
+ recursive expansion rules. */
+#define dh_retvar_decl0_void void
+#define dh_retvar_decl0_noreturn void
+#define dh_retvar_decl0_i32 TCGv_i32 retval
+#define dh_retvar_decl0_i64 TCGv_i64 retval
+#define dh_retvar_decl0_ptr TCGv_ptr retval
+#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))
+
+#define dh_retvar_decl_void
+#define dh_retvar_decl_noreturn
+#define dh_retvar_decl_i32 TCGv_i32 retval,
+#define dh_retvar_decl_i64 TCGv_i64 retval,
+#define dh_retvar_decl_ptr TCGv_ptr retval,
+#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
+
+#define dh_retvar_void NULL
+#define dh_retvar_noreturn NULL
+#define dh_retvar_i32 tcgv_i32_temp(retval)
+#define dh_retvar_i64 tcgv_i64_temp(retval)
+#define dh_retvar_ptr tcgv_ptr_temp(retval)
+#define dh_retvar(t) glue(dh_retvar_, dh_alias(t))
+
+#define dh_typecode_void 0
+#define dh_typecode_noreturn 0
+#define dh_typecode_i32 2
+#define dh_typecode_s32 3
+#define dh_typecode_i64 4
+#define dh_typecode_s64 5
+#define dh_typecode_ptr 6
+#define dh_typecode(t) glue(dh_typecode_, dh_alias(t))
+
+#define dh_callflag_i32 0
+#define dh_callflag_s32 0
+#define dh_callflag_int 0
+#define dh_callflag_i64 0
+#define dh_callflag_s64 0
+#define dh_callflag_f16 0
+#define dh_callflag_f32 0
+#define dh_callflag_f64 0
+#define dh_callflag_ptr 0
+#define dh_callflag_cptr dh_callflag_ptr
+#define dh_callflag_void 0
+#define dh_callflag_noreturn TCG_CALL_NO_RETURN
+#define dh_callflag(t) glue(dh_callflag_, dh_alias(t))
+
+#define dh_typemask(t, n) (dh_typecode(t) << (n * 3))
+
+#define dh_arg(t, n) \
+ glue(glue(tcgv_, dh_alias(t)), _temp)(glue(arg, n))
+
+#define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n)
+
+#define DEF_HELPER_0(name, ret) \
+ DEF_HELPER_FLAGS_0(name, 0, ret)
+#define DEF_HELPER_1(name, ret, t1) \
+ DEF_HELPER_FLAGS_1(name, 0, ret, t1)
+#define DEF_HELPER_2(name, ret, t1, t2) \
+ DEF_HELPER_FLAGS_2(name, 0, ret, t1, t2)
+#define DEF_HELPER_3(name, ret, t1, t2, t3) \
+ DEF_HELPER_FLAGS_3(name, 0, ret, t1, t2, t3)
+#define DEF_HELPER_4(name, ret, t1, t2, t3, t4) \
+ DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4)
+#define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \
+ DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)
+#define DEF_HELPER_6(name, ret, t1, t2, t3, t4, t5, t6) \
+ DEF_HELPER_FLAGS_6(name, 0, ret, t1, t2, t3, t4, t5, t6)
+#define DEF_HELPER_7(name, ret, t1, t2, t3, t4, t5, t6, t7) \
+ DEF_HELPER_FLAGS_7(name, 0, ret, t1, t2, t3, t4, t5, t6, t7)
+
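+/*
+ * Example (helper name hypothetical): a target's helper.h entry such as
+ *
+ *     DEF_HELPER_3(my_add_cc, i32, env, i32, i32)
+ *
+ * expands, via helper-proto.h, to the prototype
+ *
+ *     uint32_t helper_my_add_cc(CPUArchState *env, uint32_t, uint32_t);
+ *
+ * and, via helper-gen.h, to a gen_helper_my_add_cc() emitter taking a
+ * TCGv_i32 destination, a TCGv_ptr env and two TCGv_i32 arguments.
+ */
+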
+/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
+
+#endif /* EXEC_HELPER_HEAD_H */
diff --git a/include/exec/helper-proto.h b/include/exec/helper-proto.h
new file mode 100644
index 000000000..ba100793a
--- /dev/null
+++ b/include/exec/helper-proto.h
@@ -0,0 +1,56 @@
+/* Helper file for declaring TCG helper functions.
+ This one expands prototypes for the helper functions. */
+
+#ifndef HELPER_PROTO_H
+#define HELPER_PROTO_H
+
+#include "exec/helper-head.h"
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+dh_ctype(ret) HELPER(name) (void);
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1));
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4));
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5));
+
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5), dh_ctype(t6));
+
+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
+ dh_ctype(t7));
+
+#define IN_HELPER_PROTO
+
+#include "helper.h"
+#include "trace/generated-helpers.h"
+#include "accel/tcg/tcg-runtime.h"
+#include "accel/tcg/plugin-helpers.h"
+
+#undef IN_HELPER_PROTO
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
+
+#endif /* HELPER_PROTO_H */
diff --git a/include/exec/helper-tcg.h b/include/exec/helper-tcg.h
new file mode 100644
index 000000000..16cd318b8
--- /dev/null
+++ b/include/exec/helper-tcg.h
@@ -0,0 +1,76 @@
+/* Helper file for declaring TCG helper functions.
+ This one defines data structures private to tcg.c. */
+
+#ifndef HELPER_TCG_H
+#define HELPER_TCG_H
+
+#include "exec/helper-head.h"
+
+/* Need one more level of indirection before stringification
+ to get all the macros expanded first. */
+#define str(s) #s
+
+#define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \
+ { .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(ret), \
+ .typemask = dh_typemask(ret, 0) },
+
+#define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \
+ { .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(ret), \
+ .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) },
+
+#define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \
+ { .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(ret), \
+ .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
+ | dh_typemask(t2, 2) },
+
+#define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \
+ { .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(ret), \
+ .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
+ | dh_typemask(t2, 2) | dh_typemask(t3, 3) },
+
+#define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \
+ { .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(ret), \
+ .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
+ | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) },
+
+#define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \
+ { .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(ret), \
+ .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
+ | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \
+ | dh_typemask(t5, 5) },
+
+#define DEF_HELPER_FLAGS_6(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6) \
+ { .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(ret), \
+ .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
+ | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \
+ | dh_typemask(t5, 5) | dh_typemask(t6, 6) },
+
+#define DEF_HELPER_FLAGS_7(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6, t7) \
+ { .func = HELPER(NAME), .name = str(NAME), .flags = FLAGS, \
+ .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
+ | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \
+ | dh_typemask(t5, 5) | dh_typemask(t6, 6) | dh_typemask(t7, 7) },
+
+#include "helper.h"
+#include "trace/generated-helpers.h"
+#include "accel/tcg/tcg-runtime.h"
+#include "accel/tcg/plugin-helpers.h"
+
+#undef str
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
+
+#endif /* HELPER_TCG_H */
diff --git a/include/exec/hwaddr.h b/include/exec/hwaddr.h
new file mode 100644
index 000000000..8f16d179a
--- /dev/null
+++ b/include/exec/hwaddr.h
@@ -0,0 +1,26 @@
+/* Define hwaddr if it exists. */
+
+#ifndef HWADDR_H
+#define HWADDR_H
+
+
+#define HWADDR_BITS 64
+/* hwaddr is the type of a physical address (its size can
+ be different from 'target_ulong'). */
+
+typedef uint64_t hwaddr;
+#define HWADDR_MAX UINT64_MAX
+#define TARGET_FMT_plx "%016" PRIx64
+#define HWADDR_PRId PRId64
+#define HWADDR_PRIi PRIi64
+#define HWADDR_PRIo PRIo64
+#define HWADDR_PRIu PRIu64
+#define HWADDR_PRIx PRIx64
+#define HWADDR_PRIX PRIX64
+
+typedef struct MemMapEntry {
+ hwaddr base;
+ hwaddr size;
+} MemMapEntry;
+
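+/*
+ * Illustrative example (names and addresses made up): boards usually
+ * describe their physical layout with a table of MemMapEntry, e.g.
+ *
+ *     static const MemMapEntry my_board_memmap[] = {
+ *         [MY_BOARD_FLASH] = { 0x00000000, 0x02000000 },
+ *         [MY_BOARD_UART]  = { 0x09000000, 0x00001000 },
+ *         [MY_BOARD_DRAM]  = { 0x40000000, 0x80000000 },
+ *     };
+ */
+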
+#endif
diff --git a/include/exec/ioport.h b/include/exec/ioport.h
new file mode 100644
index 000000000..e34f66899
--- /dev/null
+++ b/include/exec/ioport.h
@@ -0,0 +1,75 @@
+/*
+ * defines ioport related functions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**************************************************************************
+ * IO ports API
+ */
+
+#ifndef IOPORT_H
+#define IOPORT_H
+
+#include "exec/memory.h"
+
+#define MAX_IOPORTS (64 * 1024)
+#define IOPORTS_MASK (MAX_IOPORTS - 1)
+
+typedef struct MemoryRegionPortio {
+ uint32_t offset;
+ uint32_t len;
+ unsigned size;
+ uint32_t (*read)(void *opaque, uint32_t address);
+ void (*write)(void *opaque, uint32_t address, uint32_t data);
+ uint32_t base; /* private field */
+} MemoryRegionPortio;
+
+#define PORTIO_END_OF_LIST() { }
+
+#ifndef CONFIG_USER_ONLY
+extern const MemoryRegionOps unassigned_io_ops;
+#endif
+
+void cpu_outb(uint32_t addr, uint8_t val);
+void cpu_outw(uint32_t addr, uint16_t val);
+void cpu_outl(uint32_t addr, uint32_t val);
+uint8_t cpu_inb(uint32_t addr);
+uint16_t cpu_inw(uint32_t addr);
+uint32_t cpu_inl(uint32_t addr);
+
+typedef struct PortioList {
+ const struct MemoryRegionPortio *ports;
+ Object *owner;
+ struct MemoryRegion *address_space;
+ unsigned nr;
+ struct MemoryRegion **regions;
+ void *opaque;
+ const char *name;
+ bool flush_coalesced_mmio;
+} PortioList;
+
+void portio_list_init(PortioList *piolist, Object *owner,
+ const struct MemoryRegionPortio *callbacks,
+ void *opaque, const char *name);
+void portio_list_set_flush_coalesced(PortioList *piolist);
+void portio_list_destroy(PortioList *piolist);
+void portio_list_add(PortioList *piolist,
+ struct MemoryRegion *address_space,
+ uint32_t addr);
+void portio_list_del(PortioList *piolist);
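+
+/*
+ * Illustrative sketch (hypothetical device): a portio table pairs offsets
+ * and access sizes with callbacks and is terminated by PORTIO_END_OF_LIST:
+ *
+ *     static const MemoryRegionPortio my_dev_portio[] = {
+ *         { 0x00, 2, 1, .read = my_dev_in, .write = my_dev_out },
+ *         PORTIO_END_OF_LIST(),
+ *     };
+ *
+ * portio_list_init() takes such a table, and portio_list_add() maps it at
+ * a base address within an I/O memory region.
+ */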
+
+#endif /* IOPORT_H */
diff --git a/include/exec/log.h b/include/exec/log.h
new file mode 100644
index 000000000..3c7fa65ea
--- /dev/null
+++ b/include/exec/log.h
@@ -0,0 +1,84 @@
+#ifndef QEMU_EXEC_LOG_H
+#define QEMU_EXEC_LOG_H
+
+#include "qemu/log.h"
+#include "hw/core/cpu.h"
+#include "disas/disas.h"
+
+/* cpu_dump_state() logging functions: */
+/**
+ * log_cpu_state:
+ * @cpu: The CPU whose state is to be logged.
+ * @flags: Flags what to log.
+ *
+ * Logs the output of cpu_dump_state().
+ */
+static inline void log_cpu_state(CPUState *cpu, int flags)
+{
+ QemuLogFile *logfile;
+
+ if (qemu_log_enabled()) {
+ rcu_read_lock();
+ logfile = qatomic_rcu_read(&qemu_logfile);
+ if (logfile) {
+ cpu_dump_state(cpu, logfile->fd, flags);
+ }
+ rcu_read_unlock();
+ }
+}
+
+/**
+ * log_cpu_state_mask:
+ * @mask: Mask when to log.
+ * @cpu: The CPU whose state is to be logged.
+ * @flags: Flags what to log.
+ *
+ * Logs the output of cpu_dump_state() if loglevel includes @mask.
+ */
+static inline void log_cpu_state_mask(int mask, CPUState *cpu, int flags)
+{
+ if (qemu_loglevel & mask) {
+ log_cpu_state(cpu, flags);
+ }
+}
+
+#ifdef NEED_CPU_H
+/* disas() and target_disas() to qemu_logfile: */
+static inline void log_target_disas(CPUState *cpu, target_ulong start,
+ target_ulong len)
+{
+ QemuLogFile *logfile;
+ rcu_read_lock();
+ logfile = qatomic_rcu_read(&qemu_logfile);
+ if (logfile) {
+ target_disas(logfile->fd, cpu, start, len);
+ }
+ rcu_read_unlock();
+}
+
+static inline void log_disas(const void *code, unsigned long size)
+{
+ QemuLogFile *logfile;
+ rcu_read_lock();
+ logfile = qatomic_rcu_read(&qemu_logfile);
+ if (logfile) {
+ disas(logfile->fd, code, size);
+ }
+ rcu_read_unlock();
+}
+
+#if defined(CONFIG_USER_ONLY)
+/* page_dump() output to the log file: */
+static inline void log_page_dump(const char *operation)
+{
+ FILE *logfile = qemu_log_lock();
+ if (logfile) {
+ qemu_log("page layout changed following %s\n", operation);
+ page_dump(logfile);
+ }
+ qemu_log_unlock(logfile);
+}
+#endif
+#endif
+
+#endif
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
new file mode 100644
index 000000000..95f2d20d5
--- /dev/null
+++ b/include/exec/memattrs.h
@@ -0,0 +1,71 @@
+/*
+ * Memory transaction attributes
+ *
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Authors:
+ * Peter Maydell <peter.maydell@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMATTRS_H
+#define MEMATTRS_H
+
+/* Every memory transaction has associated with it a set of
+ * attributes. Some of these are generic (such as the ID of
+ * the bus master); some are specific to a particular kind of
+ * bus (such as the ARM Secure/NonSecure bit). We define them
+ * all as non-overlapping bitfields in a single struct to avoid
+ * confusion if different parts of QEMU used the same bit for
+ * different semantics.
+ */
+typedef struct MemTxAttrs {
+ /* Bus masters which don't specify any attributes will get this
+ * (via the MEMTXATTRS_UNSPECIFIED constant), so that we can
+ * distinguish "all attributes deliberately clear" from
+ * "didn't specify" if necessary.
+ */
+ unsigned int unspecified:1;
+ /* ARM/AMBA: TrustZone Secure access
+ * x86: System Management Mode access
+ */
+ unsigned int secure:1;
+ /* Memory access is usermode (unprivileged) */
+ unsigned int user:1;
+ /* Requester ID (for MSI for example) */
+ unsigned int requester_id:16;
+ /* Invert endianness for this page */
+ unsigned int byte_swap:1;
+ /*
+ * The following are target-specific page-table bits. These are not
+ * related to actual memory transactions at all. However, this structure
+ * is part of the tlb_fill interface, cached in the cputlb structure,
+ * and has unused bits. These fields will be read by target-specific
+ * helpers using env->iotlb[mmu_idx][tlb_index()].attrs.target_tlb_bitN.
+ */
+ unsigned int target_tlb_bit0 : 1;
+ unsigned int target_tlb_bit1 : 1;
+ unsigned int target_tlb_bit2 : 1;
+} MemTxAttrs;
+
+/* Bus masters which don't specify any attributes will get this,
+ * which has all attribute bits clear except the topmost one
+ * (so that we can distinguish "all attributes deliberately clear"
+ * from "didn't specify" if necessary).
+ */
+#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 })
+
+/* New-style MMIO accessors can indicate that the transaction failed.
+ * A zero (MEMTX_OK) response means success; anything else is a failure
+ * of some kind. The memory subsystem will bitwise-OR together results
+ * if it is synthesizing an operation from multiple smaller accesses.
+ */
+#define MEMTX_OK 0
+#define MEMTX_ERROR (1U << 0) /* device returned an error */
+#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
+typedef uint32_t MemTxResult;
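+
+/*
+ * Illustrative sketch (hypothetical device, assuming the MemoryRegionOps
+ * ->read_with_attrs() signature from exec/memory.h): an MMIO handler can
+ * report failures through MemTxResult, e.g.
+ *
+ *     static MemTxResult my_dev_read(void *opaque, hwaddr addr,
+ *                                    uint64_t *data, unsigned size,
+ *                                    MemTxAttrs attrs)
+ *     {
+ *         if (addr >= MY_DEV_REG_SIZE) {
+ *             return MEMTX_DECODE_ERROR;   // nothing at that offset
+ *         }
+ *         *data = my_dev_reg_read(opaque, addr);
+ *         return MEMTX_OK;
+ *     }
+ */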
+
+#endif
diff --git a/include/exec/memop.h b/include/exec/memop.h
new file mode 100644
index 000000000..04264ffd6
--- /dev/null
+++ b/include/exec/memop.h
@@ -0,0 +1,138 @@
+/*
+ * Constants for memory operations
+ *
+ * Authors:
+ * Richard Henderson <rth@twiddle.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMOP_H
+#define MEMOP_H
+
+#include "qemu/host-utils.h"
+
+typedef enum MemOp {
+ MO_8 = 0,
+ MO_16 = 1,
+ MO_32 = 2,
+ MO_64 = 3,
+ MO_128 = 4,
+ MO_256 = 5,
+ MO_512 = 6,
+ MO_1024 = 7,
+ MO_SIZE = 0x07, /* Mask for the above. */
+
+ MO_SIGN = 0x08, /* Sign-extended, otherwise zero-extended. */
+
+ MO_BSWAP = 0x10, /* Host reverse endian. */
+#ifdef HOST_WORDS_BIGENDIAN
+ MO_LE = MO_BSWAP,
+ MO_BE = 0,
+#else
+ MO_LE = 0,
+ MO_BE = MO_BSWAP,
+#endif
+#ifdef NEED_CPU_H
+#ifdef TARGET_WORDS_BIGENDIAN
+ MO_TE = MO_BE,
+#else
+ MO_TE = MO_LE,
+#endif
+#endif
+
+ /*
+ * MO_UNALN accesses are never checked for alignment.
+ * MO_ALIGN accesses will result in a call to the CPU's
+ * do_unaligned_access hook if the guest address is not aligned.
+ * The default depends on whether the target CPU defines
+ * TARGET_ALIGNED_ONLY.
+ *
+ * Some architectures (e.g. ARMv8) need the address which is aligned
+ * to a size more than the size of the memory access.
+ * Some architectures (e.g. SPARCv9) need an address which is aligned,
+ * but less strictly than the natural alignment.
+ *
+ * MO_ALIGN supposes the alignment size is the size of a memory access.
+ *
+ * There are three options:
+ * - unaligned access permitted (MO_UNALN);
+ * - an alignment to the size of an access (MO_ALIGN);
+ * - an alignment to a specified size, which may be more or less than
+ *   the access size (MO_ALIGN_x where 'x' is a size in bytes).
+ */
+ MO_ASHIFT = 5,
+ MO_AMASK = 0x7 << MO_ASHIFT,
+#ifdef NEED_CPU_H
+#ifdef TARGET_ALIGNED_ONLY
+ MO_ALIGN = 0,
+ MO_UNALN = MO_AMASK,
+#else
+ MO_ALIGN = MO_AMASK,
+ MO_UNALN = 0,
+#endif
+#endif
+ MO_ALIGN_2 = 1 << MO_ASHIFT,
+ MO_ALIGN_4 = 2 << MO_ASHIFT,
+ MO_ALIGN_8 = 3 << MO_ASHIFT,
+ MO_ALIGN_16 = 4 << MO_ASHIFT,
+ MO_ALIGN_32 = 5 << MO_ASHIFT,
+ MO_ALIGN_64 = 6 << MO_ASHIFT,
+
+ /* Combinations of the above, for ease of use. */
+ MO_UB = MO_8,
+ MO_UW = MO_16,
+ MO_UL = MO_32,
+ MO_SB = MO_SIGN | MO_8,
+ MO_SW = MO_SIGN | MO_16,
+ MO_SL = MO_SIGN | MO_32,
+ MO_Q = MO_64,
+
+ MO_LEUW = MO_LE | MO_UW,
+ MO_LEUL = MO_LE | MO_UL,
+ MO_LESW = MO_LE | MO_SW,
+ MO_LESL = MO_LE | MO_SL,
+ MO_LEQ = MO_LE | MO_Q,
+
+ MO_BEUW = MO_BE | MO_UW,
+ MO_BEUL = MO_BE | MO_UL,
+ MO_BESW = MO_BE | MO_SW,
+ MO_BESL = MO_BE | MO_SL,
+ MO_BEQ = MO_BE | MO_Q,
+
+#ifdef NEED_CPU_H
+ MO_TEUW = MO_TE | MO_UW,
+ MO_TEUL = MO_TE | MO_UL,
+ MO_TESW = MO_TE | MO_SW,
+ MO_TESL = MO_TE | MO_SL,
+ MO_TEQ = MO_TE | MO_Q,
+#endif
+
+ MO_SSIZE = MO_SIZE | MO_SIGN,
+} MemOp;
+
+/* MemOp to size in bytes. */
+static inline unsigned memop_size(MemOp op)
+{
+ return 1 << (op & MO_SIZE);
+}
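+
+/*
+ * Example (target code, since the MO_TE* values require NEED_CPU_H):
+ * operations are built by OR-ing size, sign, endianness and alignment
+ * bits, e.g.
+ *
+ *     MemOp op = MO_TEUL | MO_ALIGN;   // aligned, target-endian 32-bit
+ *     unsigned bytes = memop_size(op); // == 4
+ */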
+
+/* Size in bytes to MemOp. */
+static inline MemOp size_memop(unsigned size)
+{
+#ifdef CONFIG_DEBUG_TCG
+ /* Power of 2 up to 8. */
+ assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
+#endif
+ return ctz32(size);
+}
+
+/* Big endianness from MemOp. */
+static inline bool memop_big_endian(MemOp op)
+{
+ return (op & MO_BSWAP) == MO_BE;
+}
+
+#endif
diff --git a/include/exec/memopidx.h b/include/exec/memopidx.h
new file mode 100644
index 000000000..83bce9787
--- /dev/null
+++ b/include/exec/memopidx.h
@@ -0,0 +1,55 @@
+/*
+ * Combine the MemOp and mmu_idx parameters into a single value.
+ *
+ * Authors:
+ * Richard Henderson <rth@twiddle.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef EXEC_MEMOPIDX_H
+#define EXEC_MEMOPIDX_H 1
+
+#include "exec/memop.h"
+
+typedef uint32_t MemOpIdx;
+
+/**
+ * make_memop_idx
+ * @op: memory operation
+ * @idx: mmu index
+ *
+ * Encode these values into a single parameter.
+ */
+static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
+{
+#ifdef CONFIG_DEBUG_TCG
+ assert(idx <= 15);
+#endif
+ return (op << 4) | idx;
+}
+
+/**
+ * get_memop
+ * @oi: combined op/idx parameter
+ *
+ * Extract the memory operation from the combined value.
+ */
+static inline MemOp get_memop(MemOpIdx oi)
+{
+ return oi >> 4;
+}
+
+/**
+ * get_mmuidx
+ * @oi: combined op/idx parameter
+ *
+ * Extract the mmu index from the combined value.
+ */
+static inline unsigned get_mmuidx(MemOpIdx oi)
+{
+ return oi & 15;
+}
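+
+/*
+ * Example: the round trip through the accessors above, e.g.
+ *
+ *     MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx);
+ *     MemOp mop   = get_memop(oi);    // == MO_TEUW
+ *     unsigned ix = get_mmuidx(oi);   // == mmu_idx
+ */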
+
+#endif
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
new file mode 100644
index 000000000..9fcc2af25
--- /dev/null
+++ b/include/exec/memory-internal.h
@@ -0,0 +1,53 @@
+/*
+ * Declarations for functions which are internal to the memory subsystem.
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c, memory.c and accel/tcg/cputlb.c ONLY,
+ * for declarations which are shared between the memory subsystem's
+ * internals and the TCG TLB code. Do not include it from elsewhere.
+ */
+
+#ifndef MEMORY_INTERNAL_H
+#define MEMORY_INTERNAL_H
+
+#include "cpu.h"
+
+#ifndef CONFIG_USER_ONLY
+static inline AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
+{
+ return fv->dispatch;
+}
+
+static inline AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
+{
+ return flatview_to_dispatch(address_space_to_flatview(as));
+}
+
+FlatView *address_space_get_flatview(AddressSpace *as);
+void flatview_unref(FlatView *view);
+
+extern const MemoryRegionOps unassigned_mem_ops;
+
+bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
+ unsigned size, bool is_write,
+ MemTxAttrs attrs);
+
+void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section);
+AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv);
+void address_space_dispatch_compact(AddressSpaceDispatch *d);
+void address_space_dispatch_free(AddressSpaceDispatch *d);
+
+void mtree_print_dispatch(struct AddressSpaceDispatch *d,
+ MemoryRegion *root);
+#endif
+#endif
diff --git a/include/exec/memory.h b/include/exec/memory.h
new file mode 100644
index 000000000..20f1b2737
--- /dev/null
+++ b/include/exec/memory.h
@@ -0,0 +1,2986 @@
+/*
+ * Physical memory management API
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMORY_H
+#define MEMORY_H
+
+#ifndef CONFIG_USER_ONLY
+
+#include "exec/cpu-common.h"
+#include "exec/hwaddr.h"
+#include "exec/memattrs.h"
+#include "exec/memop.h"
+#include "exec/ramlist.h"
+#include "qemu/bswap.h"
+#include "qemu/queue.h"
+#include "qemu/int128.h"
+#include "qemu/notify.h"
+#include "qom/object.h"
+#include "qemu/rcu.h"
+
+#define RAM_ADDR_INVALID (~(ram_addr_t)0)
+
+#define MAX_PHYS_ADDR_SPACE_BITS 62
+#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
+
+#define TYPE_MEMORY_REGION "memory-region"
+DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
+ TYPE_MEMORY_REGION)
+
+#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
+typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
+DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
+ IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
+
+#define TYPE_RAM_DISCARD_MANAGER "qemu:ram-discard-manager"
+typedef struct RamDiscardManagerClass RamDiscardManagerClass;
+typedef struct RamDiscardManager RamDiscardManager;
+DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
+ RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);
+
+#ifdef CONFIG_FUZZ
+void fuzz_dma_read_cb(size_t addr,
+ size_t len,
+ MemoryRegion *mr);
+#else
+static inline void fuzz_dma_read_cb(size_t addr,
+ size_t len,
+ MemoryRegion *mr)
+{
+ /* Do Nothing */
+}
+#endif
+
+/* Possible bits for global_dirty_log_{start|stop} */
+
+/* Dirty tracking enabled because migration is running */
+#define GLOBAL_DIRTY_MIGRATION (1U << 0)
+
+/* Dirty tracking enabled because measuring dirty rate */
+#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
+
+#define GLOBAL_DIRTY_MASK (0x3)
+
+extern unsigned int global_dirty_tracking;
+
+typedef struct MemoryRegionOps MemoryRegionOps;
+
+struct ReservedRegion {
+ hwaddr low;
+ hwaddr high;
+ unsigned type;
+};
+
+/**
+ * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
+ *
+ * @mr: the region, or %NULL if empty
+ * @fv: the flat view of the address space the region is mapped in
+ * @offset_within_region: the beginning of the section, relative to @mr's start
+ * @size: the size of the section; will not exceed @mr's boundaries
+ * @offset_within_address_space: the address of the first byte of the section
+ * relative to the region's address space
+ * @readonly: writes to this section are ignored
+ * @nonvolatile: this section is non-volatile
+ */
+struct MemoryRegionSection {
+ Int128 size;
+ MemoryRegion *mr;
+ FlatView *fv;
+ hwaddr offset_within_region;
+ hwaddr offset_within_address_space;
+ bool readonly;
+ bool nonvolatile;
+};
+
+typedef struct IOMMUTLBEntry IOMMUTLBEntry;
+
+/* See address_space_translate: bit 0 is read, bit 1 is write. */
+typedef enum {
+ IOMMU_NONE = 0,
+ IOMMU_RO = 1,
+ IOMMU_WO = 2,
+ IOMMU_RW = 3,
+} IOMMUAccessFlags;
+
+#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
+
+struct IOMMUTLBEntry {
+ AddressSpace *target_as;
+ hwaddr iova;
+ hwaddr translated_addr;
+ hwaddr addr_mask; /* 0xfff = 4k translation */
+ IOMMUAccessFlags perm;
+};
+
+/*
+ * Bitmap for different IOMMUNotifier capabilities. Each notifier can
+ * register with one or multiple IOMMU Notifier capability bit(s).
+ */
+typedef enum {
+ IOMMU_NOTIFIER_NONE = 0,
+ /* Notify cache invalidations */
+ IOMMU_NOTIFIER_UNMAP = 0x1,
+ /* Notify entry changes (newly created entries) */
+ IOMMU_NOTIFIER_MAP = 0x2,
+ /* Notify changes on device IOTLB entries */
+ IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
+} IOMMUNotifierFlag;
+
+#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
+#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
+#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
+ IOMMU_NOTIFIER_DEVIOTLB_EVENTS)
+
+struct IOMMUNotifier;
+typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
+ IOMMUTLBEntry *data);
+
+struct IOMMUNotifier {
+ IOMMUNotify notify;
+ IOMMUNotifierFlag notifier_flags;
+ /* Notify for address space range start <= addr <= end */
+ hwaddr start;
+ hwaddr end;
+ int iommu_idx;
+ QLIST_ENTRY(IOMMUNotifier) node;
+};
+typedef struct IOMMUNotifier IOMMUNotifier;
+
+typedef struct IOMMUTLBEvent {
+ IOMMUNotifierFlag type;
+ IOMMUTLBEntry entry;
+} IOMMUTLBEvent;
+
+/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
+#define RAM_PREALLOC (1 << 0)
+
+/* RAM is mmap-ed with MAP_SHARED */
+#define RAM_SHARED (1 << 1)
+
+/* Only a portion of RAM (used_length) is actually used, and migrated.
+ * Resizing RAM while migrating can result in the migration being canceled.
+ */
+#define RAM_RESIZEABLE (1 << 2)
+
+/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
+ * zero the page and wake waiting processes.
+ * (Set during postcopy)
+ */
+#define RAM_UF_ZEROPAGE (1 << 3)
+
+/* RAM can be migrated */
+#define RAM_MIGRATABLE (1 << 4)
+
+/* RAM is a persistent kind memory */
+#define RAM_PMEM (1 << 5)
+
+
+/*
+ * UFFDIO_WRITEPROTECT is used on this RAMBlock to
+ * support 'write-tracking' migration type.
+ * Implies ram_state->ram_wt_enabled.
+ */
+#define RAM_UF_WRITEPROTECT (1 << 6)
+
+/*
+ * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
+ * pages if applicable) is skipped: will bail out if not supported. When not
+ * set, the OS will do the reservation, if supported for the memory type.
+ */
+#define RAM_NORESERVE (1 << 7)
+
+/* RAM that isn't accessible through normal means. */
+#define RAM_PROTECTED (1 << 8)
+
+static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
+ IOMMUNotifierFlag flags,
+ hwaddr start, hwaddr end,
+ int iommu_idx)
+{
+ n->notify = fn;
+ n->notifier_flags = flags;
+ n->start = start;
+ n->end = end;
+ n->iommu_idx = iommu_idx;
+}
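+
+/*
+ * Illustrative sketch (hypothetical device code; vdev_iommu_unmap and
+ * iommu_mr are placeholders): a user typically fills in an IOMMUNotifier
+ * with iommu_notifier_init() and then registers it on the IOMMU memory
+ * region it wants to observe:
+ *
+ *     static void vdev_iommu_unmap(IOMMUNotifier *n, IOMMUTLBEntry *entry)
+ *     {
+ *         // invalidate any cached translation covering entry->iova
+ *     }
+ *
+ *     IOMMUNotifier n;
+ *     iommu_notifier_init(&n, vdev_iommu_unmap, IOMMU_NOTIFIER_UNMAP,
+ *                         0, HWADDR_MAX, 0);
+ *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n,
+ *                                           &error_fatal);
+ */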
+
+/*
+ * Memory region callbacks
+ */
+struct MemoryRegionOps {
+ /* Read from the memory region. @addr is relative to @mr; @size is
+ * in bytes. */
+ uint64_t (*read)(void *opaque,
+ hwaddr addr,
+ unsigned size);
+ /* Write to the memory region. @addr is relative to @mr; @size is
+ * in bytes. */
+ void (*write)(void *opaque,
+ hwaddr addr,
+ uint64_t data,
+ unsigned size);
+
+ MemTxResult (*read_with_attrs)(void *opaque,
+ hwaddr addr,
+ uint64_t *data,
+ unsigned size,
+ MemTxAttrs attrs);
+ MemTxResult (*write_with_attrs)(void *opaque,
+ hwaddr addr,
+ uint64_t data,
+ unsigned size,
+ MemTxAttrs attrs);
+
+ enum device_endian endianness;
+ /* Guest-visible constraints: */
+ struct {
+ /* If nonzero, specify bounds on access sizes beyond which a machine
+ * check is thrown.
+ */
+ unsigned min_access_size;
+ unsigned max_access_size;
+ /* If true, unaligned accesses are supported. Otherwise unaligned
+ * accesses throw machine checks.
+ */
+ bool unaligned;
+ /*
+ * If present, and returns #false, the transaction is not accepted
+ * by the device (and results in machine dependent behaviour such
+ * as a machine check exception).
+ */
+ bool (*accepts)(void *opaque, hwaddr addr,
+ unsigned size, bool is_write,
+ MemTxAttrs attrs);
+ } valid;
+ /* Internal implementation constraints: */
+ struct {
+ /* If nonzero, specifies the minimum size implemented. Smaller sizes
+ * will be rounded upwards and a partial result will be returned.
+ */
+ unsigned min_access_size;
+ /* If nonzero, specifies the maximum size implemented. Larger sizes
+ * will be done as a series of accesses with smaller sizes.
+ */
+ unsigned max_access_size;
+ /* If true, unaligned accesses are supported. Otherwise all accesses
+ * are converted to (possibly multiple) naturally aligned accesses.
+ */
+ bool unaligned;
+ } impl;
+};
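+
+/*
+ * Illustrative sketch (hypothetical device; MyDevState and the mydev_*
+ * names are placeholders): a minimal MemoryRegionOps for a bank of 32-bit
+ * registers that only accepts aligned 4-byte accesses could look like:
+ *
+ *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
+ *     {
+ *         MyDevState *s = opaque;
+ *         return s->regs[addr >> 2];
+ *     }
+ *
+ *     static void mydev_write(void *opaque, hwaddr addr,
+ *                             uint64_t data, unsigned size)
+ *     {
+ *         MyDevState *s = opaque;
+ *         s->regs[addr >> 2] = data;
+ *     }
+ *
+ *     static const MemoryRegionOps mydev_ops = {
+ *         .read = mydev_read,
+ *         .write = mydev_write,
+ *         .endianness = DEVICE_NATIVE_ENDIAN,
+ *         .valid.min_access_size = 4,
+ *         .valid.max_access_size = 4,
+ *     };
+ */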
+
+typedef struct MemoryRegionClass {
+ /* private */
+ ObjectClass parent_class;
+} MemoryRegionClass;
+
+
+enum IOMMUMemoryRegionAttr {
+ IOMMU_ATTR_SPAPR_TCE_FD
+};
+
+/*
+ * IOMMUMemoryRegionClass:
+ *
+ * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
+ * and provide an implementation of at least the @translate method here
+ * to handle requests to the memory region. Other methods are optional.
+ *
+ * The IOMMU implementation must use the IOMMU notifier infrastructure
+ * to report whenever mappings are changed, by calling
+ * memory_region_notify_iommu() (or, if necessary, by calling
+ * memory_region_notify_iommu_one() for each registered notifier).
+ *
+ * Conceptually an IOMMU provides a mapping from input address
+ * to an output TLB entry. If the IOMMU is aware of memory transaction
+ * attributes and the output TLB entry depends on the transaction
+ * attributes, we represent this using IOMMU indexes. Each index
+ * selects a particular translation table that the IOMMU has:
+ *
+ * @attrs_to_index returns the IOMMU index for a set of transaction attributes
+ *
+ * @translate takes an input address and an IOMMU index
+ *
+ * and the mapping returned can only depend on the input address and the
+ * IOMMU index.
+ *
+ * Most IOMMUs don't care about the transaction attributes and support
+ * only a single IOMMU index. A more complex IOMMU might have one index
+ * for secure transactions and one for non-secure transactions.
+ */
+struct IOMMUMemoryRegionClass {
+ /* private: */
+ MemoryRegionClass parent_class;
+
+ /* public: */
+ /**
+ * @translate:
+ *
+ * Return a TLB entry that contains a given address.
+ *
+ * The IOMMUAccessFlags indicated via @flag are optional and may
+ * be specified as IOMMU_NONE to indicate that the caller needs
+ * the full translation information for both reads and writes. If
+ * the access flags are specified then the IOMMU implementation
+ * may use this as an optimization, to stop doing a page table
+ * walk as soon as it knows that the requested permissions are not
+ * allowed. If IOMMU_NONE is passed then the IOMMU must do the
+ * full page table walk and report the permissions in the returned
+ * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
+ * return different mappings for reads and writes.)
+ *
+ * The returned information remains valid while the caller is
+ * holding the big QEMU lock or is inside an RCU critical section;
+ * if the caller wishes to cache the mapping beyond that it must
+ * register an IOMMU notifier so it can invalidate its cached
+ * information when the IOMMU mapping changes.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+     * @addr: address to be translated within the memory region
+ *
+ * @flag: requested access permission
+ *
+ * @iommu_idx: IOMMU index for the translation
+ */
+ IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
+ IOMMUAccessFlags flag, int iommu_idx);
+ /**
+ * @get_min_page_size:
+ *
+ * Returns minimum supported page size in bytes.
+ *
+ * If this method is not provided then the minimum is assumed to
+ * be TARGET_PAGE_SIZE.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ */
+ uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
+ /**
+ * @notify_flag_changed:
+ *
+ * Called when IOMMU Notifier flag changes (ie when the set of
+ * events which IOMMU users are requesting notification for changes).
+ * Optional method -- need not be provided if the IOMMU does not
+ * need to know exactly which events must be notified.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+ * @old_flags: events which previously needed to be notified
+ *
+ * @new_flags: events which now need to be notified
+ *
+ * Returns 0 on success, or a negative errno; in particular
+ * returns -EINVAL if the new flag bitmap is not supported by the
+ * IOMMU memory region. In case of failure, the error object
+     * must be created.
+ */
+ int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
+ IOMMUNotifierFlag old_flags,
+ IOMMUNotifierFlag new_flags,
+ Error **errp);
+ /**
+ * @replay:
+ *
+ * Called to handle memory_region_iommu_replay().
+ *
+ * The default implementation of memory_region_iommu_replay() is to
+ * call the IOMMU translate method for every page in the address space
+ * with flag == IOMMU_NONE and then call the notifier if translate
+ * returns a valid mapping. If this method is implemented then it
+ * overrides the default behaviour, and must provide the full semantics
+ * of memory_region_iommu_replay(), by calling @notifier for every
+ * translation present in the IOMMU.
+ *
+ * Optional method -- an IOMMU only needs to provide this method
+ * if the default is inefficient or produces undesirable side effects.
+ *
+ * Note: this is not related to record-and-replay functionality.
+ */
+ void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
+
+ /**
+ * @get_attr:
+ *
+ * Get IOMMU misc attributes. This is an optional method that
+ * can be used to allow users of the IOMMU to get implementation-specific
+ * information. The IOMMU implements this method to handle calls
+ * by IOMMU users to memory_region_iommu_get_attr() by filling in
+ * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
+ * the IOMMU supports. If the method is unimplemented then
+ * memory_region_iommu_get_attr() will always return -EINVAL.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+ * @attr: attribute being queried
+ *
+ * @data: memory to fill in with the attribute data
+ *
+ * Returns 0 on success, or a negative errno; in particular
+ * returns -EINVAL for unrecognized or unimplemented attribute types.
+ */
+ int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
+ void *data);
+
+ /**
+ * @attrs_to_index:
+ *
+ * Return the IOMMU index to use for a given set of transaction attributes.
+ *
+ * Optional method: if an IOMMU only supports a single IOMMU index then
+ * the default implementation of memory_region_iommu_attrs_to_index()
+ * will return 0.
+ *
+ * The indexes supported by an IOMMU must be contiguous, starting at 0.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ * @attrs: memory transaction attributes
+ */
+ int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
+
+ /**
+ * @num_indexes:
+ *
+ * Return the number of IOMMU indexes this IOMMU supports.
+ *
+ * Optional method: if this method is not provided, then
+ * memory_region_iommu_num_indexes() will return 1, indicating that
+ * only a single IOMMU index is supported.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ */
+ int (*num_indexes)(IOMMUMemoryRegion *iommu);
+
+ /**
+ * @iommu_set_page_size_mask:
+ *
+ * Restrict the page size mask that can be supported with a given IOMMU
+ * memory region. Used for example to propagate host physical IOMMU page
+ * size mask limitations to the virtual IOMMU.
+ *
+ * Optional method: if this method is not provided, then the default global
+ * page mask is used.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+ * @page_size_mask: a bitmask of supported page sizes. At least one bit,
+ * representing the smallest page size, must be set. Additional set bits
+ * represent supported block sizes. For example a host physical IOMMU that
+ * uses page tables with a page size of 4kB, and supports 2MB and 4GB
+ * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
+ * block sizes is specified with mask 0xfffffffffffff000.
+ *
+ * Returns 0 on success, or a negative error. In case of failure, the error
+ * object must be created.
+ */
+ int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
+ uint64_t page_size_mask,
+ Error **errp);
+};
+
+typedef struct RamDiscardListener RamDiscardListener;
+typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+struct RamDiscardListener {
+ /*
+ * @notify_populate:
+ *
+ * Notification that previously discarded memory is about to get populated.
+ * Listeners are able to object. If any listener objects, already
+ * successfully notified listeners are notified about a discard again.
+ *
+ * @rdl: the #RamDiscardListener getting notified
+ * @section: the #MemoryRegionSection to get populated. The section
+ * is aligned within the memory region to the minimum granularity
+ * unless it would exceed the registered section.
+ *
+ * Returns 0 on success. If the notification is rejected by the listener,
+ * an error is returned.
+ */
+ NotifyRamPopulate notify_populate;
+
+ /*
+ * @notify_discard:
+ *
+ * Notification that previously populated memory was discarded successfully
+ * and listeners should drop all references to such memory and prevent
+ * new population (e.g., unmap).
+ *
+ * @rdl: the #RamDiscardListener getting notified
+     * @section: the #MemoryRegionSection to get discarded. The section
+ * is aligned within the memory region to the minimum granularity
+ * unless it would exceed the registered section.
+ */
+ NotifyRamDiscard notify_discard;
+
+ /*
+ * @double_discard_supported:
+ *
+     * The listener supports getting @notify_discard notifications that span
+ * already discarded parts.
+ */
+ bool double_discard_supported;
+
+ MemoryRegionSection *section;
+ QLIST_ENTRY(RamDiscardListener) next;
+};
+
+static inline void ram_discard_listener_init(RamDiscardListener *rdl,
+ NotifyRamPopulate populate_fn,
+ NotifyRamDiscard discard_fn,
+ bool double_discard_supported)
+{
+ rdl->notify_populate = populate_fn;
+ rdl->notify_discard = discard_fn;
+ rdl->double_discard_supported = double_discard_supported;
+}
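+
+/*
+ * Illustrative sketch (hypothetical listener; the mydev_* names, rdm and
+ * section are placeholders): a user that must only map populated parts,
+ * such as a VFIO-like backend, would initialize and register a listener
+ * like this:
+ *
+ *     static int mydev_ram_populate(RamDiscardListener *rdl,
+ *                                   MemoryRegionSection *section)
+ *     {
+ *         return 0;   // e.g. DMA-map @section; nonzero rejects the change
+ *     }
+ *
+ *     static void mydev_ram_discard(RamDiscardListener *rdl,
+ *                                   MemoryRegionSection *section)
+ *     {
+ *         // e.g. DMA-unmap @section
+ *     }
+ *
+ *     RamDiscardListener rdl;
+ *     ram_discard_listener_init(&rdl, mydev_ram_populate,
+ *                               mydev_ram_discard, false);
+ *     ram_discard_manager_register_listener(rdm, &rdl, &section);
+ */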
+
+typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
+typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);
+
+/*
+ * RamDiscardManagerClass:
+ *
+ * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
+ * regions are currently populated to be used/accessed by the VM, notifying
+ * after parts were discarded (freeing up memory) and before parts will be
+ * populated (consuming memory), to be used/accessed by the VM.
+ *
+ * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
+ * #MemoryRegion isn't mapped yet; it cannot change while the #MemoryRegion is
+ * mapped.
+ *
+ * The #RamDiscardManager is intended to be used by technologies that are
+ * incompatible with discarding of RAM (e.g., VFIO, which may pin all
+ * memory inside a #MemoryRegion), and require proper coordination to only
+ * map the currently populated parts, to hinder parts that are expected to
+ * remain discarded from silently getting populated and consuming memory.
+ * Technologies that support discarding of RAM don't have to bother and can
+ * simply map the whole #MemoryRegion.
+ *
+ * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
+ * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
+ * Logically unplugging memory consists of discarding RAM. The VM agreed to not
+ * access unplugged (discarded) memory - especially via DMA. virtio-mem will
+ * properly coordinate with listeners before memory is plugged (populated),
+ * and after memory is unplugged (discarded).
+ *
+ * Listeners are called in multiples of the minimum granularity (unless it
+ * would exceed the registered range) and changes are aligned to the minimum
+ * granularity within the #MemoryRegion. Listeners have to prepare for memory
+ * becoming discarded at a different granularity than it was populated, and the
+ * other way around.
+ */
+struct RamDiscardManagerClass {
+ /* private */
+ InterfaceClass parent_class;
+
+ /* public */
+
+ /**
+ * @get_min_granularity:
+ *
+ * Get the minimum granularity in which listeners will get notified
+ * about changes within the #MemoryRegion via the #RamDiscardManager.
+ *
+ * @rdm: the #RamDiscardManager
+ * @mr: the #MemoryRegion
+ *
+ * Returns the minimum granularity.
+ */
+ uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
+ const MemoryRegion *mr);
+
+ /**
+ * @is_populated:
+ *
+ * Check whether the given #MemoryRegionSection is completely populated
+ * (i.e., no parts are currently discarded) via the #RamDiscardManager.
+ * There are no alignment requirements.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ *
+ * Returns whether the given range is completely populated.
+ */
+ bool (*is_populated)(const RamDiscardManager *rdm,
+ const MemoryRegionSection *section);
+
+ /**
+ * @replay_populated:
+ *
+ * Call the #ReplayRamPopulate callback for all populated parts within the
+ * #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * In case any call fails, no further calls are made.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ * @replay_fn: the #ReplayRamPopulate callback
+ * @opaque: pointer to forward to the callback
+ *
+ * Returns 0 on success, or a negative error if any notification failed.
+ */
+ int (*replay_populated)(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamPopulate replay_fn, void *opaque);
+
+ /**
+ * @replay_discarded:
+ *
+ * Call the #ReplayRamDiscard callback for all discarded parts within the
+ * #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ * @replay_fn: the #ReplayRamDiscard callback
+ * @opaque: pointer to forward to the callback
+ */
+ void (*replay_discarded)(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscard replay_fn, void *opaque);
+
+ /**
+ * @register_listener:
+ *
+ * Register a #RamDiscardListener for the given #MemoryRegionSection and
+ * immediately notify the #RamDiscardListener about all populated parts
+ * within the #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * In case any notification fails, no further notifications are triggered
+ * and an error is logged.
+ *
+ * @rdm: the #RamDiscardManager
+ * @rdl: the #RamDiscardListener
+ * @section: the #MemoryRegionSection
+ */
+ void (*register_listener)(RamDiscardManager *rdm,
+ RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+ /**
+ * @unregister_listener:
+ *
+ * Unregister a previously registered #RamDiscardListener via the
+ * #RamDiscardManager after notifying the #RamDiscardListener about all
+ * populated parts becoming unpopulated within the registered
+ * #MemoryRegionSection.
+ *
+ * @rdm: the #RamDiscardManager
+ * @rdl: the #RamDiscardListener
+ */
+ void (*unregister_listener)(RamDiscardManager *rdm,
+ RamDiscardListener *rdl);
+};
+
+uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
+ const MemoryRegion *mr);
+
+bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
+ const MemoryRegionSection *section);
+
+int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamPopulate replay_fn,
+ void *opaque);
+
+void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscard replay_fn,
+ void *opaque);
+
+void ram_discard_manager_register_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl);
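+
+/*
+ * Illustrative sketch (hypothetical callback; rdm and section are
+ * placeholders): ram_discard_manager_replay_populated() visits every
+ * populated piece of a section, for example to total up populated memory:
+ *
+ *     static int add_populated(MemoryRegionSection *s, void *opaque)
+ *     {
+ *         uint64_t *bytes = opaque;
+ *         *bytes += int128_get64(s->size);
+ *         return 0;                      // nonzero aborts the replay
+ *     }
+ *
+ *     uint64_t bytes = 0;
+ *     ram_discard_manager_replay_populated(rdm, &section, add_populated,
+ *                                          &bytes);
+ */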
+
+typedef struct CoalescedMemoryRange CoalescedMemoryRange;
+typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
+
+/** MemoryRegion:
+ *
+ * A struct representing a memory region.
+ */
+struct MemoryRegion {
+ Object parent_obj;
+
+ /* private: */
+
+ /* The following fields should fit in a cache line */
+ bool romd_mode;
+ bool ram;
+ bool subpage;
+ bool readonly; /* For RAM regions */
+ bool nonvolatile;
+ bool rom_device;
+ bool flush_coalesced_mmio;
+ uint8_t dirty_log_mask;
+ bool is_iommu;
+ RAMBlock *ram_block;
+ Object *owner;
+
+ const MemoryRegionOps *ops;
+ void *opaque;
+ MemoryRegion *container;
+ Int128 size;
+ hwaddr addr;
+ void (*destructor)(MemoryRegion *mr);
+ uint64_t align;
+ bool terminates;
+ bool ram_device;
+ bool enabled;
+ bool warning_printed; /* For reservations */
+ uint8_t vga_logging_count;
+ MemoryRegion *alias;
+ hwaddr alias_offset;
+ int32_t priority;
+ QTAILQ_HEAD(, MemoryRegion) subregions;
+ QTAILQ_ENTRY(MemoryRegion) subregions_link;
+ QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
+ const char *name;
+ unsigned ioeventfd_nb;
+ MemoryRegionIoeventfd *ioeventfds;
+ RamDiscardManager *rdm; /* Only for RAM */
+};
+
+struct IOMMUMemoryRegion {
+ MemoryRegion parent_obj;
+
+ QLIST_HEAD(, IOMMUNotifier) iommu_notify;
+ IOMMUNotifierFlag iommu_notify_flags;
+};
+
+#define IOMMU_NOTIFIER_FOREACH(n, mr) \
+ QLIST_FOREACH((n), &(mr)->iommu_notify, node)
+
+/**
+ * struct MemoryListener: callbacks structure for updates to the physical memory map
+ *
+ * Allows a component to adjust to changes in the guest-visible memory map.
+ * Use with memory_listener_register() and memory_listener_unregister().
+ */
+struct MemoryListener {
+ /**
+ * @begin:
+ *
+ * Called at the beginning of an address space update transaction.
+ * Followed by calls to #MemoryListener.region_add(),
+ * #MemoryListener.region_del(), #MemoryListener.region_nop(),
+ * #MemoryListener.log_start() and #MemoryListener.log_stop() in
+ * increasing address order.
+ *
+ * @listener: The #MemoryListener.
+ */
+ void (*begin)(MemoryListener *listener);
+
+ /**
+ * @commit:
+ *
+ * Called at the end of an address space update transaction,
+ * after the last call to #MemoryListener.region_add(),
+ * #MemoryListener.region_del() or #MemoryListener.region_nop(),
+ * #MemoryListener.log_start() and #MemoryListener.log_stop().
+ *
+ * @listener: The #MemoryListener.
+ */
+ void (*commit)(MemoryListener *listener);
+
+ /**
+ * @region_add:
+ *
+ * Called during an address space update transaction,
+     * for a section of the address space that is new in this address
+ * space since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ */
+ void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @region_del:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has disappeared in the address
+ * space since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The old #MemoryRegionSection.
+ */
+ void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @region_nop:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that is in the same place in the address
+ * space as in the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ */
+ void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @log_start:
+ *
+ * Called during an address space update transaction, after
+ * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
+ * #MemoryListener.region_nop(), if dirty memory logging clients have
+ * become active since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ * @old: A bitmap of dirty memory logging clients that were active in
+ * the previous transaction.
+ * @new: A bitmap of dirty memory logging clients that are active in
+ * the current transaction.
+ */
+ void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
+ int old, int new);
+
+ /**
+ * @log_stop:
+ *
+ * Called during an address space update transaction, after
+ * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
+ * #MemoryListener.region_nop() and possibly after
+ * #MemoryListener.log_start(), if dirty memory logging clients have
+ * become inactive since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ * @old: A bitmap of dirty memory logging clients that were active in
+ * the previous transaction.
+ * @new: A bitmap of dirty memory logging clients that are active in
+ * the current transaction.
+ */
+ void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
+ int old, int new);
+
+ /**
+ * @log_sync:
+ *
+ * Called by memory_region_snapshot_and_clear_dirty() and
+ * memory_global_dirty_log_sync(), before accessing QEMU's "official"
+ * copy of the dirty memory bitmap for a #MemoryRegionSection.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ */
+ void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @log_sync_global:
+ *
+ * This is the global version of @log_sync when the listener does
+ * not have a way to synchronize the log with finer granularity.
+ * When the listener registers with @log_sync_global defined, then
+ * its @log_sync must be NULL. Vice versa.
+ *
+ * @listener: The #MemoryListener.
+ */
+ void (*log_sync_global)(MemoryListener *listener);
+
+ /**
+ * @log_clear:
+ *
+ * Called before reading the dirty memory bitmap for a
+ * #MemoryRegionSection.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The #MemoryRegionSection.
+ */
+ void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
+
+ /**
+ * @log_global_start:
+ *
+ * Called by memory_global_dirty_log_start(), which
+ * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
+ * the address space. #MemoryListener.log_global_start() is also
+ * called when a #MemoryListener is added, if global dirty logging is
+ * active at that time.
+ *
+ * @listener: The #MemoryListener.
+ */
+ void (*log_global_start)(MemoryListener *listener);
+
+ /**
+ * @log_global_stop:
+ *
+ * Called by memory_global_dirty_log_stop(), which
+ * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
+ * the address space.
+ *
+ * @listener: The #MemoryListener.
+ */
+ void (*log_global_stop)(MemoryListener *listener);
+
+ /**
+ * @log_global_after_sync:
+ *
+ * Called after reading the dirty memory bitmap
+ * for any #MemoryRegionSection.
+ *
+ * @listener: The #MemoryListener.
+ */
+ void (*log_global_after_sync)(MemoryListener *listener);
+
+ /**
+ * @eventfd_add:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has had a new ioeventfd
+ * registration since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @match_data: The @match_data parameter for the new ioeventfd.
+ * @data: The @data parameter for the new ioeventfd.
+ * @e: The #EventNotifier parameter for the new ioeventfd.
+ */
+ void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+
+ /**
+ * @eventfd_del:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has dropped an ioeventfd
+ * registration since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @match_data: The @match_data parameter for the dropped ioeventfd.
+ * @data: The @data parameter for the dropped ioeventfd.
+ * @e: The #EventNotifier parameter for the dropped ioeventfd.
+ */
+ void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+
+ /**
+ * @coalesced_io_add:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has had a new coalesced
+ * MMIO range registration since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @addr: The starting address for the coalesced MMIO range.
+ * @len: The length of the coalesced MMIO range.
+ */
+ void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+
+ /**
+ * @coalesced_io_del:
+ *
+ * Called during an address space update transaction,
+ * for a section of the address space that has dropped a coalesced
+ * MMIO range since the last transaction.
+ *
+ * @listener: The #MemoryListener.
+ * @section: The new #MemoryRegionSection.
+ * @addr: The starting address for the coalesced MMIO range.
+ * @len: The length of the coalesced MMIO range.
+ */
+ void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+ /**
+ * @priority:
+ *
+ * Govern the order in which memory listeners are invoked. Lower priorities
+ * are invoked earlier for "add" or "start" callbacks, and later for "delete"
+ * or "stop" callbacks.
+ */
+ unsigned priority;
+
+ /**
+ * @name:
+ *
+ * Name of the listener. It can be used in contexts where we'd like to
+     * distinguish one memory listener from the rest.
+ */
+ const char *name;
+
+ /* private: */
+ AddressSpace *address_space;
+ QTAILQ_ENTRY(MemoryListener) link;
+ QTAILQ_ENTRY(MemoryListener) link_as;
+};
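+
+/*
+ * Illustrative sketch (hypothetical listener; the mydev_* names are
+ * placeholders): a component that only cares about sections appearing and
+ * disappearing can leave every other callback NULL and register the
+ * listener on an address space:
+ *
+ *     static void mydev_region_add(MemoryListener *l,
+ *                                  MemoryRegionSection *section)
+ *     {
+ *         // map or record the new section
+ *     }
+ *
+ *     static void mydev_region_del(MemoryListener *l,
+ *                                  MemoryRegionSection *section)
+ *     {
+ *         // unmap or forget the old section
+ *     }
+ *
+ *     static MemoryListener mydev_listener = {
+ *         .name = "mydev",
+ *         .region_add = mydev_region_add,
+ *         .region_del = mydev_region_del,
+ *         .priority = 10,
+ *     };
+ *
+ *     memory_listener_register(&mydev_listener, &address_space_memory);
+ */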
+
+/**
+ * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
+ */
+struct AddressSpace {
+ /* private: */
+ struct rcu_head rcu;
+ char *name;
+ MemoryRegion *root;
+
+ /* Accessed via RCU. */
+ struct FlatView *current_map;
+
+ int ioeventfd_nb;
+ struct MemoryRegionIoeventfd *ioeventfds;
+ QTAILQ_HEAD(, MemoryListener) listeners;
+ QTAILQ_ENTRY(AddressSpace) address_spaces_link;
+};
+
+typedef struct AddressSpaceDispatch AddressSpaceDispatch;
+typedef struct FlatRange FlatRange;
+
+/* Flattened global view of current active memory hierarchy. Kept in sorted
+ * order.
+ */
+struct FlatView {
+ struct rcu_head rcu;
+ unsigned ref;
+ FlatRange *ranges;
+ unsigned nr;
+ unsigned nr_allocated;
+ struct AddressSpaceDispatch *dispatch;
+ MemoryRegion *root;
+};
+
+static inline FlatView *address_space_to_flatview(AddressSpace *as)
+{
+ return qatomic_rcu_read(&as->current_map);
+}
+
+/**
+ * typedef flatview_cb: callback for flatview_for_each_range()
+ *
+ * @start: start address of the range within the FlatView
+ * @len: length of the range in bytes
+ * @mr: MemoryRegion covering this range
+ * @offset_in_region: offset of the first byte of the range within @mr
+ * @opaque: data pointer passed to flatview_for_each_range()
+ *
+ * Returns: true to stop the iteration, false to keep going.
+ */
+typedef bool (*flatview_cb)(Int128 start,
+ Int128 len,
+ const MemoryRegion *mr,
+ hwaddr offset_in_region,
+ void *opaque);
+
+/**
+ * flatview_for_each_range: Iterate through a FlatView
+ * @fv: the FlatView to iterate through
+ * @cb: function to call for each range
+ * @opaque: opaque data pointer to pass to @cb
+ *
+ * A FlatView is made up of a list of non-overlapping ranges, each of
+ * which is a slice of a MemoryRegion. This function iterates through
+ * each range in @fv, calling @cb. The callback function can terminate
+ * iteration early by returning 'true'.
+ */
+void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
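+
+/*
+ * Illustrative sketch (hypothetical callback; @as is a placeholder and the
+ * caller is assumed to be inside an RCU critical section): summing the
+ * RAM-backed bytes of an address space's current FlatView:
+ *
+ *     static bool sum_ram(Int128 start, Int128 len, const MemoryRegion *mr,
+ *                         hwaddr offset_in_region, void *opaque)
+ *     {
+ *         uint64_t *total = opaque;
+ *         if (memory_region_is_ram((MemoryRegion *)mr)) {
+ *             *total += int128_get64(len);
+ *         }
+ *         return false;                 // false = keep iterating
+ *     }
+ *
+ *     uint64_t total = 0;
+ *     flatview_for_each_range(address_space_to_flatview(as), sum_ram,
+ *                             &total);
+ */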
+
+static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
+ MemoryRegionSection *b)
+{
+ return a->mr == b->mr &&
+ a->fv == b->fv &&
+ a->offset_within_region == b->offset_within_region &&
+ a->offset_within_address_space == b->offset_within_address_space &&
+ int128_eq(a->size, b->size) &&
+ a->readonly == b->readonly &&
+ a->nonvolatile == b->nonvolatile;
+}
+
+/**
+ * memory_region_section_new_copy: Copy a memory region section
+ *
+ * Allocate memory for a new copy, copy the memory region section, and
+ * properly take a reference on all relevant members.
+ *
+ * @s: the #MemoryRegionSection to copy
+ */
+MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);
+
+/**
+ * memory_region_section_free_copy: Free a copied memory region section
+ *
+ * Free a copy of a memory section created via memory_region_section_new_copy(),
+ * properly dropping references on all relevant members.
+ *
+ * @s: the #MemoryRegionSection to free
+ */
+void memory_region_section_free_copy(MemoryRegionSection *s);
+
+/**
+ * memory_region_init: Initialize a memory region
+ *
+ * The region typically acts as a container for other memory regions. Use
+ * memory_region_add_subregion() to add subregions.
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region; any subregions beyond this size will be clipped
+ */
+void memory_region_init(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_ref: Add 1 to a memory region's reference count
+ *
+ * Whenever memory regions are accessed outside the BQL, they need to be
+ * preserved against hot-unplug. MemoryRegions actually do not have their
+ * own reference count; they piggyback on a QOM object, their "owner".
+ * This function adds a reference to the owner.
+ *
+ * All MemoryRegions must have an owner if they can disappear, even if the
+ * device they belong to operates exclusively under the BQL. This is because
+ * the region could be returned at any time by memory_region_find, and this
+ * is usually under guest control.
+ *
+ * @mr: the #MemoryRegion
+ */
+void memory_region_ref(MemoryRegion *mr);
+
+/**
+ * memory_region_unref: Remove 1 from a memory region's reference count
+ *
+ * Whenever memory regions are accessed outside the BQL, they need to be
+ * preserved against hot-unplug. MemoryRegions actually do not have their
+ * own reference count; they piggyback on a QOM object, their "owner".
+ * This function removes a reference to the owner and possibly destroys it.
+ *
+ * @mr: the #MemoryRegion
+ */
+void memory_region_unref(MemoryRegion *mr);
+
+/**
+ * memory_region_init_io: Initialize an I/O memory region.
+ *
+ * Accesses into the region will cause the callbacks in @ops to be called.
+ * If @size is nonzero, subregions will be clipped to @size.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @ops: a structure containing read and write callbacks to be used when
+ * I/O is performed on the region.
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_io(MemoryRegion *mr,
+ Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size);
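+
+/*
+ * Illustrative sketch (hypothetical device; s, mydev_ops and the name are
+ * placeholders): a device usually embeds a MemoryRegion in its state and
+ * initializes it with its MemoryRegionOps during realize:
+ *
+ *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
+ *                           "mydev-mmio", 0x1000);
+ */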
+
+/**
+ * memory_region_init_ram_nomigrate: Initialize RAM memory region. Accesses
+ * into the region will modify memory
+ * directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream;
+ * must be unique within any device
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ */
+void memory_region_init_ram_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+/**
+ * memory_region_init_ram_flags_nomigrate: Initialize RAM memory region.
+ * Accesses into the region will
+ * modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream;
+ * must be unique within any device
+ * @size: size of the region.
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ */
+void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ uint32_t ram_flags,
+ Error **errp);
+
+/**
+ * memory_region_init_resizeable_ram: Initialize memory region with resizeable
+ * RAM. Accesses into the region will
+ * modify memory directly. Only an initial
+ * portion of this RAM is actually used.
+ * Changing the size while migrating
+ * can result in the migration being
+ * canceled.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream;
+ * must be unique within any device
+ * @size: used size of the region.
+ * @max_size: max size of the region.
+ * @resized: callback to notify owner about used size change.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ */
+void memory_region_init_resizeable_ram(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ uint64_t max_size,
+ void (*resized)(const char*,
+ uint64_t length,
+ void *host),
+ Error **errp);
+#ifdef CONFIG_POSIX
+
+/**
+ * memory_region_init_ram_from_file: Initialize RAM memory region with a
+ * mmap-ed backend.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream;
+ * must be unique within any device
+ * @size: size of the region.
+ * @align: alignment of the region base address; if 0, the default alignment
+ * (getpagesize()) will be used.
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ *             RAM_NORESERVE.
+ * @path: the path in which to allocate the RAM.
+ * @readonly: true to open @path for reading, false for read/write.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ */
+void memory_region_init_ram_from_file(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ uint64_t align,
+ uint32_t ram_flags,
+ const char *path,
+ bool readonly,
+ Error **errp);
+
+/**
+ * memory_region_init_ram_from_fd: Initialize RAM memory region with a
+ * mmap-ed backend.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ * RAM_NORESERVE, RAM_PROTECTED.
+ * @fd: the fd to mmap.
+ * @offset: offset within the file referenced by fd
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ */
+void memory_region_init_ram_from_fd(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ uint32_t ram_flags,
+ int fd,
+ ram_addr_t offset,
+ Error **errp);
+#endif
+
+/**
+ * memory_region_init_ram_ptr: Initialize RAM memory region from a
+ * user-provided pointer. Accesses into the
+ * region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream;
+ * must be unique within any device
+ * @size: size of the region.
+ * @ptr: memory to be mapped; must contain at least @size bytes.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ */
+void memory_region_init_ram_ptr(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ void *ptr);
+
+/**
+ * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
+ * a user-provided pointer.
+ *
+ * A RAM device represents a mapping to a physical device, such as to a PCI
+ * MMIO BAR of a vfio-pci assigned device. The memory region may be mapped
+ * into the VM address space and access to the region will modify memory
+ * directly. However, the memory region should not be included in a memory
+ * dump (device may not be enabled/mapped at the time of the dump), and
+ * operations incompatible with manipulating MMIO should be avoided. Replaces
+ * skip_dump flag.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @ptr: memory to be mapped; must contain at least @size bytes.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ * (For RAM device memory regions, migrating the contents rarely makes sense.)
+ */
+void memory_region_init_ram_device_ptr(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ void *ptr);
+
+/**
+ * memory_region_init_alias: Initialize a memory region that aliases all or a
+ * part of another memory region.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @orig: the region to be referenced; @mr will be equivalent to
+ * @orig between @offset and @offset + @size - 1.
+ * @offset: start of the section in @orig to be referenced.
+ * @size: size of the region.
+ */
+void memory_region_init_alias(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ MemoryRegion *orig,
+ hwaddr offset,
+ uint64_t size);
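+
+/*
+ * Illustrative sketch (hypothetical variables: alias_mr, dev, ram_mr and
+ * system_memory): exposing the first megabyte of an existing RAM region at
+ * a second guest-physical address:
+ *
+ *     memory_region_init_alias(&alias_mr, OBJECT(dev), "ram-below-1m",
+ *                              ram_mr, 0, 1 * MiB);
+ *     memory_region_add_subregion(system_memory, 0xfff00000, &alias_mr);
+ */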
+
+/**
+ * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
+ *
+ * This has the same effect as calling memory_region_init_ram_nomigrate()
+ * and then marking the resulting region read-only with
+ * memory_region_set_readonly().
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM side of the memory region to be migrated; that is the responsibility
+ * of the caller.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream;
+ * must be unique within any device
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_init_rom_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+/**
+ * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
+ * Writes are handled via callbacks.
+ *
+ * Note that this function does not do anything to cause the data in the
+ * RAM side of the memory region to be migrated; that is the responsibility
+ * of the caller.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @ops: callbacks for write access handling (must not be NULL).
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: Region name, becomes part of RAMBlock name used in migration stream;
+ * must be unique within any device
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+/**
+ * memory_region_init_iommu: Initialize a memory region of a custom type
+ * that translates addresses
+ *
+ * An IOMMU region translates addresses and forwards accesses to a target
+ * memory region.
+ *
+ * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
+ * @_iommu_mr should be a pointer to enough memory for an instance of
+ * that subclass, @instance_size is the size of that subclass, and
+ * @mrtypename is its name. This function will initialize @_iommu_mr as an
+ * instance of the subclass, and its methods will then be called to handle
+ * accesses to the memory region. See the documentation of
+ * #IOMMUMemoryRegionClass for further details.
+ *
+ * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
+ * @instance_size: the IOMMUMemoryRegion subclass instance size
+ * @mrtypename: the type name of the #IOMMUMemoryRegion
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_iommu(void *_iommu_mr,
+ size_t instance_size,
+ const char *mrtypename,
+ Object *owner,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_init_ram - Initialize RAM memory region. Accesses into the
+ * region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count (must be
+ * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
+ * @name: name of the memory region
+ * @size: size of the region in bytes
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * This function allocates RAM for a board model or device, and
+ * arranges for it to be migrated (by calling vmstate_register_ram()
+ * if @owner is a DeviceState, or vmstate_register_ram_global() if
+ * @owner is NULL).
+ *
+ * TODO: Currently we restrict @owner to being either NULL (for
+ * global RAM regions with no owner) or devices, so that we can
+ * give the RAM block a unique name for migration purposes.
+ * We should lift this restriction and allow arbitrary Objects.
+ * If you pass a non-NULL non-device @owner then we will assert.
+ */
+void memory_region_init_ram(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp);
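+
+/*
+ * Illustrative sketch (hypothetical board code; machine_mem and the guest
+ * address are placeholders): allocating migratable RAM and mapping it into
+ * the system address space:
+ *
+ *     MemoryRegion *machine_mem = g_new0(MemoryRegion, 1);
+ *     memory_region_init_ram(machine_mem, NULL, "board.ram", 128 * MiB,
+ *                            &error_fatal);
+ *     memory_region_add_subregion(get_system_memory(), 0x40000000,
+ *                                 machine_mem);
+ */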
+
+/**
+ * memory_region_init_rom: Initialize a ROM memory region.
+ *
+ * This has the same effect as calling memory_region_init_ram()
+ * and then marking the resulting region read-only with
+ * memory_region_set_readonly(). This includes arranging for the
+ * contents to be migrated.
+ *
+ * TODO: Currently we restrict @owner to being either NULL (for
+ * global RAM regions with no owner) or devices, so that we can
+ * give the RAM block a unique name for migration purposes.
+ * We should lift this restriction and allow arbitrary Objects.
+ * If you pass a non-NULL non-device @owner then we will assert.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream;
+ * must be unique within any device
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_init_rom(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+/**
+ * memory_region_init_rom_device: Initialize a ROM memory region.
+ * Writes are handled via callbacks.
+ *
+ * This function initializes a memory region backed by RAM for reads
+ * and callbacks for writes, and arranges for the RAM backing to
+ * be migrated (by calling vmstate_register_ram()
+ * if @owner is a DeviceState, or vmstate_register_ram_global() if
+ * @owner is NULL).
+ *
+ * TODO: Currently we restrict @owner to being either NULL (for
+ * global RAM regions with no owner) or devices, so that we can
+ * give the RAM block a unique name for migration purposes.
+ * We should lift this restriction and allow arbitrary Objects.
+ * If you pass a non-NULL non-device @owner then we will assert.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @ops: callbacks for write access handling (must not be NULL).
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: Region name, becomes part of RAMBlock name used in migration stream;
+ * must be unique within any device
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_init_rom_device(MemoryRegion *mr,
+ Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+
+/**
+ * memory_region_owner: get a memory region's owner.
+ *
+ * @mr: the memory region being queried.
+ */
+Object *memory_region_owner(MemoryRegion *mr);
+
+/**
+ * memory_region_size: get a memory region's size.
+ *
+ * @mr: the memory region being queried.
+ */
+uint64_t memory_region_size(MemoryRegion *mr);
+
+/**
+ * memory_region_is_ram: check whether a memory region is random access
+ *
+ * Returns %true if a memory region is random access.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_ram(MemoryRegion *mr)
+{
+ return mr->ram;
+}
+
+/**
+ * memory_region_is_ram_device: check whether a memory region is a ram device
+ *
+ * Returns %true if a memory region is a device-backed RAM region.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_ram_device(MemoryRegion *mr);
+
+/**
+ * memory_region_is_romd: check whether a memory region is in ROMD mode
+ *
+ * Returns %true if a memory region is a ROM device and currently set to allow
+ * direct reads.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_romd(MemoryRegion *mr)
+{
+ return mr->rom_device && mr->romd_mode;
+}
+
+/**
+ * memory_region_is_protected: check whether a memory region is protected
+ *
+ * Returns %true if a memory region is protected RAM and cannot be accessed
+ * via standard mechanisms, e.g. DMA.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_protected(MemoryRegion *mr);
+
+/**
+ * memory_region_get_iommu: check whether a memory region is an iommu
+ *
+ * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
+ * otherwise NULL.
+ *
+ * @mr: the memory region being queried
+ */
+static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
+{
+ if (mr->alias) {
+ return memory_region_get_iommu(mr->alias);
+ }
+ if (mr->is_iommu) {
+ return (IOMMUMemoryRegion *) mr;
+ }
+ return NULL;
+}
+
+/**
+ * memory_region_get_iommu_class_nocheck: returns iommu memory region class
+ * if an iommu or NULL if not
+ *
+ * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
+ * otherwise NULL. This is a fast path that avoids QOM checking; use with caution.
+ *
+ * @iommu_mr: the memory region being queried
+ */
+static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
+ IOMMUMemoryRegion *iommu_mr)
+{
+ return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
+}
+
+#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
+
+/**
+ * memory_region_iommu_get_min_page_size: get minimum supported page size
+ * for an iommu
+ *
+ * Returns minimum supported page size for an iommu.
+ *
+ * @iommu_mr: the memory region being queried
+ */
+uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
+
+/**
+ * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
+ *
+ * Note: for any IOMMU implementation, an in-place mapping change
+ * should be notified with an UNMAP followed by a MAP.
+ *
+ * @iommu_mr: the memory region that was changed
+ * @iommu_idx: the IOMMU index for the translation table which has changed
+ * @event: TLB event with the new entry in the IOMMU translation table.
+ * The entry replaces all old entries for the same virtual I/O address
+ * range.
+ */
+void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
+ int iommu_idx,
+ IOMMUTLBEvent event);
+
+/**
+ * memory_region_notify_iommu_one: notify a change in an IOMMU translation
+ * entry to a single notifier
+ *
+ * This works just like memory_region_notify_iommu(), but it only
+ * notifies a specific notifier, not all of them.
+ *
+ * @notifier: the notifier to be notified
+ * @event: TLB event with the new entry in the IOMMU translation table.
+ * The entry replaces all old entries for the same virtual I/O address
+ * range.
+ */
+void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
+ IOMMUTLBEvent *event);
+
+/**
+ * memory_region_register_iommu_notifier: register a notifier for changes to
+ * IOMMU translation entries.
+ *
+ * Returns 0 on success, or a negative errno otherwise. In particular,
+ * -EINVAL indicates that at least one of the attributes of the notifier
+ * is not supported (flag/range) by the IOMMU memory region. In case of error
+ * the error object must be created.
+ *
+ * @mr: the memory region to observe
+ * @n: the IOMMUNotifier to be added; the notify callback receives a
+ * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
+ * ceases to be valid on exit from the notifier.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+int memory_region_register_iommu_notifier(MemoryRegion *mr,
+ IOMMUNotifier *n, Error **errp);
+
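+/*
+ * Example (illustrative sketch, not part of the original header): registering
+ * a notifier covering the whole address range, assuming the
+ * iommu_notifier_init() helper declared earlier in this header. The callback
+ * name is a placeholder.
+ *
+ *     static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
+ *     {
+ *         qemu_log("iova 0x%" PRIx64 " remapped\n", entry->iova);
+ *     }
+ *
+ *     IOMMUNotifier notifier;
+ *     Error *err = NULL;
+ *
+ *     iommu_notifier_init(&notifier, my_iommu_notify, IOMMU_NOTIFIER_ALL,
+ *                         0, HWADDR_MAX, 0);
+ *     if (memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
+ *                                               &notifier, &err) < 0) {
+ *         error_report_err(err);
+ *     }
+ */
+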
+/**
+ * memory_region_iommu_replay: replay existing IOMMU translations to
+ * a notifier with the minimum page granularity returned by
+ * the IOMMU memory region's get_min_page_size() callback.
+ *
+ * Note: this is not related to record-and-replay functionality.
+ *
+ * @iommu_mr: the memory region to observe
+ * @n: the notifier to which to replay iommu mappings
+ */
+void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
+
+/**
+ * memory_region_unregister_iommu_notifier: unregister a notifier for
+ * changes to IOMMU translation entries.
+ *
+ * @mr: the memory region which was observed and for which notify_stopped()
+ * needs to be called
+ * @n: the notifier to be removed.
+ */
+void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
+ IOMMUNotifier *n);
+
+/**
+ * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
+ * defined on the IOMMU.
+ *
+ * Returns 0 on success, or a negative errno otherwise. In particular,
+ * -EINVAL indicates that the IOMMU does not support the requested
+ * attribute.
+ *
+ * @iommu_mr: the memory region
+ * @attr: the requested attribute
+ * @data: a pointer to the requested attribute data
+ */
+int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
+ enum IOMMUMemoryRegionAttr attr,
+ void *data);
+
+/**
+ * memory_region_iommu_attrs_to_index: return the IOMMU index to
+ * use for translations with the given memory transaction attributes.
+ *
+ * @iommu_mr: the memory region
+ * @attrs: the memory transaction attributes
+ */
+int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
+ MemTxAttrs attrs);
+
+/**
+ * memory_region_iommu_num_indexes: return the total number of IOMMU
+ * indexes that this IOMMU supports.
+ *
+ * @iommu_mr: the memory region
+ */
+int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
+
+/**
+ * memory_region_iommu_set_page_size_mask: set the supported page
+ * sizes for a given IOMMU memory region
+ *
+ * @iommu_mr: IOMMU memory region
+ * @page_size_mask: supported page size mask
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
+ uint64_t page_size_mask,
+ Error **errp);
+
+/**
+ * memory_region_name: get a memory region's name
+ *
+ * Returns the string that was used to initialize the memory region.
+ *
+ * @mr: the memory region being queried
+ */
+const char *memory_region_name(const MemoryRegion *mr);
+
+/**
+ * memory_region_is_logging: return whether a memory region is logging writes
+ *
+ * Returns %true if the memory region is logging writes for the given client
+ *
+ * @mr: the memory region being queried
+ * @client: the client being queried
+ */
+bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
+
+/**
+ * memory_region_get_dirty_log_mask: return the clients for which a
+ * memory region is logging writes.
+ *
+ * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
+ * are the bit indices.
+ *
+ * @mr: the memory region being queried
+ */
+uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
+
+/**
+ * memory_region_is_rom: check whether a memory region is ROM
+ *
+ * Returns %true if a memory region is read-only memory.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_rom(MemoryRegion *mr)
+{
+ return mr->ram && mr->readonly;
+}
+
+/**
+ * memory_region_is_nonvolatile: check whether a memory region is non-volatile
+ *
+ * Returns %true if a memory region is non-volatile memory.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
+{
+ return mr->nonvolatile;
+}
+
+/**
+ * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
+ *
+ * Returns a file descriptor backing a file-based RAM memory region,
+ * or -1 if the region is not a file-based RAM memory region.
+ *
+ * @mr: the RAM or alias memory region being queried.
+ */
+int memory_region_get_fd(MemoryRegion *mr);
+
+/**
+ * memory_region_from_host: Convert a pointer into a RAM memory region
+ * and an offset within it.
+ *
+ * Given a host pointer inside a RAM memory region (created with
+ * memory_region_init_ram() or memory_region_init_ram_ptr()), return
+ * the MemoryRegion and the offset within it.
+ *
+ * Use with care; by the time this function returns, the returned pointer is
+ * not protected by RCU anymore. If the caller is not within an RCU critical
+ * section and does not hold the iothread lock, it must have other means of
+ * protecting the pointer, such as a reference to the region that includes
+ * the incoming ram_addr_t.
+ *
+ * @ptr: the host pointer to be converted
+ * @offset: the offset within memory region
+ */
+MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
+
+/**
+ * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
+ *
+ * Returns a host pointer to a RAM memory region (created with
+ * memory_region_init_ram() or memory_region_init_ram_ptr()).
+ *
+ * Use with care; by the time this function returns, the returned pointer is
+ * not protected by RCU anymore. If the caller is not within an RCU critical
+ * section and does not hold the iothread lock, it must have other means of
+ * protecting the pointer, such as a reference to the region that includes
+ * the incoming ram_addr_t.
+ *
+ * @mr: the memory region being queried.
+ */
+void *memory_region_get_ram_ptr(MemoryRegion *mr);
+
+/* memory_region_ram_resize: Resize a RAM region.
+ *
+ * Resizing RAM while migrating can result in the migration being canceled.
+ * Care has to be taken if the guest might have already detected the memory.
+ *
+ * @mr: a memory region created with @memory_region_init_resizeable_ram.
+ * @newsize: the new size of the region
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
+ Error **errp);
+
+/**
+ * memory_region_msync: Synchronize selected address range of
+ * a memory mapped region
+ *
+ * @mr: the memory region to be synced
+ * @addr: the initial address of the range to be synced
+ * @size: the size of the range to be synced
+ */
+void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
+
+/**
+ * memory_region_writeback: Trigger cache writeback for
+ * selected address range
+ *
+ * @mr: the memory region to be updated
+ * @addr: the initial address of the range to be written back
+ * @size: the size of the range to be written back
+ */
+void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
+
+/**
+ * memory_region_set_log: Turn dirty logging on or off for a region.
+ *
+ * Turns dirty logging on or off for a specified client (display, migration).
+ * Only meaningful for RAM regions.
+ *
+ * @mr: the memory region being updated.
+ * @log: whether dirty logging is to be enabled or disabled.
+ * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
+ */
+void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
+
+/**
+ * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
+ *
+ * Marks a range of bytes as dirty, after it has been dirtied outside
+ * guest code.
+ *
+ * @mr: the memory region being dirtied.
+ * @addr: the address (relative to the start of the region) being dirtied.
+ * @size: size of the range being dirtied.
+ */
+void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size);
+
+/**
+ * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
+ *
+ * This function is called when the caller wants to clear the remote
+ * dirty bitmap of a memory range within the memory region. This can
+ * be used by e.g. KVM to manually clear the dirty log when
+ * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared as supported by the host
+ * kernel.
+ *
+ * @mr: the memory region to clear the dirty log upon
+ * @start: start address offset within the memory region
+ * @len: length of the range for which to clear the dirty bitmap
+ */
+void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
+ hwaddr len);
+
+/**
+ * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
+ * bitmap and clear it.
+ *
+ * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
+ * returns the snapshot. The snapshot can then be used to query dirty
+ * status, using memory_region_snapshot_get_dirty. Snapshotting allows
+ * querying the same page multiple times, which is especially useful for
+ * display updates where the scanlines often are not page aligned.
+ *
+ * The dirty bitmap region which gets copied into the snapshot (and
+ * cleared afterwards) can be larger than requested. The boundaries
+ * are rounded up/down so complete bitmap longs (covering 64 pages on
+ * 64-bit hosts) can be copied over into the bitmap snapshot. This
+ * isn't a problem for display updates as the extra pages are outside
+ * the visible area, and in case the visible area changes a full
+ * display redraw is due anyway. Should other use cases for this
+ * function emerge we might have to revisit this implementation
+ * detail.
+ *
+ * Use g_free to release DirtyBitmapSnapshot.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
+ */
+DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
+ hwaddr addr,
+ hwaddr size,
+ unsigned client);
+
+/**
+ * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
+ * in the specified dirty bitmap snapshot.
+ *
+ * @mr: the memory region being queried.
+ * @snap: the dirty bitmap snapshot
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ */
+bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
+ DirtyBitmapSnapshot *snap,
+ hwaddr addr, hwaddr size);
+
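+/*
+ * Example (illustrative sketch, not part of the original header): the typical
+ * display-update loop built on the two calls above. vram, vram_size, height,
+ * stride and redraw_scanline() are placeholders.
+ *
+ *     DirtyBitmapSnapshot *snap;
+ *     int y;
+ *
+ *     snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
+ *                                                   DIRTY_MEMORY_VGA);
+ *     for (y = 0; y < height; y++) {
+ *         if (memory_region_snapshot_get_dirty(vram, snap, y * stride,
+ *                                              stride)) {
+ *             redraw_scanline(y);
+ *         }
+ *     }
+ *     g_free(snap);
+ */
+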
+/**
+ * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
+ * client.
+ *
+ * Marks a range of pages as no longer dirty.
+ *
+ * @mr: the region being updated.
+ * @addr: the start of the subrange being cleaned.
+ * @size: the size of the subrange being cleaned.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size, unsigned client);
+
+/**
+ * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
+ * TBs (for self-modifying code).
+ *
+ * The MemoryRegionOps->write() callback of a ROM device must use this function
+ * to mark byte ranges that have been modified internally, such as by directly
+ * accessing the memory returned by memory_region_get_ram_ptr().
+ *
+ * This function marks the range dirty and invalidates TBs so that TCG can
+ * detect self-modifying code.
+ *
+ * @mr: the region being flushed.
+ * @addr: the start, relative to the start of the region, of the range being
+ * flushed.
+ * @size: the size, in bytes, of the range being flushed.
+ */
+void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
+
+/**
+ * memory_region_set_readonly: Turn a memory region read-only (or read-write)
+ *
+ * Allows a memory region to be marked as read-only (turning it into a ROM).
+ * Only useful on RAM regions.
+ *
+ * @mr: the region being updated.
+ * @readonly: whether the region is to be ROM or RAM.
+ */
+void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
+
+/**
+ * memory_region_set_nonvolatile: Turn a memory region non-volatile
+ *
+ * Allows a memory region to be marked as non-volatile.
+ * Only useful on RAM regions.
+ *
+ * @mr: the region being updated.
+ * @nonvolatile: whether the region is to be non-volatile.
+ */
+void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
+
+/**
+ * memory_region_rom_device_set_romd: enable/disable ROMD mode
+ *
+ * Allows a ROM device (initialized with memory_region_init_rom_device()) to
+ * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
+ * device is mapped to guest memory and satisfies read access directly.
+ * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
+ * Writes are always handled by the #MemoryRegion.write function.
+ *
+ * @mr: the memory region to be updated
+ * @romd_mode: %true to put the region into ROMD mode
+ */
+void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
+
+/**
+ * memory_region_set_coalescing: Enable memory coalescing for the region.
+ *
+ * Enables writes to a region to be queued for later processing. MMIO ->write
+ * callbacks may be delayed until a non-coalesced MMIO is issued.
+ * Only useful for IO regions. Roughly similar to write-combining hardware.
+ *
+ * @mr: the memory region to be write coalesced
+ */
+void memory_region_set_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
+ * a region.
+ *
+ * Like memory_region_set_coalescing(), but works on a sub-range of a region.
+ * Multiple calls can be issued to coalesce disjoint ranges.
+ *
+ * @mr: the memory region to be updated.
+ * @offset: the start of the range within the region to be coalesced.
+ * @size: the size of the subrange to be coalesced.
+ */
+void memory_region_add_coalescing(MemoryRegion *mr,
+ hwaddr offset,
+ uint64_t size);
+
+/**
+ * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
+ *
+ * Disables any coalescing caused by memory_region_set_coalescing() or
+ * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
+ * hardware.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
+ * accesses.
+ *
+ * Ensure that pending coalesced MMIO requests are flushed before the memory
+ * region is accessed. This property is automatically enabled for all regions
+ * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_set_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
+ * accesses.
+ *
+ * Clear the automatic coalesced MMIO flushing enabled via
+ * memory_region_set_flush_coalesced. Note that this service has no effect on
+ * memory regions that have MMIO coalescing enabled for themselves. For them,
+ * automatic flushing will stop once coalescing is disabled.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_add_eventfd: Request an eventfd to be triggered when a word
+ * is written to a location.
+ *
+ * Marks a word in an IO region (initialized with memory_region_init_io())
+ * as a trigger for an eventfd event. The I/O callback will not be called.
+ * The caller must be prepared to handle failure (that is, take the required
+ * action if the callback _is_ called).
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: event notifier to be triggered when @addr, @size, and @data all match.
+ **/
+void memory_region_add_eventfd(MemoryRegion *mr,
+ hwaddr addr,
+ unsigned size,
+ bool match_data,
+ uint64_t data,
+ EventNotifier *e);
+
+/**
+ * memory_region_del_eventfd: Cancel an eventfd.
+ *
+ * Cancels an eventfd trigger requested by a previous
+ * memory_region_add_eventfd() call.
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: event notifier to be triggered when @addr, @size, and @data all match.
+ */
+void memory_region_del_eventfd(MemoryRegion *mr,
+ hwaddr addr,
+ unsigned size,
+ bool match_data,
+ uint64_t data,
+ EventNotifier *e);
+
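+/*
+ * Example (illustrative sketch, not part of the original header): wiring a
+ * doorbell register to an EventNotifier so that a 4-byte write of 0x1 at
+ * offset 0x40 of an I/O region kicks the notifier instead of invoking the
+ * ->write callback. The offsets and values are placeholders.
+ *
+ *     EventNotifier doorbell;
+ *
+ *     event_notifier_init(&doorbell, 0);
+ *     memory_region_add_eventfd(mmio, 0x40, 4, true, 0x1, &doorbell);
+ *     ...
+ *     memory_region_del_eventfd(mmio, 0x40, 4, true, 0x1, &doorbell);
+ *     event_notifier_cleanup(&doorbell);
+ */
+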
+/**
+ * memory_region_add_subregion: Add a subregion to a container.
+ *
+ * Adds a subregion at @offset. The subregion may not overlap with other
+ * subregions (except for those explicitly marked as overlapping). A region
+ * may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ * initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ */
+void memory_region_add_subregion(MemoryRegion *mr,
+ hwaddr offset,
+ MemoryRegion *subregion);
+/**
+ * memory_region_add_subregion_overlap: Add a subregion to a container
+ * with overlap.
+ *
+ * Adds a subregion at @offset. The subregion may overlap with other
+ * subregions. Conflicts are resolved by having a higher @priority hide a
+ * lower @priority. Subregions without priority are taken as @priority 0.
+ * A region may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ * initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ * @priority: used for resolving overlaps; highest priority wins.
+ */
+void memory_region_add_subregion_overlap(MemoryRegion *mr,
+ hwaddr offset,
+ MemoryRegion *subregion,
+ int priority);
+
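+/*
+ * Example (illustrative sketch, not part of the original header): a container
+ * holding a RAM subregion plus a higher-priority MMIO window that overlaps
+ * part of it. The owner, ops, opaque pointer, sizes and offsets are
+ * placeholders.
+ *
+ *     MemoryRegion *sysmem = get_system_memory();
+ *     MemoryRegion *ram = g_new0(MemoryRegion, 1);
+ *     MemoryRegion *mmio = g_new0(MemoryRegion, 1);
+ *
+ *     memory_region_init_ram(ram, owner, "main-ram", 256 * MiB, &error_fatal);
+ *     memory_region_add_subregion(sysmem, 0x80000000, ram);
+ *
+ *     memory_region_init_io(mmio, owner, &my_dev_ops, dev, "my-dev", 0x1000);
+ *     memory_region_add_subregion_overlap(sysmem, 0x80001000, mmio, 1);
+ */
+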
+/**
+ * memory_region_get_ram_addr: Get the ram address associated with a memory
+ * region
+ *
+ * @mr: the region to be queried
+ */
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
+
+uint64_t memory_region_get_alignment(const MemoryRegion *mr);
+/**
+ * memory_region_del_subregion: Remove a subregion.
+ *
+ * Removes a subregion from its container.
+ *
+ * @mr: the container to be updated.
+ * @subregion: the region being removed; must be a current subregion of @mr.
+ */
+void memory_region_del_subregion(MemoryRegion *mr,
+ MemoryRegion *subregion);
+
+/*
+ * memory_region_set_enabled: dynamically enable or disable a region
+ *
+ * Enables or disables a memory region. A disabled memory region
+ * ignores all accesses to itself and its subregions. It does not
+ * obscure sibling subregions with lower priority - it simply behaves as
+ * if it was removed from the hierarchy.
+ *
+ * Regions default to being enabled.
+ *
+ * @mr: the region to be updated
+ * @enabled: whether to enable or disable the region
+ */
+void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
+
+/*
+ * memory_region_set_address: dynamically update the address of a region
+ *
+ * Dynamically updates the address of a region, relative to its container.
+ * May be used on regions that are currently part of a memory hierarchy.
+ *
+ * @mr: the region to be updated
+ * @addr: new address, relative to container region
+ */
+void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
+
+/*
+ * memory_region_set_size: dynamically update the size of a region.
+ *
+ * Dynamically updates the size of a region.
+ *
+ * @mr: the region to be updated
+ * @size: the new size of the region.
+ */
+void memory_region_set_size(MemoryRegion *mr, uint64_t size);
+
+/*
+ * memory_region_set_alias_offset: dynamically update a memory alias's offset
+ *
+ * Dynamically updates the offset into the target region that an alias points
+ * to, as if the fourth argument to memory_region_init_alias() has changed.
+ *
+ * @mr: the #MemoryRegion to be updated; should be an alias.
+ * @offset: the new offset into the target memory region
+ */
+void memory_region_set_alias_offset(MemoryRegion *mr,
+ hwaddr offset);
+
+/**
+ * memory_region_present: checks if an address relative to a @container
+ * translates into a #MemoryRegion within @container
+ *
+ * Answer whether a #MemoryRegion within @container covers the address
+ * @addr.
+ *
+ * @container: a #MemoryRegion within which @addr is a relative address
+ * @addr: the area within @container to be searched
+ */
+bool memory_region_present(MemoryRegion *container, hwaddr addr);
+
+/**
+ * memory_region_is_mapped: returns true if #MemoryRegion is mapped
+ * into any address space.
+ *
+ * @mr: a #MemoryRegion which should be checked if it's mapped
+ */
+bool memory_region_is_mapped(MemoryRegion *mr);
+
+/**
+ * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
+ * #MemoryRegion
+ *
+ * The #RamDiscardManager cannot change while a memory region is mapped.
+ *
+ * @mr: the #MemoryRegion
+ */
+RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
+
+/**
+ * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
+ * #RamDiscardManager assigned
+ *
+ * @mr: the #MemoryRegion
+ */
+static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
+{
+ return !!memory_region_get_ram_discard_manager(mr);
+}
+
+/**
+ * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
+ * #MemoryRegion
+ *
+ * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
+ * that does not cover RAM, or a #MemoryRegion that already has a
+ * #RamDiscardManager assigned.
+ *
+ * @mr: the #MemoryRegion
+ * @rdm: #RamDiscardManager to set
+ */
+void memory_region_set_ram_discard_manager(MemoryRegion *mr,
+ RamDiscardManager *rdm);
+
+/**
+ * memory_region_find: translate an address/size relative to a
+ * MemoryRegion into a #MemoryRegionSection.
+ *
+ * Locates the first #MemoryRegion within @mr that overlaps the range
+ * given by @addr and @size.
+ *
+ * Returns a #MemoryRegionSection that describes a contiguous overlap.
+ * It will have the following characteristics:
+ * - @size = 0 iff no overlap was found
+ * - @mr is non-%NULL iff an overlap was found
+ *
+ * Remember that in the return value the @offset_within_region is
+ * relative to the returned region (in the .@mr field), not to the
+ * @mr argument.
+ *
+ * Similarly, the .@offset_within_address_space is relative to the
+ * address space that contains both regions, the passed and the
+ * returned one. However, in the special case where the @mr argument
+ * has no container (and thus is the root of the address space), the
+ * following will hold:
+ * - @offset_within_address_space >= @addr
+ * - @offset_within_address_space + .@size <= @addr + @size
+ *
+ * @mr: a MemoryRegion within which @addr is a relative address
+ * @addr: start of the area within @mr to be searched
+ * @size: size of the area to be searched
+ */
+MemoryRegionSection memory_region_find(MemoryRegion *mr,
+ hwaddr addr, uint64_t size);
+
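+/*
+ * Example (illustrative sketch, not part of the original header): resolving
+ * what backs a 4 KiB range of the system memory container. In current QEMU,
+ * memory_region_find() takes a reference on the returned region, so drop it
+ * with memory_region_unref() when done.
+ *
+ *     MemoryRegionSection sec;
+ *
+ *     sec = memory_region_find(get_system_memory(), 0x80000000, 0x1000);
+ *     if (sec.mr) {
+ *         ... use sec.offset_within_region and int128_get64(sec.size) ...
+ *         memory_region_unref(sec.mr);
+ *     }
+ */
+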
+/**
+ * memory_global_dirty_log_sync: synchronize the dirty log for all memory
+ *
+ * Synchronizes the dirty page log for all address spaces.
+ */
+void memory_global_dirty_log_sync(void);
+
+/**
+ * memory_global_after_dirty_log_sync: synchronize vCPUs with dirty bitmap readers
+ *
+ * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
+ * This function must be called after the dirty log bitmap is cleared, and
+ * before dirty guest memory pages are read. If you are using
+ * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
+ * care of doing this.
+ */
+void memory_global_after_dirty_log_sync(void);
+
+/**
+ * memory_region_transaction_begin: Start a transaction.
+ *
+ * During a transaction, changes will be accumulated and made visible
+ * only when the transaction ends (is committed).
+ */
+void memory_region_transaction_begin(void);
+
+/**
+ * memory_region_transaction_commit: Commit a transaction and make changes
+ * visible to the guest.
+ */
+void memory_region_transaction_commit(void);
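+
+/*
+ * Example (illustrative sketch, not part of the original header): batching
+ * several topology updates so the guest observes them atomically. bar_mr and
+ * new_base are placeholders.
+ *
+ *     memory_region_transaction_begin();
+ *     memory_region_set_enabled(bar_mr, false);
+ *     memory_region_set_address(bar_mr, new_base);
+ *     memory_region_set_enabled(bar_mr, true);
+ *     memory_region_transaction_commit();
+ */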
+
+/**
+ * memory_listener_register: register callbacks to be called when memory
+ * sections are mapped or unmapped into an address
+ * space
+ *
+ * @listener: an object containing the callbacks to be called
+ * @filter: if non-%NULL, only regions in this address space will be observed
+ */
+void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
+
+/**
+ * memory_listener_unregister: undo the effect of memory_listener_register()
+ *
+ * @listener: an object containing the callbacks to be removed
+ */
+void memory_listener_unregister(MemoryListener *listener);
+
+/**
+ * memory_global_dirty_log_start: begin dirty logging for all regions
+ *
+ * @flags: purpose of starting dirty log, migration or dirty rate
+ */
+void memory_global_dirty_log_start(unsigned int flags);
+
+/**
+ * memory_global_dirty_log_stop: end dirty logging for all regions
+ *
+ * @flags: purpose of stopping dirty log, migration or dirty rate
+ */
+void memory_global_dirty_log_stop(unsigned int flags);
+
+void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
+
+/**
+ * memory_region_dispatch_read: perform a read directly to the specified
+ * MemoryRegion.
+ *
+ * @mr: #MemoryRegion to access
+ * @addr: address within that region
+ * @pval: pointer to uint64_t which the data is written to
+ * @op: size, sign, and endianness of the memory operation
+ * @attrs: memory transaction attributes to use for the access
+ */
+MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t *pval,
+ MemOp op,
+ MemTxAttrs attrs);
+/**
+ * memory_region_dispatch_write: perform a write directly to the specified
+ * MemoryRegion.
+ *
+ * @mr: #MemoryRegion to access
+ * @addr: address within that region
+ * @data: data to write
+ * @op: size, sign, and endianness of the memory operation
+ * @attrs: memory transaction attributes to use for the access
+ */
+MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t data,
+ MemOp op,
+ MemTxAttrs attrs);
+
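+/*
+ * Example (illustrative sketch, not part of the original header): a 32-bit
+ * little-endian read and write issued directly at a region, bypassing the
+ * address space dispatch. The offsets are placeholders.
+ *
+ *     uint64_t val;
+ *     MemTxResult res;
+ *
+ *     res = memory_region_dispatch_read(mr, 0x10, &val, MO_32 | MO_LE,
+ *                                       MEMTXATTRS_UNSPECIFIED);
+ *     if (res == MEMTX_OK) {
+ *         res = memory_region_dispatch_write(mr, 0x14, val, MO_32 | MO_LE,
+ *                                            MEMTXATTRS_UNSPECIFIED);
+ *     }
+ */
+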
+/**
+ * address_space_init: initializes an address space
+ *
+ * @as: an uninitialized #AddressSpace
+ * @root: a #MemoryRegion that routes addresses for the address space
+ * @name: an address space name. The name is only used for debugging
+ * output.
+ */
+void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
+
+/**
+ * address_space_destroy: destroy an address space
+ *
+ * Releases all resources associated with an address space. After an address space
+ * is destroyed, its root memory region (given by address_space_init()) may be destroyed
+ * as well.
+ *
+ * @as: address space to be destroyed
+ */
+void address_space_destroy(AddressSpace *as);
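+
+/*
+ * Example (illustrative sketch, not part of the original header): a
+ * device-private address space rooted at an alias of system memory. The owner
+ * and names are placeholders.
+ *
+ *     AddressSpace dma_as;
+ *     MemoryRegion *root = g_new0(MemoryRegion, 1);
+ *
+ *     memory_region_init_alias(root, owner, "dev-dma", get_system_memory(),
+ *                              0, UINT64_MAX);
+ *     address_space_init(&dma_as, root, "dev-dma-as");
+ *     ...
+ *     address_space_destroy(&dma_as);
+ */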
+
+/**
+ * address_space_remove_listeners: unregister all listeners of an address space
+ *
+ * Removes all callbacks previously registered with memory_listener_register()
+ * for @as.
+ *
+ * @as: an initialized #AddressSpace
+ */
+void address_space_remove_listeners(AddressSpace *as);
+
+/**
+ * address_space_rw: read from or write to an address space.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to read or write
+ * @is_write: indicates the transfer direction
+ */
+MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, void *buf,
+ hwaddr len, bool is_write);
+
+/**
+ * address_space_write: write to address space.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to write
+ */
+MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs,
+ const void *buf, hwaddr len);
+
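+/*
+ * Example (illustrative sketch, not part of the original header): a DMA-style
+ * write into guest memory with the result checked. The guest physical address
+ * is a placeholder.
+ *
+ *     uint8_t buf[64] = { 0 };
+ *     MemTxResult res;
+ *
+ *     res = address_space_write(&address_space_memory, 0x80000000,
+ *                               MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
+ *     if (res != MEMTX_OK) {
+ *         qemu_log("DMA write failed: 0x%x\n", res);
+ *     }
+ */
+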
+/**
+ * address_space_write_rom: write to address space, including ROM.
+ *
+ * This function writes to the specified address space, but will
+ * write data to both ROM and RAM. This is used for non-guest
+ * writes like writes from the gdb debug stub or initial loading
+ * of ROM contents.
+ *
+ * Note that portions of the write which attempt to write data to
+ * a device will be silently ignored -- only real RAM and ROM will
+ * be written to.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to write
+ */
+MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs,
+ const void *buf, hwaddr len);
+
+/* address_space_ld*: load from an address space
+ * address_space_st*: store to an address space
+ *
+ * These functions perform a load or store of the byte, word,
+ * longword or quad to the specified address within the AddressSpace.
+ * The _le suffixed functions treat the data as little endian;
+ * _be indicates big endian; no suffix indicates "same endianness
+ * as guest CPU".
+ *
+ * The "guest CPU endianness" accessors are deprecated for use outside
+ * target-* code; devices should be CPU-agnostic and use either the LE
+ * or the BE accessors.
+ *
+ * @as #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @val: data value, for stores
+ * @attrs: memory transaction attributes
+ * @result: location to write the success/failure of the transaction;
+ * if NULL, this information is discarded
+ */
+
+#define SUFFIX
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#include "exec/memory_ldst.h.inc"
+
+#define SUFFIX
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#include "exec/memory_ldst_phys.h.inc"
+
+struct MemoryRegionCache {
+ void *ptr;
+ hwaddr xlat;
+ hwaddr len;
+ FlatView *fv;
+ MemoryRegionSection mrs;
+ bool is_write;
+};
+
+#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
+
+
+/* address_space_ld*_cached: load from a cached #MemoryRegion
+ * address_space_st*_cached: store into a cached #MemoryRegion
+ *
+ * These functions perform a load or store of the byte, word,
+ * longword or quad to the specified address. The address is
+ * a physical address in the AddressSpace, but it must lie within
+ * a #MemoryRegion that was mapped with address_space_cache_init.
+ *
+ * The _le suffixed functions treat the data as little endian;
+ * _be indicates big endian; no suffix indicates "same endianness
+ * as guest CPU".
+ *
+ * The "guest CPU endianness" accessors are deprecated for use outside
+ * target-* code; devices should be CPU-agnostic and use either the LE
+ * or the BE accessors.
+ *
+ * @cache: previously initialized #MemoryRegionCache to be accessed
+ * @addr: address within the address space
+ * @val: data value, for stores
+ * @attrs: memory transaction attributes
+ * @result: location to write the success/failure of the transaction;
+ * if NULL, this information is discarded
+ */
+
+#define SUFFIX _cached_slow
+#define ARG1 cache
+#define ARG1_DECL MemoryRegionCache *cache
+#include "exec/memory_ldst.h.inc"
+
+/* Inline fast path for direct RAM access. */
+static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len);
+ if (likely(cache->ptr)) {
+ return ldub_p(cache->ptr + addr);
+ } else {
+ return address_space_ldub_cached_slow(cache, addr, attrs, result);
+ }
+}
+
+static inline void address_space_stb_cached(MemoryRegionCache *cache,
+ hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len);
+ if (likely(cache->ptr)) {
+ stb_p(cache->ptr + addr, val);
+ } else {
+ address_space_stb_cached_slow(cache, addr, val, attrs, result);
+ }
+}
+
+#define ENDIANNESS _le
+#include "exec/memory_ldst_cached.h.inc"
+
+#define ENDIANNESS _be
+#include "exec/memory_ldst_cached.h.inc"
+
+#define SUFFIX _cached
+#define ARG1 cache
+#define ARG1_DECL MemoryRegionCache *cache
+#include "exec/memory_ldst_phys.h.inc"
+
+/* address_space_cache_init: prepare for repeated access to a physical
+ * memory region
+ *
+ * @cache: #MemoryRegionCache to be filled
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @len: length of buffer
+ * @is_write: indicates the transfer direction
+ *
+ * Will only work with RAM, and may map a subset of the requested range by
+ * returning a value that is less than @len. On failure, return a negative
+ * errno value.
+ *
+ * Because it only works with RAM, this function can be used for
+ * read-modify-write operations. In this case, is_write should be %true.
+ *
+ * Note that addresses passed to the address_space_*_cached functions
+ * are relative to @addr.
+ */
+int64_t address_space_cache_init(MemoryRegionCache *cache,
+ AddressSpace *as,
+ hwaddr addr,
+ hwaddr len,
+ bool is_write);
+
+/**
+ * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
+ *
+ * @cache: The #MemoryRegionCache to operate on.
+ * @addr: The first physical address that was written, relative to the
+ * address that was passed to @address_space_cache_init.
+ * @access_len: The number of bytes that were written starting at @addr.
+ */
+void address_space_cache_invalidate(MemoryRegionCache *cache,
+ hwaddr addr,
+ hwaddr access_len);
+
+/**
+ * address_space_cache_destroy: free a #MemoryRegionCache
+ *
+ * @cache: The #MemoryRegionCache whose memory should be released.
+ */
+void address_space_cache_destroy(MemoryRegionCache *cache);
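+
+/*
+ * Example (illustrative sketch, not part of the original header): caching a
+ * 4 KiB window of guest RAM, reading and updating two 32-bit fields in place
+ * and flushing the write. The guest physical address and offsets are
+ * placeholders.
+ *
+ *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
+ *     int64_t mapped;
+ *
+ *     mapped = address_space_cache_init(&cache, &address_space_memory,
+ *                                       0x40000000, 0x1000, true);
+ *     if (mapped >= 0x10) {
+ *         uint32_t v = address_space_ldl_le_cached(&cache, 0x8,
+ *                                                  MEMTXATTRS_UNSPECIFIED,
+ *                                                  NULL);
+ *         address_space_stl_le_cached(&cache, 0xc, v + 1,
+ *                                     MEMTXATTRS_UNSPECIFIED, NULL);
+ *         address_space_cache_invalidate(&cache, 0xc, 4);
+ *     }
+ *     address_space_cache_destroy(&cache);
+ */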
+
+/* address_space_get_iotlb_entry: translate an address into an IOTLB
+ * entry. Should be called from an RCU critical section.
+ */
+IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
+ bool is_write, MemTxAttrs attrs);
+
+/* address_space_translate: translate an address range within an address
+ * space into a MemoryRegion and an address range within that region. Should be
+ * called from an RCU critical section, to avoid that the last reference
+ * to the returned region disappears after address_space_translate returns.
+ *
+ * @fv: #FlatView to be accessed
+ * @addr: address within that address space
+ * @xlat: pointer to address within the returned memory region section's
+ * #MemoryRegion.
+ * @len: pointer to length
+ * @is_write: indicates the transfer direction
+ * @attrs: memory attributes
+ */
+MemoryRegion *flatview_translate(FlatView *fv,
+ hwaddr addr, hwaddr *xlat,
+ hwaddr *len, bool is_write,
+ MemTxAttrs attrs);
+
+static inline MemoryRegion *address_space_translate(AddressSpace *as,
+ hwaddr addr, hwaddr *xlat,
+ hwaddr *len, bool is_write,
+ MemTxAttrs attrs)
+{
+ return flatview_translate(address_space_to_flatview(as),
+ addr, xlat, len, is_write, attrs);
+}
+
+/* address_space_access_valid: check for validity of accessing an address
+ * space range
+ *
+ * Check whether memory is assigned to the given address space range, and
+ * access is permitted by any IOMMU regions that are active for the address
+ * space.
+ *
+ * For now, addr and len should be aligned to a page size. This limitation
+ * will be lifted in the future.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @len: length of the area to be checked
+ * @is_write: indicates the transfer direction
+ * @attrs: memory attributes
+ */
+bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
+ bool is_write, MemTxAttrs attrs);
+
+/* address_space_map: map a physical memory region into a host virtual address
+ *
+ * May map a subset of the requested range, given by and returned in @plen.
+ * May return %NULL and set *@plen to zero, if resources needed to perform
+ * the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ * Use cpu_register_map_client() to know when retrying the map operation is
+ * likely to succeed.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @plen: pointer to length of buffer; updated on return
+ * @is_write: indicates the transfer direction
+ * @attrs: memory attributes
+ */
+void *address_space_map(AddressSpace *as, hwaddr addr,
+ hwaddr *plen, bool is_write, MemTxAttrs attrs);
+
+/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
+ *
+ * Will also mark the memory as dirty if @is_write == %true. @access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ *
+ * @as: #AddressSpace used
+ * @buffer: host pointer as returned by address_space_map()
+ * @len: buffer length as returned by address_space_map()
+ * @access_len: amount of data actually transferred
+ * @is_write: indicates the transfer direction
+ */
+void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
+ bool is_write, hwaddr access_len);
+
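+/*
+ * Example (illustrative sketch, not part of the original header): mapping a
+ * guest buffer for a read-only pass over its contents. gpa is a placeholder
+ * guest physical address.
+ *
+ *     hwaddr len = 0x1000;
+ *     void *p;
+ *
+ *     p = address_space_map(&address_space_memory, gpa, &len, false,
+ *                           MEMTXATTRS_UNSPECIFIED);
+ *     if (p) {
+ *         ... consume up to len bytes at p ...
+ *         address_space_unmap(&address_space_memory, p, len, false, len);
+ *     }
+ */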
+
+/* Internal functions, part of the implementation of address_space_read. */
+MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, void *buf, hwaddr len);
+MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs, void *buf,
+ hwaddr len, hwaddr addr1, hwaddr l,
+ MemoryRegion *mr);
+void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
+
+/* Internal functions, part of the implementation of address_space_read_cached
+ * and address_space_write_cached. */
+MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, void *buf, hwaddr len);
+MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, const void *buf,
+ hwaddr len);
+
+static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
+{
+ if (is_write) {
+ return memory_region_is_ram(mr) && !mr->readonly &&
+ !mr->rom_device && !memory_region_is_ram_device(mr);
+ } else {
+ return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
+ memory_region_is_romd(mr);
+ }
+}
+
+/**
+ * address_space_read: read from an address space.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault). Called within RCU critical section.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ */
+static inline __attribute__((__always_inline__))
+MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, void *buf,
+ hwaddr len)
+{
+ MemTxResult result = MEMTX_OK;
+ hwaddr l, addr1;
+ void *ptr;
+ MemoryRegion *mr;
+ FlatView *fv;
+
+ if (__builtin_constant_p(len)) {
+ if (len) {
+ RCU_READ_LOCK_GUARD();
+ fv = address_space_to_flatview(as);
+ l = len;
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
+ if (len == l && memory_access_is_direct(mr, false)) {
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
+ memcpy(buf, ptr, len);
+ } else {
+ result = flatview_read_continue(fv, addr, attrs, buf, len,
+ addr1, l, mr);
+ }
+ }
+ } else {
+ result = address_space_read_full(as, addr, attrs, buf, len);
+ }
+ return result;
+}
+
+/**
+ * address_space_read_cached: read from a cached RAM region
+ *
+ * @cache: Cached region to be addressed
+ * @addr: address relative to the base of the RAM region
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ */
+static inline MemTxResult
+address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
+ void *buf, hwaddr len)
+{
+ assert(addr < cache->len && len <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
+ if (likely(cache->ptr)) {
+ memcpy(buf, cache->ptr + addr, len);
+ return MEMTX_OK;
+ } else {
+ return address_space_read_cached_slow(cache, addr, buf, len);
+ }
+}
+
+/**
+ * address_space_write_cached: write to a cached RAM region
+ *
+ * @cache: Cached region to be addressed
+ * @addr: address relative to the base of the RAM region
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ */
+static inline MemTxResult
+address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
+ const void *buf, hwaddr len)
+{
+ assert(addr < cache->len && len <= cache->len - addr);
+ if (likely(cache->ptr)) {
+ memcpy(cache->ptr + addr, buf, len);
+ return MEMTX_OK;
+ } else {
+ return address_space_write_cached_slow(cache, addr, buf, len);
+ }
+}
+
+#ifdef NEED_CPU_H
+/* enum device_endian to MemOp. */
+static inline MemOp devend_memop(enum device_endian end)
+{
+ QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
+ DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
+
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+ /* Swap if non-host endianness or native (target) endianness */
+ return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
+#else
+ const int non_host_endianness =
+ DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;
+
+ /* In this case, native (target) endianness needs no swap. */
+ return (end == non_host_endianness) ? MO_BSWAP : 0;
+#endif
+}
+#endif
+
+/*
+ * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
+ * to manage the actual amount of memory consumed by the VM (then, the memory
+ * provided by RAM blocks might be bigger than the desired memory consumption).
+ * This *must* be set if:
+ * - Discarding parts of a RAM block does not result in the change being
+ * reflected in the VM and the pages getting freed.
+ * - All memory in RAM blocks is pinned or duplicated, invalidating any previous
+ * discards blindly.
+ * - Discarding parts of a RAM block will result in integrity issues (e.g.,
+ * encrypted VMs).
+ * Technologies that only temporarily pin the current working set of a
+ * driver are fine, because we don't expect such pages to be discarded
+ * (esp. based on guest action like balloon inflation).
+ *
+ * This is *not* to be used to protect from concurrent discards (esp.,
+ * postcopy).
+ *
+ * Returns 0 if successful. Returns -EBUSY if a technology that relies on
+ * discards to work reliably is active.
+ */
+int ram_block_discard_disable(bool state);
+
+/*
+ * See ram_block_discard_disable(): only disable uncoordinated discards,
+ * keeping coordinated discards (via the RamDiscardManager) enabled.
+ */
+int ram_block_uncoordinated_discard_disable(bool state);
+
+/*
+ * Inhibit technologies that disable discarding of pages in RAM blocks.
+ *
+ * Returns 0 if successful. Returns -EBUSY if discards are already set to
+ * broken.
+ */
+int ram_block_discard_require(bool state);
+
+/*
+ * See ram_block_discard_require(): only inhibit technologies that disable
+ * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
+ * technologies that only inhibit uncoordinated discards (via the
+ * RamDiscardManager).
+ */
+int ram_block_coordinated_discard_require(bool state);
+
+/*
+ * Test if any discarding of memory in ram blocks is disabled.
+ */
+bool ram_block_discard_is_disabled(void);
+
+/*
+ * Test if any discarding of memory in ram blocks is required to work reliably.
+ */
+bool ram_block_discard_is_required(void);
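+
+/*
+ * Example (illustrative sketch, not part of the original header): how a user
+ * that pins all guest memory (e.g. a VFIO-style device) might guard against
+ * discards for the lifetime of the device. errp is a placeholder.
+ *
+ *     if (ram_block_discard_disable(true)) {
+ *         error_setg(errp, "RAM discarding (e.g. memory ballooning) is active");
+ *         return;
+ *     }
+ *     ...
+ *     ram_block_discard_disable(false);
+ */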
+
+#endif
+
+#endif
diff --git a/include/exec/memory_ldst.h.inc b/include/exec/memory_ldst.h.inc
new file mode 100644
index 000000000..7c3a641f7
--- /dev/null
+++ b/include/exec/memory_ldst.h.inc
@@ -0,0 +1,71 @@
+/*
+ * Physical memory access templates
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2015 Linaro, Inc.
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef TARGET_ENDIANNESS
+extern uint16_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
+extern uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
+extern uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
+extern void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
+extern void glue(address_space_stw, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
+extern void glue(address_space_stl, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
+extern void glue(address_space_stq, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
+#else
+extern uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
+extern uint16_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
+extern uint16_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
+extern uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
+extern uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
+extern uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
+extern uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
+extern void glue(address_space_stb, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result);
+extern void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
+extern void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
+extern void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
+extern void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
+extern void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
+extern void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
+#endif
+
+#undef ARG1_DECL
+#undef ARG1
+#undef SUFFIX
+#undef TARGET_ENDIANNESS
diff --git a/include/exec/memory_ldst_cached.h.inc b/include/exec/memory_ldst_cached.h.inc
new file mode 100644
index 000000000..d7834f852
--- /dev/null
+++ b/include/exec/memory_ldst_cached.h.inc
@@ -0,0 +1,111 @@
+/*
+ * Memory access templates for MemoryRegionCache
+ *
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define ADDRESS_SPACE_LD_CACHED(size) \
+ glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached))
+#define ADDRESS_SPACE_LD_CACHED_SLOW(size) \
+ glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached_slow))
+#define LD_P(size) \
+ glue(glue(ld, size), glue(ENDIANNESS, _p))
+
+static inline uint16_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len && 2 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 2, cache->mrs.mr);
+ if (likely(cache->ptr)) {
+ return LD_P(uw)(cache->ptr + addr);
+ } else {
+ return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(cache, addr, attrs, result);
+ }
+}
+
+static inline uint32_t ADDRESS_SPACE_LD_CACHED(l)(MemoryRegionCache *cache,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len && 4 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 4, cache->mrs.mr);
+ if (likely(cache->ptr)) {
+ return LD_P(l)(cache->ptr + addr);
+ } else {
+ return ADDRESS_SPACE_LD_CACHED_SLOW(l)(cache, addr, attrs, result);
+ }
+}
+
+static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(MemoryRegionCache *cache,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len && 8 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 8, cache->mrs.mr);
+ if (likely(cache->ptr)) {
+ return LD_P(q)(cache->ptr + addr);
+ } else {
+ return ADDRESS_SPACE_LD_CACHED_SLOW(q)(cache, addr, attrs, result);
+ }
+}
+
+#undef ADDRESS_SPACE_LD_CACHED
+#undef ADDRESS_SPACE_LD_CACHED_SLOW
+#undef LD_P
+
+#define ADDRESS_SPACE_ST_CACHED(size) \
+ glue(glue(address_space_st, size), glue(ENDIANNESS, _cached))
+#define ADDRESS_SPACE_ST_CACHED_SLOW(size) \
+ glue(glue(address_space_st, size), glue(ENDIANNESS, _cached_slow))
+#define ST_P(size) \
+ glue(glue(st, size), glue(ENDIANNESS, _p))
+
+static inline void ADDRESS_SPACE_ST_CACHED(w)(MemoryRegionCache *cache,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len && 2 <= cache->len - addr);
+ if (likely(cache->ptr)) {
+ ST_P(w)(cache->ptr + addr, val);
+ } else {
+ ADDRESS_SPACE_ST_CACHED_SLOW(w)(cache, addr, val, attrs, result);
+ }
+}
+
+static inline void ADDRESS_SPACE_ST_CACHED(l)(MemoryRegionCache *cache,
+ hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len && 4 <= cache->len - addr);
+ if (likely(cache->ptr)) {
+ ST_P(l)(cache->ptr + addr, val);
+ } else {
+ ADDRESS_SPACE_ST_CACHED_SLOW(l)(cache, addr, val, attrs, result);
+ }
+}
+
+static inline void ADDRESS_SPACE_ST_CACHED(q)(MemoryRegionCache *cache,
+ hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len && 8 <= cache->len - addr);
+ if (likely(cache->ptr)) {
+ ST_P(q)(cache->ptr + addr, val);
+ } else {
+ ADDRESS_SPACE_ST_CACHED_SLOW(q)(cache, addr, val, attrs, result);
+ }
+}
+
+#undef ADDRESS_SPACE_ST_CACHED
+#undef ADDRESS_SPACE_ST_CACHED_SLOW
+#undef ST_P
+
+#undef ENDIANNESS
diff --git a/include/exec/memory_ldst_phys.h.inc b/include/exec/memory_ldst_phys.h.inc
new file mode 100644
index 000000000..ecd678610
--- /dev/null
+++ b/include/exec/memory_ldst_phys.h.inc
@@ -0,0 +1,147 @@
+/*
+ * Physical memory access templates
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2015 Linaro, Inc.
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef TARGET_ENDIANNESS
+static inline uint16_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint32_t glue(ldl_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_ldl, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint64_t glue(ldq_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_ldq, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
+{
+ glue(address_space_stw, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline void glue(stl_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
+{
+ glue(address_space_stl, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline void glue(stq_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
+{
+ glue(address_space_stq, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+#else
+static inline uint8_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_ldub, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint16_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw_le, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint16_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw_be, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint32_t glue(ldl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_ldl_le, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint32_t glue(ldl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_ldl_be, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint64_t glue(ldq_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_ldq_le, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint64_t glue(ldq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_ldq_be, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline void glue(stb_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint8_t val)
+{
+ glue(address_space_stb, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline void glue(stw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
+{
+ glue(address_space_stw_le, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline void glue(stw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
+{
+ glue(address_space_stw_be, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline void glue(stl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
+{
+ glue(address_space_stl_le, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline void glue(stl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
+{
+ glue(address_space_stl_be, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline void glue(stq_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
+{
+ glue(address_space_stq_le, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline void glue(stq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
+{
+ glue(address_space_stq_be, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+#endif
+
+#undef ARG1_DECL
+#undef ARG1
+#undef SUFFIX
+#undef TARGET_ENDIANNESS
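
Editor's note: the template above is parameterized by its includer, which defines SUFFIX (appended to every generated function name), ARG1/ARG1_DECL (the leading parameter), and optionally TARGET_ENDIANNESS to select the target-endian wrappers. A minimal, purely hypothetical sketch of the expansion (the `_example` suffix and the resulting address_space_ldl_example name are illustrative only; the real includers such as cpu-all.h and memory.h supply their own values):

    #define SUFFIX       _example
    #define ARG1         as
    #define ARG1_DECL    AddressSpace *as
    #define TARGET_ENDIANNESS

    /* ldl_phys then expands to roughly: */
    static inline uint32_t ldl_phys_example(AddressSpace *as, hwaddr addr)
    {
        return address_space_ldl_example(as, addr,
                                         MEMTXATTRS_UNSPECIFIED, NULL);
    }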
diff --git a/include/exec/page-vary.h b/include/exec/page-vary.h
new file mode 100644
index 000000000..c22a7a742
--- /dev/null
+++ b/include/exec/page-vary.h
@@ -0,0 +1,34 @@
+/*
+ * Definitions for cpus with variable page sizes.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef EXEC_PAGE_VARY_H
+#define EXEC_PAGE_VARY_H
+
+typedef struct {
+ bool decided;
+ int bits;
+ uint64_t mask;
+} TargetPageBits;
+
+#ifdef IN_PAGE_VARY
+extern bool set_preferred_target_page_bits_common(int bits);
+extern void finalize_target_page_bits_common(int min);
+#endif
+
+#endif /* EXEC_PAGE_VARY_H */
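
Editor's note: the two _common helpers are only compiled into the page-vary translation units (guarded by IN_PAGE_VARY). A rough sketch of how finalize_target_page_bits_common is meant to use the structure, assuming a file-local TargetPageBits global (the name and the exact policy below are illustrative assumptions, not the real implementation):

    static TargetPageBits target_page_state;   /* hypothetical global */

    void finalize_target_page_bits_common(int min)
    {
        if (target_page_state.bits == 0) {
            target_page_state.bits = min;      /* no target expressed a preference */
        }
        target_page_state.mask = (uint64_t)-1 << target_page_state.bits;
        target_page_state.decided = true;
    }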
diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h
new file mode 100644
index 000000000..f92f16973
--- /dev/null
+++ b/include/exec/plugin-gen.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * plugin-gen.h - TCG-dependent definitions for generating plugin code
+ *
+ * This header should be included only from plugin.c and C files that emit
+ * TCG code.
+ */
+#ifndef QEMU_PLUGIN_GEN_H
+#define QEMU_PLUGIN_GEN_H
+
+#include "qemu/plugin.h"
+#include "tcg/tcg.h"
+
+struct DisasContextBase;
+
+#ifdef CONFIG_PLUGIN
+
+bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool suppress);
+void plugin_gen_tb_end(CPUState *cpu);
+void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db);
+void plugin_gen_insn_end(void);
+
+void plugin_gen_disable_mem_helpers(void);
+void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info);
+
+static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size)
+{
+ struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn;
+ abi_ptr off;
+
+ if (insn == NULL) {
+ return;
+ }
+ off = pc - insn->vaddr;
+ if (off < insn->data->len) {
+ g_byte_array_set_size(insn->data, off);
+ } else if (off > insn->data->len) {
+ /* we have an unexpected gap */
+ g_assert_not_reached();
+ }
+
+ insn->data = g_byte_array_append(insn->data, from, size);
+}
+
+#else /* !CONFIG_PLUGIN */
+
+static inline
+bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool suppress)
+{
+ return false;
+}
+
+static inline
+void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db)
+{ }
+
+static inline void plugin_gen_insn_end(void)
+{ }
+
+static inline void plugin_gen_tb_end(CPUState *cpu)
+{ }
+
+static inline void plugin_gen_disable_mem_helpers(void)
+{ }
+
+static inline void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
+{ }
+
+static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size)
+{ }
+
+#endif /* CONFIG_PLUGIN */
+
+#endif /* QEMU_PLUGIN_GEN_H */
+
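
Editor's note: for orientation, a hedged sketch of how the generic translation loop is expected to drive these hooks (the real sequencing lives in accel/tcg/translator.c; this skeleton only shows the call order, and example_translate_tb is a hypothetical name):

    static void example_translate_tb(CPUState *cpu, TranslationBlock *tb,
                                     struct DisasContextBase *db, int n_insns)
    {
        bool plugin_enabled = plugin_gen_tb_start(cpu, tb, false);

        for (int i = 0; i < n_insns; i++) {
            if (plugin_enabled) {
                plugin_gen_insn_start(cpu, db);
            }
            /* the target front end translates one instruction here, feeding
             * the fetched bytes to plugin_insn_append() */
            if (plugin_enabled) {
                plugin_gen_insn_end();
            }
        }
        if (plugin_enabled) {
            plugin_gen_tb_end(cpu);
        }
    }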
diff --git a/include/exec/poison.h b/include/exec/poison.h
new file mode 100644
index 000000000..7ad4ad18e
--- /dev/null
+++ b/include/exec/poison.h
@@ -0,0 +1,96 @@
+/* Poison identifiers that should not be used when building
+ target independent device code. */
+
+#ifndef HW_POISON_H
+#define HW_POISON_H
+
+#include "config-poison.h"
+
+#pragma GCC poison TARGET_I386
+#pragma GCC poison TARGET_X86_64
+#pragma GCC poison TARGET_AARCH64
+#pragma GCC poison TARGET_ALPHA
+#pragma GCC poison TARGET_ARM
+#pragma GCC poison TARGET_CRIS
+#pragma GCC poison TARGET_HEXAGON
+#pragma GCC poison TARGET_HPPA
+#pragma GCC poison TARGET_M68K
+#pragma GCC poison TARGET_MICROBLAZE
+#pragma GCC poison TARGET_MIPS
+#pragma GCC poison TARGET_ABI_MIPSN32
+#pragma GCC poison TARGET_ABI_MIPSO32
+#pragma GCC poison TARGET_MIPS64
+#pragma GCC poison TARGET_ABI_MIPSN64
+#pragma GCC poison TARGET_NIOS2
+#pragma GCC poison TARGET_OPENRISC
+#pragma GCC poison TARGET_PPC
+#pragma GCC poison TARGET_PPC64
+#pragma GCC poison TARGET_ABI32
+#pragma GCC poison TARGET_RX
+#pragma GCC poison TARGET_S390X
+#pragma GCC poison TARGET_SH4
+#pragma GCC poison TARGET_SPARC
+#pragma GCC poison TARGET_SPARC64
+#pragma GCC poison TARGET_TRICORE
+#pragma GCC poison TARGET_XTENSA
+
+#pragma GCC poison TARGET_ALIGNED_ONLY
+#pragma GCC poison TARGET_HAS_BFLT
+#pragma GCC poison TARGET_NAME
+#pragma GCC poison TARGET_SUPPORTS_MTTCG
+#pragma GCC poison TARGET_WORDS_BIGENDIAN
+#pragma GCC poison BSWAP_NEEDED
+
+#pragma GCC poison TARGET_LONG_BITS
+#pragma GCC poison TARGET_FMT_lx
+#pragma GCC poison TARGET_FMT_ld
+#pragma GCC poison TARGET_FMT_lu
+
+#pragma GCC poison TARGET_PAGE_SIZE
+#pragma GCC poison TARGET_PAGE_MASK
+#pragma GCC poison TARGET_PAGE_BITS
+#pragma GCC poison TARGET_PAGE_ALIGN
+
+#pragma GCC poison CPUArchState
+
+#pragma GCC poison CPU_INTERRUPT_HARD
+#pragma GCC poison CPU_INTERRUPT_EXITTB
+#pragma GCC poison CPU_INTERRUPT_HALT
+#pragma GCC poison CPU_INTERRUPT_DEBUG
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_0
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_1
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
+
+#pragma GCC poison CONFIG_ALPHA_DIS
+#pragma GCC poison CONFIG_ARM_A64_DIS
+#pragma GCC poison CONFIG_ARM_DIS
+#pragma GCC poison CONFIG_CRIS_DIS
+#pragma GCC poison CONFIG_HPPA_DIS
+#pragma GCC poison CONFIG_I386_DIS
+#pragma GCC poison CONFIG_HEXAGON_DIS
+#pragma GCC poison CONFIG_M68K_DIS
+#pragma GCC poison CONFIG_MICROBLAZE_DIS
+#pragma GCC poison CONFIG_MIPS_DIS
+#pragma GCC poison CONFIG_NANOMIPS_DIS
+#pragma GCC poison CONFIG_NIOS2_DIS
+#pragma GCC poison CONFIG_PPC_DIS
+#pragma GCC poison CONFIG_RISCV_DIS
+#pragma GCC poison CONFIG_S390_DIS
+#pragma GCC poison CONFIG_SH4_DIS
+#pragma GCC poison CONFIG_SPARC_DIS
+#pragma GCC poison CONFIG_XTENSA_DIS
+
+#pragma GCC poison CONFIG_HAX
+#pragma GCC poison CONFIG_HVF
+#pragma GCC poison CONFIG_LINUX_USER
+#pragma GCC poison CONFIG_KVM
+#pragma GCC poison CONFIG_SOFTMMU
+#pragma GCC poison CONFIG_WHPX
+#pragma GCC poison CONFIG_XEN
+
+#endif
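
Editor's note: the effect of these pragmas is a hard compile-time failure whenever target-independent code touches one of the listed identifiers, for example:

    /* hypothetical common (target-independent) source file */
    #include "exec/poison.h"

    int bad(void)
    {
        return TARGET_PAGE_SIZE;   /* error: attempt to use poisoned "TARGET_PAGE_SIZE" */
    }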
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
new file mode 100644
index 000000000..64fb936c7
--- /dev/null
+++ b/include/exec/ram_addr.h
@@ -0,0 +1,523 @@
+/*
+ * Declarations for cpu physical memory functions
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c and memory.c ONLY. Do not include it.
+ * The functions declared here will be removed soon.
+ */
+
+#ifndef RAM_ADDR_H
+#define RAM_ADDR_H
+
+#ifndef CONFIG_USER_ONLY
+#include "cpu.h"
+#include "sysemu/xen.h"
+#include "sysemu/tcg.h"
+#include "exec/ramlist.h"
+#include "exec/ramblock.h"
+
+extern uint64_t total_dirty_pages;
+
+/**
+ * clear_bmap_size: calculate clear bitmap size
+ *
+ * @pages: number of guest pages
+ * @shift: guest page number shift
+ *
+ * Returns: number of bits for the clear bitmap
+ */
+static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
+{
+ return DIV_ROUND_UP(pages, 1UL << shift);
+}
+
+/**
+ * clear_bmap_set: set clear bitmap for the page range
+ *
+ * @rb: the ramblock to operate on
+ * @start: the start page number
+ * @size: number of pages to set in the bitmap
+ *
+ * Returns: None
+ */
+static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
+ uint64_t npages)
+{
+ uint8_t shift = rb->clear_bmap_shift;
+
+ bitmap_set_atomic(rb->clear_bmap, start >> shift,
+ clear_bmap_size(npages, shift));
+}
+
+/**
+ * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
+ *
+ * @rb: the ramblock to operate on
+ * @page: the page number to check
+ *
+ * Returns: true if the bit was set, false otherwise
+ */
+static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
+{
+ uint8_t shift = rb->clear_bmap_shift;
+
+ return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
+}
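
Editor's note, worked example: assuming clear_bmap_shift == 6 (one clear-bitmap bit per 2^6 = 64 guest pages), clear_bmap_size(1000, 6) returns DIV_ROUND_UP(1000, 64) = 16 bits, and clear_bmap_set(rb, 128, 256) sets bits 2..5, marking guest pages 128..383 as needing a log-clear.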
+
+static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
+{
+    return b && b->host && offset < b->used_length;
+}
+
+static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
+{
+ assert(offset_in_ramblock(block, offset));
+ return (char *)block->host + offset;
+}
+
+static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
+ RAMBlock *rb)
+{
+ uint64_t host_addr_offset =
+ (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
+ return host_addr_offset >> TARGET_PAGE_BITS;
+}
+
+bool ramblock_is_pmem(RAMBlock *rb);
+
+long qemu_minrampagesize(void);
+long qemu_maxrampagesize(void);
+
+/**
+ * qemu_ram_alloc_from_file,
+ * qemu_ram_alloc_from_fd: Allocate a ram block from the specified backing
+ * file or device
+ *
+ * Parameters:
+ * @size: the size in bytes of the ram block
+ * @mr: the memory region where the ram block is
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ * RAM_NORESERVE.
+ * @mem_path or @fd: specify the backing file or device
+ * @readonly: true to open @path for reading, false for read/write.
+ * @errp: pointer to Error*, to store an error if it happens
+ *
+ * Return:
+ * On success, return a pointer to the ram block.
+ * On failure, return NULL.
+ */
+RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
+ uint32_t ram_flags, const char *mem_path,
+ bool readonly, Error **errp);
+RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
+ uint32_t ram_flags, int fd, off_t offset,
+ bool readonly, Error **errp);
+
+RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+ MemoryRegion *mr, Error **errp);
+RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, MemoryRegion *mr,
+ Error **errp);
+RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
+ void (*resized)(const char*,
+ uint64_t length,
+ void *host),
+ MemoryRegion *mr, Error **errp);
+void qemu_ram_free(RAMBlock *block);
+
+int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
+
+void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);
+
+/* Clear whole block of mem */
+static inline void qemu_ram_block_writeback(RAMBlock *block)
+{
+ qemu_ram_msync(block, 0, block->used_length);
+}
+
+#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
+#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
+
+void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);
+
+static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
+ ram_addr_t length,
+ unsigned client)
+{
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ unsigned long idx, offset, base;
+ bool dirty = false;
+
+ assert(client < DIRTY_MEMORY_NUM);
+
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+
+ WITH_RCU_READ_LOCK_GUARD() {
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long num = next - base;
+ unsigned long found = find_next_bit(blocks->blocks[idx],
+ num, offset);
+ if (found < num) {
+ dirty = true;
+ break;
+ }
+
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
+ }
+ }
+
+ return dirty;
+}
+
+static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
+ ram_addr_t length,
+ unsigned client)
+{
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ unsigned long idx, offset, base;
+ bool dirty = true;
+
+ assert(client < DIRTY_MEMORY_NUM);
+
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+
+ RCU_READ_LOCK_GUARD();
+
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long num = next - base;
+ unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
+ if (found < num) {
+ dirty = false;
+ break;
+ }
+
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
+ }
+
+ return dirty;
+}
+
+static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
+ unsigned client)
+{
+ return cpu_physical_memory_get_dirty(addr, 1, client);
+}
+
+static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
+{
+ bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
+ bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
+ bool migration =
+ cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
+ return !(vga && code && migration);
+}
+
+static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
+ ram_addr_t length,
+ uint8_t mask)
+{
+ uint8_t ret = 0;
+
+ if (mask & (1 << DIRTY_MEMORY_VGA) &&
+ !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
+ ret |= (1 << DIRTY_MEMORY_VGA);
+ }
+ if (mask & (1 << DIRTY_MEMORY_CODE) &&
+ !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
+ ret |= (1 << DIRTY_MEMORY_CODE);
+ }
+ if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
+ !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
+ ret |= (1 << DIRTY_MEMORY_MIGRATION);
+ }
+ return ret;
+}
+
+static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
+ unsigned client)
+{
+ unsigned long page, idx, offset;
+ DirtyMemoryBlocks *blocks;
+
+ assert(client < DIRTY_MEMORY_NUM);
+
+ page = addr >> TARGET_PAGE_BITS;
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+
+ RCU_READ_LOCK_GUARD();
+
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ set_bit_atomic(offset, blocks->blocks[idx]);
+}
+
+static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
+ ram_addr_t length,
+ uint8_t mask)
+{
+ DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
+ unsigned long end, page;
+ unsigned long idx, offset, base;
+ int i;
+
+ if (!mask && !xen_enabled()) {
+ return;
+ }
+
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+
+ WITH_RCU_READ_LOCK_GUARD() {
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
+ }
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+
+ if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+ offset, next - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+ offset, next - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+ offset, next - page);
+ }
+
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
+ }
+ }
+
+ xen_hvm_modified_memory(start, length);
+}
+
+#if !defined(_WIN32)
+static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
+ ram_addr_t start,
+ ram_addr_t pages)
+{
+ unsigned long i, j;
+ unsigned long page_number, c;
+ hwaddr addr;
+ ram_addr_t ram_addr;
+ unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
+ unsigned long hpratio = qemu_real_host_page_size / TARGET_PAGE_SIZE;
+ unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+
+ /* start address is aligned at the start of a word? */
+ if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
+ (hpratio == 1)) {
+ unsigned long **blocks[DIRTY_MEMORY_NUM];
+ unsigned long idx;
+ unsigned long offset;
+ long k;
+ long nr = BITS_TO_LONGS(pages);
+
+ idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
+ DIRTY_MEMORY_BLOCK_SIZE);
+
+ WITH_RCU_READ_LOCK_GUARD() {
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] =
+ qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+ }
+
+ for (k = 0; k < nr; k++) {
+ if (bitmap[k]) {
+ unsigned long temp = leul_to_cpu(bitmap[k]);
+
+ qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
+
+ if (global_dirty_tracking) {
+ qatomic_or(
+ &blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+ temp);
+ if (unlikely(
+ global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
+ total_dirty_pages += ctpopl(temp);
+ }
+ }
+
+ if (tcg_enabled()) {
+ qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
+ temp);
+ }
+ }
+
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
+ }
+ }
+
+ xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
+ } else {
+ uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
+
+ if (!global_dirty_tracking) {
+ clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
+ }
+
+ /*
+ * bitmap-traveling is faster than memory-traveling (for addr...)
+ * especially when most of the memory is not dirty.
+ */
+ for (i = 0; i < len; i++) {
+ if (bitmap[i] != 0) {
+ c = leul_to_cpu(bitmap[i]);
+ if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
+ total_dirty_pages += ctpopl(c);
+ }
+ do {
+ j = ctzl(c);
+ c &= ~(1ul << j);
+ page_number = (i * HOST_LONG_BITS + j) * hpratio;
+ addr = page_number * TARGET_PAGE_SIZE;
+ ram_addr = start + addr;
+ cpu_physical_memory_set_dirty_range(ram_addr,
+ TARGET_PAGE_SIZE * hpratio, clients);
+ } while (c != 0);
+ }
+ }
+ }
+}
+#endif /* not _WIN32 */
+
+bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
+ ram_addr_t length,
+ unsigned client);
+
+DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
+ (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
+
+bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
+ ram_addr_t start,
+ ram_addr_t length);
+
+static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
+ ram_addr_t length)
+{
+ cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
+ cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
+ cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
+}
+
+
+/* Called with RCU critical section */
+static inline
+uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
+ ram_addr_t start,
+ ram_addr_t length)
+{
+ ram_addr_t addr;
+ unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
+ uint64_t num_dirty = 0;
+ unsigned long *dest = rb->bmap;
+
+    /* start address and length are aligned at the start of a word? */
+ if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
+ (start + rb->offset) &&
+ !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
+ int k;
+ int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
+ unsigned long * const *src;
+ unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
+ DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+
+ src = qatomic_rcu_read(
+ &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
+
+ for (k = page; k < page + nr; k++) {
+ if (src[idx][offset]) {
+ unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
+ unsigned long new_dirty;
+ new_dirty = ~dest[k];
+ dest[k] |= bits;
+ new_dirty &= bits;
+ num_dirty += ctpopl(new_dirty);
+ }
+
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
+ }
+
+ if (rb->clear_bmap) {
+ /*
+ * Postpone the dirty bitmap clear to the point before we
+ * really send the pages, also we will split the clear
+ * dirty procedure into smaller chunks.
+ */
+ clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
+ length >> TARGET_PAGE_BITS);
+ } else {
+ /* Slow path - still do that in a huge chunk */
+ memory_region_clear_dirty_bitmap(rb->mr, start, length);
+ }
+ } else {
+ ram_addr_t offset = rb->offset;
+
+ for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+ if (cpu_physical_memory_test_and_clear_dirty(
+ start + addr + offset,
+ TARGET_PAGE_SIZE,
+ DIRTY_MEMORY_MIGRATION)) {
+ long k = (start + addr) >> TARGET_PAGE_BITS;
+ if (!test_and_set_bit(k, dest)) {
+ num_dirty++;
+ }
+ }
+ }
+ }
+
+ return num_dirty;
+}
+#endif
+#endif
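
Editor's note: a hedged usage sketch for the file-backed allocation API documented above; the memory region mr, the path, and the size are placeholders chosen for illustration:

    Error *err = NULL;
    RAMBlock *rb = qemu_ram_alloc_from_file(64 * MiB, mr, RAM_SHARED,
                                            "/dev/shm/guest-ram",
                                            false /* read/write */, &err);
    if (!rb) {
        error_report_err(err);      /* from qemu/error-report.h */
    }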
diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h
new file mode 100644
index 000000000..664701b75
--- /dev/null
+++ b/include/exec/ramblock.h
@@ -0,0 +1,74 @@
+/*
+ * Declarations for cpu physical memory functions
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c and memory.c ONLY. Do not include it.
+ * The functions declared here will be removed soon.
+ */
+
+#ifndef QEMU_EXEC_RAMBLOCK_H
+#define QEMU_EXEC_RAMBLOCK_H
+
+#ifndef CONFIG_USER_ONLY
+#include "cpu-common.h"
+
+struct RAMBlock {
+ struct rcu_head rcu;
+ struct MemoryRegion *mr;
+ uint8_t *host;
+ uint8_t *colo_cache; /* For colo, VM's ram cache */
+ ram_addr_t offset;
+ ram_addr_t used_length;
+ ram_addr_t max_length;
+ void (*resized)(const char*, uint64_t length, void *host);
+ uint32_t flags;
+ /* Protected by iothread lock. */
+ char idstr[256];
+ /* RCU-enabled, writes protected by the ramlist lock */
+ QLIST_ENTRY(RAMBlock) next;
+ QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
+ int fd;
+ size_t page_size;
+ /* dirty bitmap used during migration */
+ unsigned long *bmap;
+ /* bitmap of already received pages in postcopy */
+ unsigned long *receivedmap;
+
+ /*
+ * bitmap to track already cleared dirty bitmap. When the bit is
+ * set, it means the corresponding memory chunk needs a log-clear.
+     * Set this to non-NULL to enable postponing and splitting the
+     * clearing of the dirty bitmap on the remote node (e.g.,
+ * KVM). The bitmap will be set only when doing global sync.
+ *
+     * NOTE: this bitmap differs from the other bitmaps
+ * in that one bit can represent multiple guest pages (which is
+ * decided by the `clear_bmap_shift' variable below). On
+ * destination side, this should always be NULL, and the variable
+ * `clear_bmap_shift' is meaningless.
+ */
+ unsigned long *clear_bmap;
+ uint8_t clear_bmap_shift;
+
+ /*
+ * RAM block length that corresponds to the used_length on the migration
+ * source (after RAM block sizes were synchronized). Especially, after
+ * starting to run the guest, used_length and postcopy_length can differ.
+ * Used to register/unregister uffd handlers and as the size of the received
+ * bitmap. Receiving any page beyond this length will bail out, as it
+ * could not have been valid on the source.
+ */
+ ram_addr_t postcopy_length;
+};
+#endif
+#endif
diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h
new file mode 100644
index 000000000..2ad2a81ac
--- /dev/null
+++ b/include/exec/ramlist.h
@@ -0,0 +1,85 @@
+#ifndef RAMLIST_H
+#define RAMLIST_H
+
+#include "qemu/queue.h"
+#include "qemu/thread.h"
+#include "qemu/rcu.h"
+#include "qemu/rcu_queue.h"
+
+typedef struct RAMBlockNotifier RAMBlockNotifier;
+
+#define DIRTY_MEMORY_VGA 0
+#define DIRTY_MEMORY_CODE 1
+#define DIRTY_MEMORY_MIGRATION 2
+#define DIRTY_MEMORY_NUM 3 /* num of dirty bits */
+
+/* The dirty memory bitmap is split into fixed-size blocks to allow growth
+ * under RCU. The bitmap for a block can be accessed as follows:
+ *
+ * rcu_read_lock();
+ *
+ * DirtyMemoryBlocks *blocks =
+ * qatomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ *
+ * ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
+ *     unsigned long *block = blocks->blocks[idx];
+ * ...access block bitmap...
+ *
+ * rcu_read_unlock();
+ *
+ * Remember to check for the end of the block when accessing a range of
+ * addresses. Move on to the next block if you reach the end.
+ *
+ * Organization into blocks allows dirty memory to grow (but not shrink) under
+ * RCU. When adding new RAMBlocks requires the dirty memory to grow, a new
+ * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept
+ * the same. Other threads can safely access existing blocks while dirty
+ * memory is being grown. When no threads are using the old DirtyMemoryBlocks
+ * anymore it is freed by RCU (but the underlying blocks stay because they are
+ * pointed to from the new DirtyMemoryBlocks).
+ */
+#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
+typedef struct {
+ struct rcu_head rcu;
+ unsigned long *blocks[];
+} DirtyMemoryBlocks;
+
+typedef struct RAMList {
+ QemuMutex mutex;
+ RAMBlock *mru_block;
+ /* RCU-enabled, writes protected by the ramlist lock. */
+ QLIST_HEAD(, RAMBlock) blocks;
+ DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
+ uint32_t version;
+ QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
+} RAMList;
+extern RAMList ram_list;
+
+/* Should be holding either ram_list.mutex, or the RCU lock. */
+#define INTERNAL_RAMBLOCK_FOREACH(block) \
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
+/* Never use the INTERNAL_ version except for defining other macros */
+#define RAMBLOCK_FOREACH(block) INTERNAL_RAMBLOCK_FOREACH(block)
+
+void qemu_mutex_lock_ramlist(void);
+void qemu_mutex_unlock_ramlist(void);
+
+struct RAMBlockNotifier {
+ void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size,
+ size_t max_size);
+ void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size,
+ size_t max_size);
+ void (*ram_block_resized)(RAMBlockNotifier *n, void *host, size_t old_size,
+ size_t new_size);
+ QLIST_ENTRY(RAMBlockNotifier) next;
+};
+
+void ram_block_notifier_add(RAMBlockNotifier *n);
+void ram_block_notifier_remove(RAMBlockNotifier *n);
+void ram_block_notify_add(void *host, size_t size, size_t max_size);
+void ram_block_notify_remove(void *host, size_t size, size_t max_size);
+void ram_block_notify_resize(void *host, size_t old_size, size_t new_size);
+
+GString *ram_block_format(void);
+
+#endif /* RAMLIST_H */
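
Editor's note: a minimal sketch of the notifier API declared above, registering a callback that runs whenever a RAM block is added (the example_* names are hypothetical; the callback signatures come from the struct above):

    #include "exec/ramlist.h"

    static void example_ram_block_added(RAMBlockNotifier *n, void *host,
                                        size_t size, size_t max_size)
    {
        /* e.g. register the new region with a vhost backend */
    }

    static RAMBlockNotifier example_notifier = {
        .ram_block_added = example_ram_block_added,
    };

    static void example_init(void)
    {
        ram_block_notifier_add(&example_notifier);
    }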
diff --git a/include/exec/softmmu-semi.h b/include/exec/softmmu-semi.h
new file mode 100644
index 000000000..fbcae88f4
--- /dev/null
+++ b/include/exec/softmmu-semi.h
@@ -0,0 +1,101 @@
+/*
+ * Helper routines to provide target memory access for semihosting
+ * syscalls in system emulation mode.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ *
+ * This code is licensed under the GPL
+ */
+
+#ifndef SOFTMMU_SEMI_H
+#define SOFTMMU_SEMI_H
+
+#include "cpu.h"
+
+static inline uint64_t softmmu_tget64(CPUArchState *env, target_ulong addr)
+{
+ uint64_t val;
+
+ cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 0);
+ return tswap64(val);
+}
+
+static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr)
+{
+ uint32_t val;
+
+ cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 0);
+ return tswap32(val);
+}
+
+static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr)
+{
+ uint8_t val;
+
+ cpu_memory_rw_debug(env_cpu(env), addr, &val, 1, 0);
+ return val;
+}
+
+#define get_user_u64(arg, p) ({ arg = softmmu_tget64(env, p); 0; })
+#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p); 0; })
+#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p); 0; })
+#define get_user_ual(arg, p) get_user_u32(arg, p)
+
+static inline void softmmu_tput64(CPUArchState *env,
+ target_ulong addr, uint64_t val)
+{
+ val = tswap64(val);
+ cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 1);
+}
+
+static inline void softmmu_tput32(CPUArchState *env,
+ target_ulong addr, uint32_t val)
+{
+ val = tswap32(val);
+ cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 1);
+}
+#define put_user_u64(arg, p) ({ softmmu_tput64(env, p, arg); 0; })
+#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg); 0; })
+#define put_user_ual(arg, p) put_user_u32(arg, p)
+
+static void *softmmu_lock_user(CPUArchState *env,
+ target_ulong addr, target_ulong len, int copy)
+{
+ uint8_t *p;
+ /* TODO: Make this something that isn't fixed size. */
+ p = malloc(len);
+ if (p && copy) {
+ cpu_memory_rw_debug(env_cpu(env), addr, p, len, 0);
+ }
+ return p;
+}
+#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
+static char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr)
+{
+ char *p;
+ char *s;
+ uint8_t c;
+ /* TODO: Make this something that isn't fixed size. */
+ s = p = malloc(1024);
+ if (!s) {
+ return NULL;
+ }
+ do {
+ cpu_memory_rw_debug(env_cpu(env), addr, &c, 1, 0);
+ addr++;
+ *(p++) = c;
+ } while (c);
+ return s;
+}
+#define lock_user_string(p) softmmu_lock_user_string(env, p)
+static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
+ target_ulong len)
+{
+ if (len) {
+ cpu_memory_rw_debug(env_cpu(env), addr, p, len, 1);
+ }
+ free(p);
+}
+#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)
+
+#endif
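
Editor's note: a hedged sketch of a semihosting helper built on the accessors above (example_sys_strlen is a hypothetical name). Note that the get_user/put_user and lock_user macros expect a local CPUArchState pointer named env to be in scope:

    static uint32_t example_sys_strlen(CPUArchState *env, target_ulong str_addr)
    {
        char *s = lock_user_string(str_addr);
        uint32_t len = 0;

        if (s) {
            len = strlen(s);
            unlock_user(s, str_addr, 0);   /* len 0: nothing to copy back */
        }
        return len;
    }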
diff --git a/include/exec/target_page.h b/include/exec/target_page.h
new file mode 100644
index 000000000..96726c36a
--- /dev/null
+++ b/include/exec/target_page.h
@@ -0,0 +1,21 @@
+/*
+ * Target page sizes and friends for non target files
+ *
+ * Copyright (c) 2017 Red Hat Inc
+ *
+ * Authors:
+ * David Alan Gilbert <dgilbert@redhat.com>
+ * Juan Quintela <quintela@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef EXEC_TARGET_PAGE_H
+#define EXEC_TARGET_PAGE_H
+
+size_t qemu_target_page_size(void);
+int qemu_target_page_bits(void);
+int qemu_target_page_bits_min(void);
+
+#endif
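
Editor's note: these accessors exist so that target-independent code (where TARGET_PAGE_SIZE is poisoned) can still reason about guest pages; region_len below is a caller-provided placeholder:

    size_t page_size = qemu_target_page_size();
    uint64_t npages  = DIV_ROUND_UP(region_len, page_size);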
diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h
new file mode 100644
index 000000000..9f646389a
--- /dev/null
+++ b/include/exec/translate-all.h
@@ -0,0 +1,40 @@
+/*
+ * Translated block handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TRANSLATE_ALL_H
+#define TRANSLATE_ALL_H
+
+#include "exec/exec-all.h"
+
+
+/* translate-all.c */
+struct page_collection *page_collection_lock(tb_page_addr_t start,
+ tb_page_addr_t end);
+void page_collection_unlock(struct page_collection *set);
+void tb_invalidate_phys_page_fast(struct page_collection *pages,
+ tb_page_addr_t start, int len,
+ uintptr_t retaddr);
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
+
+#ifdef CONFIG_USER_ONLY
+void page_protect(tb_page_addr_t page_addr);
+int page_unprotect(target_ulong address, uintptr_t pc);
+#endif
+
+#endif /* TRANSLATE_ALL_H */
diff --git a/include/exec/translator.h b/include/exec/translator.h
new file mode 100644
index 000000000..9bc46eda5
--- /dev/null
+++ b/include/exec/translator.h
@@ -0,0 +1,190 @@
+/*
+ * Generic intermediate code generation.
+ *
+ * Copyright (C) 2016-2017 LluĂ­s Vilanova <vilanova@ac.upc.edu>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef EXEC__TRANSLATOR_H
+#define EXEC__TRANSLATOR_H
+
+/*
+ * Include this header from a target-specific file, and add a
+ *
+ * DisasContextBase base;
+ *
+ * member in your target-specific DisasContext.
+ */
+
+
+#include "qemu/bswap.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/plugin-gen.h"
+#include "exec/translate-all.h"
+#include "tcg/tcg.h"
+
+
+/**
+ * DisasJumpType:
+ * @DISAS_NEXT: Next instruction in program order.
+ * @DISAS_TOO_MANY: Too many instructions translated.
+ * @DISAS_NORETURN: Following code is dead.
+ * @DISAS_TARGET_*: Start of target-specific conditions.
+ *
+ * What instruction to disassemble next.
+ */
+typedef enum DisasJumpType {
+ DISAS_NEXT,
+ DISAS_TOO_MANY,
+ DISAS_NORETURN,
+ DISAS_TARGET_0,
+ DISAS_TARGET_1,
+ DISAS_TARGET_2,
+ DISAS_TARGET_3,
+ DISAS_TARGET_4,
+ DISAS_TARGET_5,
+ DISAS_TARGET_6,
+ DISAS_TARGET_7,
+ DISAS_TARGET_8,
+ DISAS_TARGET_9,
+ DISAS_TARGET_10,
+ DISAS_TARGET_11,
+} DisasJumpType;
+
+/**
+ * DisasContextBase:
+ * @tb: Translation block for this disassembly.
+ * @pc_first: Address of first guest instruction in this TB.
+ * @pc_next: Address of next guest instruction in this TB (current during
+ * disassembly).
+ * @is_jmp: What instruction to disassemble next.
+ * @num_insns: Number of translated instructions (including current).
+ * @max_insns: Maximum number of instructions to be translated in this TB.
+ * @singlestep_enabled: "Hardware" single stepping enabled.
+ *
+ * Architecture-agnostic disassembly context.
+ */
+typedef struct DisasContextBase {
+ const TranslationBlock *tb;
+ target_ulong pc_first;
+ target_ulong pc_next;
+ DisasJumpType is_jmp;
+ int num_insns;
+ int max_insns;
+ bool singlestep_enabled;
+#ifdef CONFIG_USER_ONLY
+ /*
+ * Guest address of the last byte of the last protected page.
+ *
+ * Pages containing the translated instructions are made non-writable in
+ * order to achieve consistency in case another thread is modifying the
+ * code while translate_insn() fetches the instruction bytes piecemeal.
+ * Such writer threads are blocked on mmap_lock() in page_unprotect().
+ */
+ target_ulong page_protect_end;
+#endif
+} DisasContextBase;
+
+/**
+ * TranslatorOps:
+ * @init_disas_context:
+ * Initialize the target-specific portions of DisasContext struct.
+ * The generic DisasContextBase has already been initialized.
+ *
+ * @tb_start:
+ * Emit any code required before the start of the main loop,
+ * after the generic gen_tb_start().
+ *
+ * @insn_start:
+ * Emit the tcg_gen_insn_start opcode.
+ *
+ * @translate_insn:
+ * Disassemble one instruction and set db->pc_next for the start
+ * of the following instruction. Set db->is_jmp as necessary to
+ * terminate the main loop.
+ *
+ * @tb_stop:
+ * Emit any opcodes required to exit the TB, based on db->is_jmp.
+ *
+ * @disas_log:
+ * Print instruction disassembly to log.
+ */
+typedef struct TranslatorOps {
+ void (*init_disas_context)(DisasContextBase *db, CPUState *cpu);
+ void (*tb_start)(DisasContextBase *db, CPUState *cpu);
+ void (*insn_start)(DisasContextBase *db, CPUState *cpu);
+ void (*translate_insn)(DisasContextBase *db, CPUState *cpu);
+ void (*tb_stop)(DisasContextBase *db, CPUState *cpu);
+ void (*disas_log)(const DisasContextBase *db, CPUState *cpu);
+} TranslatorOps;
+
+/**
+ * translator_loop:
+ * @ops: Target-specific operations.
+ * @db: Disassembly context.
+ * @cpu: Target vCPU.
+ * @tb: Translation block.
+ * @max_insns: Maximum number of insns to translate.
+ *
+ * Generic translator loop.
+ *
+ * Translation will stop in the following cases (in order):
+ * - When is_jmp set by #TranslatorOps::translate_insn.
+ *   - set to any value other than DISAS_NEXT exits immediately.
+ * - When the TCG operation buffer is full.
+ * - When single-stepping is enabled (system-wide or on the current vCPU).
+ * - When too many instructions have been translated.
+ */
+void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
+ CPUState *cpu, TranslationBlock *tb, int max_insns);
+
+void translator_loop_temp_check(DisasContextBase *db);
+
+/**
+ * translator_use_goto_tb
+ * @db: Disassembly context
+ * @dest: target pc of the goto
+ *
+ * Return true if goto_tb is allowed between the current TB
+ * and the destination PC.
+ */
+bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
+
+/*
+ * Translator Load Functions
+ *
+ * These are intended to replace the direct usage of the cpu_ld*_code
+ * functions and are mandatory for front-ends that have been migrated
+ * to the common translator_loop. These functions are only intended
+ * to be called from the translation stage and should not be called
+ * from helper functions. Those functions should be converted to encode
+ * the relevant information at translation time.
+ */
+
+#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
+ type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
+ abi_ptr pc, bool do_swap); \
+ static inline type fullname(CPUArchState *env, \
+ DisasContextBase *dcbase, abi_ptr pc) \
+ { \
+ return fullname ## _swap(env, dcbase, pc, false); \
+ }
+
+#define FOR_EACH_TRANSLATOR_LD(F) \
+ F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
+ F(translator_ldsw, int16_t, cpu_ldsw_code, bswap16) \
+ F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
+ F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
+ F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
+
+FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
+
+#undef GEN_TRANSLATOR_LD
+
+#endif /* EXEC__TRANSLATOR_H */
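
Editor's note: putting the pieces together, a hedged skeleton of a target front end wired into the generic loop (all example_* names and the fixed 4-byte encoding are illustrative assumptions, not any particular target):

    typedef struct {
        DisasContextBase base;   /* generic part, as described at the top of this header */
        /* target-specific fields... */
    } ExampleDisasContext;

    static void example_translate_insn(DisasContextBase *db, CPUState *cpu)
    {
        CPUArchState *env = cpu->env_ptr;
        uint32_t insn = translator_ldl(env, db, db->pc_next);

        db->pc_next += 4;            /* assume a fixed 4-byte encoding */
        (void)insn;                  /* decode and emit TCG ops here; a branch
                                        would set db->is_jmp = DISAS_NORETURN */
    }

    static const TranslatorOps example_tr_ops = {
        .translate_insn = example_translate_insn,
        /* plus .init_disas_context, .tb_start, .insn_start, .tb_stop, .disas_log */
    };

    void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
    {
        ExampleDisasContext dc;

        translator_loop(&example_tr_ops, &dc.base, cpu, tb, max_insns);
    }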
diff --git a/include/exec/user/abitypes.h b/include/exec/user/abitypes.h
new file mode 100644
index 000000000..743b8bb9e
--- /dev/null
+++ b/include/exec/user/abitypes.h
@@ -0,0 +1,71 @@
+#ifndef EXEC_USER_ABITYPES_H
+#define EXEC_USER_ABITYPES_H
+
+#include "cpu.h"
+
+#ifdef TARGET_ABI32
+#define TARGET_ABI_BITS 32
+#else
+#define TARGET_ABI_BITS TARGET_LONG_BITS
+#endif
+
+#ifdef TARGET_M68K
+#define ABI_INT_ALIGNMENT 2
+#define ABI_LONG_ALIGNMENT 2
+#define ABI_LLONG_ALIGNMENT 2
+#endif
+
+#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) || defined(TARGET_SH4)
+#define ABI_LLONG_ALIGNMENT 4
+#endif
+
+#ifndef ABI_SHORT_ALIGNMENT
+#define ABI_SHORT_ALIGNMENT 2
+#endif
+#ifndef ABI_INT_ALIGNMENT
+#define ABI_INT_ALIGNMENT 4
+#endif
+#ifndef ABI_LONG_ALIGNMENT
+#define ABI_LONG_ALIGNMENT (TARGET_ABI_BITS / 8)
+#endif
+#ifndef ABI_LLONG_ALIGNMENT
+#define ABI_LLONG_ALIGNMENT 8
+#endif
+
+typedef int16_t abi_short __attribute__ ((aligned(ABI_SHORT_ALIGNMENT)));
+typedef uint16_t abi_ushort __attribute__((aligned(ABI_SHORT_ALIGNMENT)));
+typedef int32_t abi_int __attribute__((aligned(ABI_INT_ALIGNMENT)));
+typedef uint32_t abi_uint __attribute__((aligned(ABI_INT_ALIGNMENT)));
+typedef int64_t abi_llong __attribute__((aligned(ABI_LLONG_ALIGNMENT)));
+typedef uint64_t abi_ullong __attribute__((aligned(ABI_LLONG_ALIGNMENT)));
+
+#ifdef TARGET_ABI32
+typedef uint32_t abi_ulong __attribute__((aligned(ABI_LONG_ALIGNMENT)));
+typedef int32_t abi_long __attribute__((aligned(ABI_LONG_ALIGNMENT)));
+#define TARGET_ABI_FMT_lx "%08x"
+#define TARGET_ABI_FMT_ld "%d"
+#define TARGET_ABI_FMT_lu "%u"
+
+static inline abi_ulong tswapal(abi_ulong v)
+{
+ return tswap32(v);
+}
+
+#else
+typedef target_ulong abi_ulong __attribute__((aligned(ABI_LONG_ALIGNMENT)));
+typedef target_long abi_long __attribute__((aligned(ABI_LONG_ALIGNMENT)));
+#define TARGET_ABI_FMT_lx TARGET_FMT_lx
+#define TARGET_ABI_FMT_ld TARGET_FMT_ld
+#define TARGET_ABI_FMT_lu TARGET_FMT_lu
+/* for consistency, define ABI32 too */
+#if TARGET_ABI_BITS == 32
+#define TARGET_ABI32 1
+#endif
+
+static inline abi_ulong tswapal(abi_ulong v)
+{
+ return tswapl(v);
+}
+
+#endif
+#endif
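
Editor's note: an illustration of why these typedefs exist. A guest-visible structure declared with the abi_* types gets the target ABI's sizes and alignment even when built on a different host:

    struct target_example_pair {
        abi_int   first;    /* always 4 bytes, target int alignment */
        abi_llong second;   /* 8 bytes, but only 4-byte aligned for e.g. 32-bit x86 targets */
    };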
diff --git a/include/exec/user/thunk.h b/include/exec/user/thunk.h
new file mode 100644
index 000000000..300a840d5
--- /dev/null
+++ b/include/exec/user/thunk.h
@@ -0,0 +1,203 @@
+/*
+ * Generic thunking code to convert data between host and target CPU
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef THUNK_H
+#define THUNK_H
+
+#include "cpu.h"
+#include "exec/user/abitypes.h"
+
+/* types enums definitions */
+
+typedef enum argtype {
+ TYPE_NULL,
+ TYPE_CHAR,
+ TYPE_SHORT,
+ TYPE_INT,
+ TYPE_LONG,
+ TYPE_ULONG,
+    TYPE_PTRVOID, /* pointer to unknown data */
+ TYPE_LONGLONG,
+ TYPE_ULONGLONG,
+ TYPE_PTR,
+ TYPE_ARRAY,
+ TYPE_STRUCT,
+ TYPE_OLDDEVT,
+} argtype;
+
+#define MK_PTR(type) TYPE_PTR, type
+#define MK_ARRAY(type, size) TYPE_ARRAY, (int)(size), type
+#define MK_STRUCT(id) TYPE_STRUCT, id
+
+#define THUNK_TARGET 0
+#define THUNK_HOST 1
+
+typedef struct {
+ /* standard struct handling */
+ const argtype *field_types;
+ int nb_fields;
+ int *field_offsets[2];
+ /* special handling */
+ void (*convert[2])(void *dst, const void *src);
+ void (*print)(void *arg);
+ int size[2];
+ int align[2];
+ const char *name;
+} StructEntry;
+
+/* Translation table for bitmasks... */
+typedef struct bitmask_transtbl {
+ unsigned int target_mask;
+ unsigned int target_bits;
+ unsigned int host_mask;
+ unsigned int host_bits;
+} bitmask_transtbl;
+
+void thunk_register_struct(int id, const char *name, const argtype *types);
+void thunk_register_struct_direct(int id, const char *name,
+ const StructEntry *se1);
+const argtype *thunk_convert(void *dst, const void *src,
+ const argtype *type_ptr, int to_host);
+const argtype *thunk_print(void *arg, const argtype *type_ptr);
+
+extern StructEntry *struct_entries;
+
+int thunk_type_size_array(const argtype *type_ptr, int is_host);
+int thunk_type_align_array(const argtype *type_ptr, int is_host);
+
+static inline int thunk_type_size(const argtype *type_ptr, int is_host)
+{
+ int type, size;
+ const StructEntry *se;
+
+ type = *type_ptr;
+ switch(type) {
+ case TYPE_CHAR:
+ return 1;
+ case TYPE_SHORT:
+ return 2;
+ case TYPE_INT:
+ return 4;
+ case TYPE_LONGLONG:
+ case TYPE_ULONGLONG:
+ return 8;
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ case TYPE_PTR:
+ if (is_host) {
+ return sizeof(void *);
+ } else {
+ return TARGET_ABI_BITS / 8;
+ }
+ break;
+ case TYPE_OLDDEVT:
+ if (is_host) {
+#if defined(HOST_X86_64)
+ return 8;
+#elif defined(HOST_ALPHA) || defined(HOST_IA64) || defined(HOST_MIPS) || \
+ defined(HOST_PARISC) || defined(HOST_SPARC64)
+ return 4;
+#elif defined(HOST_PPC)
+ return sizeof(void *);
+#else
+ return 2;
+#endif
+ } else {
+#if defined(TARGET_X86_64)
+ return 8;
+#elif defined(TARGET_ALPHA) || defined(TARGET_IA64) || defined(TARGET_MIPS) || \
+ defined(TARGET_PARISC) || defined(TARGET_SPARC64)
+ return 4;
+#elif defined(TARGET_PPC)
+ return TARGET_ABI_BITS / 8;
+#else
+ return 2;
+#endif
+ }
+ break;
+ case TYPE_ARRAY:
+ size = type_ptr[1];
+ return size * thunk_type_size_array(type_ptr + 2, is_host);
+ case TYPE_STRUCT:
+ se = struct_entries + type_ptr[1];
+ return se->size[is_host];
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline int thunk_type_align(const argtype *type_ptr, int is_host)
+{
+ int type;
+ const StructEntry *se;
+
+ type = *type_ptr;
+ switch(type) {
+ case TYPE_CHAR:
+ return 1;
+ case TYPE_SHORT:
+ if (is_host) {
+ return __alignof__(short);
+ } else {
+ return ABI_SHORT_ALIGNMENT;
+ }
+ case TYPE_INT:
+ if (is_host) {
+ return __alignof__(int);
+ } else {
+ return ABI_INT_ALIGNMENT;
+ }
+ case TYPE_LONGLONG:
+ case TYPE_ULONGLONG:
+ if (is_host) {
+ return __alignof__(long long);
+ } else {
+ return ABI_LLONG_ALIGNMENT;
+ }
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ case TYPE_PTR:
+ if (is_host) {
+ return __alignof__(long);
+ } else {
+ return ABI_LONG_ALIGNMENT;
+ }
+ break;
+ case TYPE_OLDDEVT:
+ return thunk_type_size(type_ptr, is_host);
+ case TYPE_ARRAY:
+ return thunk_type_align_array(type_ptr + 2, is_host);
+ case TYPE_STRUCT:
+ se = struct_entries + type_ptr[1];
+ return se->align[is_host];
+ default:
+ g_assert_not_reached();
+ }
+}
+
+unsigned int target_to_host_bitmask(unsigned int target_mask,
+ const bitmask_transtbl * trans_tbl);
+unsigned int host_to_target_bitmask(unsigned int host_mask,
+ const bitmask_transtbl * trans_tbl);
+
+void thunk_init(unsigned int max_structs);
+
+#endif
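
Editor's note: a hedged sketch of describing and converting a two-field guest structure with this API (STRUCT_example_id and the field layout are hypothetical; real descriptions live in linux-user/syscall_types.h):

    static const argtype example_struct_types[] = {
        TYPE_LONG, TYPE_INT, TYPE_NULL      /* field list, TYPE_NULL-terminated */
    };

    static void example_register(void)
    {
        thunk_register_struct(STRUCT_example_id, "example", example_struct_types);
    }

    static void example_to_host(void *host_buf, const void *target_buf)
    {
        static const argtype arg_type[] = { MK_STRUCT(STRUCT_example_id) };

        thunk_convert(host_buf, target_buf, arg_type, THUNK_HOST);
    }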