Diffstat (limited to 'include/exec/cpu-defs.h')
 -rw-r--r--  include/exec/cpu-defs.h | 245
 1 file changed, 245 insertions(+), 0 deletions(-)
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
new file mode 100644
index 000000000..ba3cd32a1
--- /dev/null
+++ b/include/exec/cpu-defs.h
@@ -0,0 +1,245 @@
+/*
+ * common defines for all CPUs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_DEFS_H
+#define CPU_DEFS_H
+
+#ifndef NEED_CPU_H
+#error cpu.h included from common code
+#endif
+
+#include "qemu/host-utils.h"
+#include "qemu/thread.h"
+#ifndef CONFIG_USER_ONLY
+#include "exec/hwaddr.h"
+#endif
+#include "exec/memattrs.h"
+#include "hw/core/cpu.h"
+
+#include "cpu-param.h"
+
+#ifndef TARGET_LONG_BITS
+# error TARGET_LONG_BITS must be defined in cpu-param.h
+#endif
+#ifndef NB_MMU_MODES
+# error NB_MMU_MODES must be defined in cpu-param.h
+#endif
+#ifndef TARGET_PHYS_ADDR_SPACE_BITS
+# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h
+#endif
+#ifndef TARGET_VIRT_ADDR_SPACE_BITS
+# error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h
+#endif
+#ifndef TARGET_PAGE_BITS
+# ifdef TARGET_PAGE_BITS_VARY
+# ifndef TARGET_PAGE_BITS_MIN
+# error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h
+# endif
+# else
+# error TARGET_PAGE_BITS must be defined in cpu-param.h
+# endif
+#endif
+
+#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+/* target_ulong is the type of a virtual address */
+#if TARGET_LONG_SIZE == 4
+typedef int32_t target_long;
+typedef uint32_t target_ulong;
+#define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
+#elif TARGET_LONG_SIZE == 8
+typedef int64_t target_long;
+typedef uint64_t target_ulong;
+#define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
+#else
+#error TARGET_LONG_SIZE undefined
+#endif
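The TARGET_FMT_* macros let logging code print guest addresses at the correct width without per-target ifdefs. A minimal usage sketch, relying only on what this header defines (the helper name and the printf call are illustrative):

    #include <stdio.h>

    /* Print a guest virtual address with the width-correct format string. */
    static inline void dump_vaddr(target_ulong addr)
    {
        printf("vaddr=" TARGET_FMT_lx "\n", addr);
    }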
+
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
+
+/* use a fully associative victim tlb of 8 entries */
+#define CPU_VTLB_SIZE 8
+
+#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
+#define CPU_TLB_ENTRY_BITS 4
+#else
+#define CPU_TLB_ENTRY_BITS 5
+#endif
+
+#define CPU_TLB_DYN_MIN_BITS 6
+#define CPU_TLB_DYN_DEFAULT_BITS 8
+
+# if HOST_LONG_BITS == 32
+/* Make sure we do not require a double-word shift for the TLB load */
+# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
+# else /* HOST_LONG_BITS == 64 */
+/*
+ * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
+ * 2**34 == 16G of address space. This is roughly what one would expect a
+ * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
+ * Skylake's Level-2 STLB has 16 1G entries.
+ * Also, make sure we do not size the TLB past the guest's address space.
+ */
+# ifdef TARGET_PAGE_BITS_VARY
+# define CPU_TLB_DYN_MAX_BITS \
+ MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
+# else
+# define CPU_TLB_DYN_MAX_BITS \
+ MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
+# endif
+# endif
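To make the sizing comment above concrete, here is a self-contained check of the arithmetic, using only the numbers already quoted there (TARGET_PAGE_BITS == 12 and the 22-bit cap on the dynamic TLB index):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const unsigned page_bits = 12;   /* the TARGET_PAGE_BITS assumed above */
        const unsigned max_bits  = 22;   /* the cap used by CPU_TLB_DYN_MAX_BITS */
        uint64_t entries  = UINT64_C(1) << max_bits;
        uint64_t coverage = entries << page_bits;

        /* Prints "4194304 entries cover 16 GiB", i.e. 2**34 bytes. */
        printf("%" PRIu64 " entries cover %" PRIu64 " GiB\n",
               entries, coverage >> 30);
        return 0;
    }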
+
+typedef struct CPUTLBEntry {
+    /* bits TARGET_LONG_BITS-1..TARGET_PAGE_BITS : virtual address
+       bits TARGET_PAGE_BITS-1..4 : nonzero for accesses that should not
+                                    go directly to ram
+       bit  3                     : the entry is invalid
+       bits 2..0                  : zero
+    */
+ union {
+ struct {
+ target_ulong addr_read;
+ target_ulong addr_write;
+ target_ulong addr_code;
+ /* Addend to virtual address to get host address. IO accesses
+ use the corresponding iotlb value. */
+ uintptr_t addend;
+ };
+ /* padding to get a power of two size */
+ uint8_t dummy[1 << CPU_TLB_ENTRY_BITS];
+ };
+} CPUTLBEntry;
+
+QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
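A minimal sketch, not the in-tree fast-path helpers, of how the addend field is used once a comparator matches, exactly as the comment above describes; the helper and parameter names are illustrative:

    /* On a RAM hit, the host address is the guest virtual address plus the addend. */
    static inline void *tlb_host_addr_sketch(const CPUTLBEntry *entry, target_ulong vaddr)
    {
        return (void *)((uintptr_t)vaddr + entry->addend);
    }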
+
+/* The IOTLB is not accessed directly inline by generated TCG code,
+ * so the CPUIOTLBEntry layout is not as critical as that of the
+ * CPUTLBEntry. (This is also why we don't want to combine the two
+ * structs into one.)
+ */
+typedef struct CPUIOTLBEntry {
+ /*
+ * @addr contains:
+ * - in the lower TARGET_PAGE_BITS, a physical section number
+ * - with the lower TARGET_PAGE_BITS masked off, an offset which
+ * must be added to the virtual address to obtain:
+ * + the ram_addr_t of the target RAM (if the physical section
+ * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
+ * + the offset within the target MemoryRegion (otherwise)
+ */
+ hwaddr addr;
+ MemTxAttrs attrs;
+} CPUIOTLBEntry;
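A hedged sketch of decoding @addr as documented above; TARGET_PAGE_MASK is defined elsewhere in the exec headers, and the helper name is illustrative:

    /* Split an iotlb entry into the section number and the per-page offset. */
    static inline void iotlb_decode_sketch(const CPUIOTLBEntry *ent, target_ulong vaddr,
                                           unsigned *section_idx, hwaddr *offset)
    {
        *section_idx = ent->addr & ~TARGET_PAGE_MASK;           /* low TARGET_PAGE_BITS */
        *offset      = (ent->addr & TARGET_PAGE_MASK) + vaddr;  /* ram_addr_t or MR offset */
    }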
+
+/*
+ * Data elements that are per MMU mode, minus the bits accessed by
+ * the TCG fast path.
+ */
+typedef struct CPUTLBDesc {
+ /*
+ * Describe a region covering all of the large pages allocated
+ * into the tlb. When any page within this region is flushed,
+ * we must flush the entire tlb. The region is matched if
+ * (addr & large_page_mask) == large_page_addr.
+ */
+ target_ulong large_page_addr;
+ target_ulong large_page_mask;
+ /* host time (in ns) at the beginning of the time window */
+ int64_t window_begin_ns;
+ /* maximum number of entries observed in the window */
+ size_t window_max_entries;
+ size_t n_used_entries;
+ /* The next index to use in the tlb victim table. */
+ size_t vindex;
+ /* The tlb victim table, in two parts. */
+ CPUTLBEntry vtable[CPU_VTLB_SIZE];
+ CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
+ /* The iotlb. */
+ CPUIOTLBEntry *iotlb;
+} CPUTLBDesc;
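The large-page test spelled out in the comment above, as a minimal sketch (the helper name is illustrative):

    /* True if @addr falls inside the recorded large-page region, in which
     * case the whole TLB, not just one entry, has to be flushed. */
    static inline bool large_page_hit_sketch(const CPUTLBDesc *desc, target_ulong addr)
    {
        return (addr & desc->large_page_mask) == desc->large_page_addr;
    }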
+
+/*
+ * Data elements that are per MMU mode, accessed by the fast path.
+ * The structure is aligned to aid loading the pair with one insn.
+ */
+typedef struct CPUTLBDescFast {
+ /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
+ uintptr_t mask;
+ /* The array of tlb entries itself. */
+ CPUTLBEntry *table;
+} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
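A sketch of how the mask encoding lets the fast path map a guest virtual address straight to a table entry with a shift, an AND and an add, without a separate size field; this follows the comment above rather than quoting the in-tree helper, and the function name is illustrative:

    static inline CPUTLBEntry *tlb_entry_sketch(const CPUTLBDescFast *fast,
                                                target_ulong addr)
    {
        /* Drop the page offset, wrap by the mask, and use the result
         * directly as a byte offset into the table. */
        uintptr_t ofs = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)) & fast->mask;
        return (CPUTLBEntry *)((uintptr_t)fast->table + ofs);
    }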
+
+/*
+ * Data elements that are shared between all MMU modes.
+ */
+typedef struct CPUTLBCommon {
+ /* Serialize updates to f.table and d.vtable, and others as noted. */
+ QemuSpin lock;
+ /*
+ * Within dirty, for each bit N, modifications have been made to
+ * mmu_idx N since the last time that mmu_idx was flushed.
+ * Protected by tlb_c.lock.
+ */
+ uint16_t dirty;
+ /*
+ * Statistics. These are not lock protected, but are read and
+ * written atomically. This allows the monitor to print a snapshot
+ * of the stats without interfering with the cpu.
+ */
+ size_t full_flush_count;
+ size_t part_flush_count;
+ size_t elide_flush_count;
+} CPUTLBCommon;
+
+/*
+ * The entire softmmu tlb, for all MMU modes.
+ * The meaning of each of the MMU modes is defined in the target code.
+ * Since this is placed within CPUNegativeOffsetState, the smallest
+ * negative offsets are at the end of the struct.
+ */
+
+typedef struct CPUTLB {
+ CPUTLBCommon c;
+ CPUTLBDesc d[NB_MMU_MODES];
+ CPUTLBDescFast f[NB_MMU_MODES];
+} CPUTLB;
+
+/* This will be used by TCG backends to compute offsets. */
+#define TLB_MASK_TABLE_OFS(IDX) \
+ ((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env))
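The result is negative because neg.tlb sits just before env in ArchCPU (see the note on CPUNegativeOffsetState below). A hedged sketch of how a backend can then reach both words of f[IDX] relative to the env pointer; mmu_idx and the helper name are illustrative:

    static inline void tlb_fast_ofs_sketch(int mmu_idx,
                                           intptr_t *mask_ofs, intptr_t *table_ofs)
    {
        /* Both fields of f[mmu_idx] lie at small negative offsets from env,
         * so a backend can load them with env-relative addressing. */
        *mask_ofs  = TLB_MASK_TABLE_OFS(mmu_idx) + offsetof(CPUTLBDescFast, mask);
        *table_ofs = TLB_MASK_TABLE_OFS(mmu_idx) + offsetof(CPUTLBDescFast, table);
    }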
+
+#else
+
+typedef struct CPUTLB { } CPUTLB;
+
+#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
+
+/*
+ * This structure must be placed in ArchCPU immediately
+ * before CPUArchState, as a field named "neg".
+ */
+typedef struct CPUNegativeOffsetState {
+ CPUTLB tlb;
+ IcountDecr icount_decr;
+} CPUNegativeOffsetState;
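A hedged sketch of the layout this comment requires of each target; only neg and env are mandated by the text above, and the other field names follow the usual per-target convention and are illustrative:

    struct ArchCPU {
        CPUState parent_obj;           /* the common CPU state */
        CPUNegativeOffsetState neg;    /* must immediately precede env ... */
        CPUArchState env;              /* ... so tlb and icount_decr sit at
                                          small negative offsets from env */
        /* target-specific fields follow */
    };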
+
+#endif