Diffstat (limited to 'accel')
57 files changed, 17296 insertions, 0 deletions
diff --git a/accel/Kconfig b/accel/Kconfig
new file mode 100644
index 000000000..8bdedb7d1
--- /dev/null
+++ b/accel/Kconfig
@@ -0,0 +1,21 @@
+config WHPX
+    bool
+
+config NVMM
+    bool
+
+config HAX
+    bool
+
+config HVF
+    bool
+
+config TCG
+    bool
+
+config KVM
+    bool
+
+config XEN
+    bool
+    select FSDEV_9P if VIRTFS
diff --git a/accel/accel-common.c b/accel/accel-common.c
new file mode 100644
index 000000000..7b8ec7e0f
--- /dev/null
+++ b/accel/accel-common.c
@@ -0,0 +1,137 @@
+/*
+ * QEMU accel class, components common to system emulation and user mode
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/accel.h"
+
+#include "cpu.h"
+#include "hw/core/accel-cpu.h"
+
+#ifndef CONFIG_USER_ONLY
+#include "accel-softmmu.h"
+#endif /* !CONFIG_USER_ONLY */
+
+static const TypeInfo accel_type = {
+    .name = TYPE_ACCEL,
+    .parent = TYPE_OBJECT,
+    .class_size = sizeof(AccelClass),
+    .instance_size = sizeof(AccelState),
+};
+
+/* Lookup AccelClass from opt_name. Returns NULL if not found */
+AccelClass *accel_find(const char *opt_name)
+{
+    char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
+    AccelClass *ac = ACCEL_CLASS(module_object_class_by_name(class_name));
+    g_free(class_name);
+    return ac;
+}
+
+static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
+{
+    CPUClass *cc = CPU_CLASS(klass);
+    AccelCPUClass *accel_cpu = opaque;
+
+    /*
+     * The first callback allows accel-cpu to run initializations
+     * for the CPU, customizing CPU behavior according to the accelerator.
+     *
+     * The second one allows the CPU to customize the accel-cpu
+     * behavior according to the CPU.
+     *
+     * The second is currently only used by TCG, to specialize the
+     * TCGCPUOps depending on the CPU type.
+ */ + cc->accel_cpu = accel_cpu; + if (accel_cpu->cpu_class_init) { + accel_cpu->cpu_class_init(cc); + } + if (cc->init_accel_cpu) { + cc->init_accel_cpu(accel_cpu, cc); + } +} + +/* initialize the arch-specific accel CpuClass interfaces */ +static void accel_init_cpu_interfaces(AccelClass *ac) +{ + const char *ac_name; /* AccelClass name */ + char *acc_name; /* AccelCPUClass name */ + ObjectClass *acc; /* AccelCPUClass */ + + ac_name = object_class_get_name(OBJECT_CLASS(ac)); + g_assert(ac_name != NULL); + + acc_name = g_strdup_printf("%s-%s", ac_name, CPU_RESOLVING_TYPE); + acc = object_class_by_name(acc_name); + g_free(acc_name); + + if (acc) { + object_class_foreach(accel_init_cpu_int_aux, + CPU_RESOLVING_TYPE, false, acc); + } +} + +void accel_init_interfaces(AccelClass *ac) +{ +#ifndef CONFIG_USER_ONLY + accel_init_ops_interfaces(ac); +#endif /* !CONFIG_USER_ONLY */ + + accel_init_cpu_interfaces(ac); +} + +void accel_cpu_instance_init(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->accel_cpu && cc->accel_cpu->cpu_instance_init) { + cc->accel_cpu->cpu_instance_init(cpu); + } +} + +bool accel_cpu_realizefn(CPUState *cpu, Error **errp) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) { + return cc->accel_cpu->cpu_realizefn(cpu, errp); + } + return true; +} + +static const TypeInfo accel_cpu_type = { + .name = TYPE_ACCEL_CPU, + .parent = TYPE_OBJECT, + .abstract = true, + .class_size = sizeof(AccelCPUClass), +}; + +static void register_accel_types(void) +{ + type_register_static(&accel_type); + type_register_static(&accel_cpu_type); +} + +type_init(register_accel_types); diff --git a/accel/accel-softmmu.c b/accel/accel-softmmu.c new file mode 100644 index 000000000..67276e4f5 --- /dev/null +++ b/accel/accel-softmmu.c @@ -0,0 +1,100 @@ +/* + * QEMU accel class, system emulation components + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/accel.h" +#include "hw/boards.h" +#include "sysemu/cpus.h" + +#include "accel-softmmu.h" + +int accel_init_machine(AccelState *accel, MachineState *ms) +{ + AccelClass *acc = ACCEL_GET_CLASS(accel); + int ret; + ms->accelerator = accel; + *(acc->allowed) = true; + ret = acc->init_machine(ms); + if (ret < 0) { + ms->accelerator = NULL; + *(acc->allowed) = false; + object_unref(OBJECT(accel)); + } else { + object_set_accelerator_compat_props(acc->compat_props); + } + return ret; +} + +AccelState *current_accel(void) +{ + return current_machine->accelerator; +} + +void accel_setup_post(MachineState *ms) +{ + AccelState *accel = ms->accelerator; + AccelClass *acc = ACCEL_GET_CLASS(accel); + if (acc->setup_post) { + acc->setup_post(ms, accel); + } +} + +/* initialize the arch-independent accel operation interfaces */ +void accel_init_ops_interfaces(AccelClass *ac) +{ + const char *ac_name; + char *ops_name; + AccelOpsClass *ops; + + ac_name = object_class_get_name(OBJECT_CLASS(ac)); + g_assert(ac_name != NULL); + + ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name); + ops = ACCEL_OPS_CLASS(module_object_class_by_name(ops_name)); + g_free(ops_name); + + /* + * all accelerators need to define ops, providing at least a mandatory + * non-NULL create_vcpu_thread operation. + */ + g_assert(ops != NULL); + if (ops->ops_init) { + ops->ops_init(ops); + } + cpus_register_accel(ops); +} + +static const TypeInfo accel_ops_type_info = { + .name = TYPE_ACCEL_OPS, + .parent = TYPE_OBJECT, + .abstract = true, + .class_size = sizeof(AccelOpsClass), +}; + +static void accel_softmmu_register_types(void) +{ + type_register_static(&accel_ops_type_info); +} +type_init(accel_softmmu_register_types); diff --git a/accel/accel-softmmu.h b/accel/accel-softmmu.h new file mode 100644 index 000000000..5e192f188 --- /dev/null +++ b/accel/accel-softmmu.h @@ -0,0 +1,15 @@ +/* + * QEMU System Emulation accel internal functions + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef ACCEL_SOFTMMU_H +#define ACCEL_SOFTMMU_H + +void accel_init_ops_interfaces(AccelClass *ac); + +#endif /* ACCEL_SOFTMMU_H */ diff --git a/accel/accel-user.c b/accel/accel-user.c new file mode 100644 index 000000000..22b6a1a1a --- /dev/null +++ b/accel/accel-user.c @@ -0,0 +1,24 @@ +/* + * QEMU accel class, user-mode components + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/accel.h" + +AccelState *current_accel(void) +{ + static AccelState *accel; + + if (!accel) { + AccelClass *ac = accel_find("tcg"); + + g_assert(ac != NULL); + accel = ACCEL(object_new_with_class(OBJECT_CLASS(ac))); + } + return accel; +} diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c new file mode 100644 index 000000000..10429fdfb --- /dev/null +++ b/accel/dummy-cpus.c @@ -0,0 +1,72 @@ +/* + * Dummy cpu thread code + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/rcu.h" +#include "sysemu/cpus.h" +#include "qemu/guest-random.h" +#include "qemu/main-loop.h" +#include "hw/core/cpu.h" + +static void *dummy_cpu_thread_fn(void *arg) +{ + CPUState *cpu = arg; + sigset_t waitset; + int r; + + rcu_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + cpu->thread_id = qemu_get_thread_id(); + cpu->can_do_io = 1; + current_cpu = cpu; + + sigemptyset(&waitset); + sigaddset(&waitset, SIG_IPI); + + /* signal CPU creation */ + cpu_thread_signal_created(cpu); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + do { + qemu_mutex_unlock_iothread(); + do { + int sig; + r = sigwait(&waitset, &sig); + } while (r == -1 && (errno == EAGAIN || errno == EINTR)); + if (r == -1) { + perror("sigwait"); + exit(1); + } + qemu_mutex_lock_iothread(); + qemu_wait_io_event(cpu); + } while (!cpu->unplug); + + qemu_mutex_unlock_iothread(); + rcu_unregister_thread(); + return NULL; +} + +void dummy_start_vcpu_thread(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY", + cpu->cpu_index); + qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu, + QEMU_THREAD_JOINABLE); +} diff --git a/accel/hvf/entitlements.plist b/accel/hvf/entitlements.plist new file mode 100644 index 000000000..154f3308e --- /dev/null +++ b/accel/hvf/entitlements.plist @@ -0,0 +1,8 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>com.apple.security.hypervisor</key> + <true/> +</dict> +</plist> diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c new file mode 100644 index 000000000..54457c76c --- /dev/null +++ b/accel/hvf/hvf-accel-ops.c @@ -0,0 +1,488 @@ +/* + * Copyright 2008 IBM Corporation + * 2008 Red Hat, Inc. + * Copyright 2011 Intel Corporation + * Copyright 2016 Veertu, Inc. + * Copyright 2017 The Android Open Source Project + * + * QEMU Hypervisor.framework support + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * + * This file contain code under public domain from the hvdos project: + * https://github.com/mist64/hvdos + * + * Parts Copyright (c) 2011 NetApp, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "exec/address-spaces.h" +#include "exec/exec-all.h" +#include "sysemu/cpus.h" +#include "sysemu/hvf.h" +#include "sysemu/hvf_int.h" +#include "sysemu/runstate.h" +#include "qemu/guest-random.h" + +HVFState *hvf_state; + +#ifdef __aarch64__ +#define HV_VM_DEFAULT NULL +#endif + +/* Memory slots */ + +hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size) +{ + hvf_slot *slot; + int x; + for (x = 0; x < hvf_state->num_slots; ++x) { + slot = &hvf_state->slots[x]; + if (slot->size && start < (slot->start + slot->size) && + (start + size) > slot->start) { + return slot; + } + } + return NULL; +} + +struct mac_slot { + int present; + uint64_t size; + uint64_t gpa_start; + uint64_t gva; +}; + +struct mac_slot mac_slots[32]; + +static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags) +{ + struct mac_slot *macslot; + hv_return_t ret; + + macslot = &mac_slots[slot->slot_id]; + + if (macslot->present) { + if (macslot->size != slot->size) { + macslot->present = 0; + ret = hv_vm_unmap(macslot->gpa_start, macslot->size); + assert_hvf_ok(ret); + } + } + + if (!slot->size) { + return 0; + } + + macslot->present = 1; + macslot->gpa_start = slot->start; + macslot->size = slot->size; + ret = hv_vm_map(slot->mem, slot->start, slot->size, flags); + assert_hvf_ok(ret); + return 0; +} + +static void hvf_set_phys_mem(MemoryRegionSection *section, bool add) +{ + hvf_slot *mem; + MemoryRegion *area = section->mr; + bool writeable = !area->readonly && !area->rom_device; + hv_memory_flags_t flags; + uint64_t page_size = qemu_real_host_page_size; + + if (!memory_region_is_ram(area)) { + if (writeable) { + return; + } else if (!memory_region_is_romd(area)) { + /* + * If the memory device is not in romd_mode, then we actually want + * to remove the hvf memory slot so all accesses will trap. + */ + add = false; + } + } + + if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) || + !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) { + /* Not page aligned, so we can not map as RAM */ + add = false; + } + + mem = hvf_find_overlap_slot( + section->offset_within_address_space, + int128_get64(section->size)); + + if (mem && add) { + if (mem->size == int128_get64(section->size) && + mem->start == section->offset_within_address_space && + mem->mem == (memory_region_get_ram_ptr(area) + + section->offset_within_region)) { + return; /* Same region was attempted to register, go away. */ + } + } + + /* Region needs to be reset. set the size to 0 and remap it. 
*/ + if (mem) { + mem->size = 0; + if (do_hvf_set_memory(mem, 0)) { + error_report("Failed to reset overlapping slot"); + abort(); + } + } + + if (!add) { + return; + } + + if (area->readonly || + (!memory_region_is_ram(area) && memory_region_is_romd(area))) { + flags = HV_MEMORY_READ | HV_MEMORY_EXEC; + } else { + flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC; + } + + /* Now make a new slot. */ + int x; + + for (x = 0; x < hvf_state->num_slots; ++x) { + mem = &hvf_state->slots[x]; + if (!mem->size) { + break; + } + } + + if (x == hvf_state->num_slots) { + error_report("No free slots"); + abort(); + } + + mem->size = int128_get64(section->size); + mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region; + mem->start = section->offset_within_address_space; + mem->region = area; + + if (do_hvf_set_memory(mem, flags)) { + error_report("Error registering new memory slot"); + abort(); + } +} + +static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) +{ + if (!cpu->vcpu_dirty) { + hvf_get_registers(cpu); + cpu->vcpu_dirty = true; + } +} + +static void hvf_cpu_synchronize_state(CPUState *cpu) +{ + if (!cpu->vcpu_dirty) { + run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL); + } +} + +static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu, + run_on_cpu_data arg) +{ + /* QEMU state is the reference, push it to HVF now and on next entry */ + cpu->vcpu_dirty = true; +} + +static void hvf_cpu_synchronize_post_reset(CPUState *cpu) +{ + run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL); +} + +static void hvf_cpu_synchronize_post_init(CPUState *cpu) +{ + run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL); +} + +static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu) +{ + run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL); +} + +static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on) +{ + hvf_slot *slot; + + slot = hvf_find_overlap_slot( + section->offset_within_address_space, + int128_get64(section->size)); + + /* protect region against writes; begin tracking it */ + if (on) { + slot->flags |= HVF_SLOT_LOG; + hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, + HV_MEMORY_READ | HV_MEMORY_EXEC); + /* stop tracking region*/ + } else { + slot->flags &= ~HVF_SLOT_LOG; + hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, + HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC); + } +} + +static void hvf_log_start(MemoryListener *listener, + MemoryRegionSection *section, int old, int new) +{ + if (old != 0) { + return; + } + + hvf_set_dirty_tracking(section, 1); +} + +static void hvf_log_stop(MemoryListener *listener, + MemoryRegionSection *section, int old, int new) +{ + if (new != 0) { + return; + } + + hvf_set_dirty_tracking(section, 0); +} + +static void hvf_log_sync(MemoryListener *listener, + MemoryRegionSection *section) +{ + /* + * sync of dirty pages is handled elsewhere; just make sure we keep + * tracking the region. 
+ */ + hvf_set_dirty_tracking(section, 1); +} + +static void hvf_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + hvf_set_phys_mem(section, true); +} + +static void hvf_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + hvf_set_phys_mem(section, false); +} + +static MemoryListener hvf_memory_listener = { + .name = "hvf", + .priority = 10, + .region_add = hvf_region_add, + .region_del = hvf_region_del, + .log_start = hvf_log_start, + .log_stop = hvf_log_stop, + .log_sync = hvf_log_sync, +}; + +static void dummy_signal(int sig) +{ +} + +bool hvf_allowed; + +static int hvf_accel_init(MachineState *ms) +{ + int x; + hv_return_t ret; + HVFState *s; + + ret = hv_vm_create(HV_VM_DEFAULT); + assert_hvf_ok(ret); + + s = g_new0(HVFState, 1); + + s->num_slots = ARRAY_SIZE(s->slots); + for (x = 0; x < s->num_slots; ++x) { + s->slots[x].size = 0; + s->slots[x].slot_id = x; + } + + hvf_state = s; + memory_listener_register(&hvf_memory_listener, &address_space_memory); + + return hvf_arch_init(); +} + +static void hvf_accel_class_init(ObjectClass *oc, void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "HVF"; + ac->init_machine = hvf_accel_init; + ac->allowed = &hvf_allowed; +} + +static const TypeInfo hvf_accel_type = { + .name = TYPE_HVF_ACCEL, + .parent = TYPE_ACCEL, + .class_init = hvf_accel_class_init, +}; + +static void hvf_type_init(void) +{ + type_register_static(&hvf_accel_type); +} + +type_init(hvf_type_init); + +static void hvf_vcpu_destroy(CPUState *cpu) +{ + hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd); + assert_hvf_ok(ret); + + hvf_arch_vcpu_destroy(cpu); + g_free(cpu->hvf); + cpu->hvf = NULL; +} + +static int hvf_init_vcpu(CPUState *cpu) +{ + int r; + + cpu->hvf = g_malloc0(sizeof(*cpu->hvf)); + + /* init cpu signals */ + struct sigaction sigact; + + memset(&sigact, 0, sizeof(sigact)); + sigact.sa_handler = dummy_signal; + sigaction(SIG_IPI, &sigact, NULL); + + pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask); + sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI); + +#ifdef __aarch64__ + r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL); +#else + r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT); +#endif + cpu->vcpu_dirty = 1; + assert_hvf_ok(r); + + return hvf_arch_init_vcpu(cpu); +} + +/* + * The HVF-specific vCPU thread function. This one should only run when the host + * CPU supports the VMX "unrestricted guest" feature. + */ +static void *hvf_cpu_thread_fn(void *arg) +{ + CPUState *cpu = arg; + + int r; + + assert(hvf_enabled()); + + rcu_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + + cpu->thread_id = qemu_get_thread_id(); + cpu->can_do_io = 1; + current_cpu = cpu; + + hvf_init_vcpu(cpu); + + /* signal CPU creation */ + cpu_thread_signal_created(cpu); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + do { + if (cpu_can_run(cpu)) { + r = hvf_vcpu_exec(cpu); + if (r == EXCP_DEBUG) { + cpu_handle_guest_debug(cpu); + } + } + qemu_wait_io_event(cpu); + } while (!cpu->unplug || cpu_can_run(cpu)); + + hvf_vcpu_destroy(cpu); + cpu_thread_signal_destroyed(cpu); + qemu_mutex_unlock_iothread(); + rcu_unregister_thread(); + return NULL; +} + +static void hvf_start_vcpu_thread(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + /* + * HVF currently does not support TCG, and only runs in + * unrestricted-guest mode. 
+ */ + assert(hvf_enabled()); + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF", + cpu->cpu_index); + qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); +} + +static void hvf_accel_ops_class_init(ObjectClass *oc, void *data) +{ + AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); + + ops->create_vcpu_thread = hvf_start_vcpu_thread; + ops->kick_vcpu_thread = hvf_kick_vcpu_thread; + + ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset; + ops->synchronize_post_init = hvf_cpu_synchronize_post_init; + ops->synchronize_state = hvf_cpu_synchronize_state; + ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm; +}; +static const TypeInfo hvf_accel_ops_type = { + .name = ACCEL_OPS_NAME("hvf"), + + .parent = TYPE_ACCEL_OPS, + .class_init = hvf_accel_ops_class_init, + .abstract = true, +}; +static void hvf_accel_ops_register_types(void) +{ + type_register_static(&hvf_accel_ops_type); +} +type_init(hvf_accel_ops_register_types); diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c new file mode 100644 index 000000000..f185b0830 --- /dev/null +++ b/accel/hvf/hvf-all.c @@ -0,0 +1,47 @@ +/* + * QEMU Hypervisor.framework support + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/error-report.h" +#include "sysemu/hvf.h" +#include "sysemu/hvf_int.h" + +void assert_hvf_ok(hv_return_t ret) +{ + if (ret == HV_SUCCESS) { + return; + } + + switch (ret) { + case HV_ERROR: + error_report("Error: HV_ERROR"); + break; + case HV_BUSY: + error_report("Error: HV_BUSY"); + break; + case HV_BAD_ARGUMENT: + error_report("Error: HV_BAD_ARGUMENT"); + break; + case HV_NO_RESOURCES: + error_report("Error: HV_NO_RESOURCES"); + break; + case HV_NO_DEVICE: + error_report("Error: HV_NO_DEVICE"); + break; + case HV_UNSUPPORTED: + error_report("Error: HV_UNSUPPORTED"); + break; + default: + error_report("Unknown Error"); + } + + abort(); +} diff --git a/accel/hvf/meson.build b/accel/hvf/meson.build new file mode 100644 index 000000000..fc52cb784 --- /dev/null +++ b/accel/hvf/meson.build @@ -0,0 +1,7 @@ +hvf_ss = ss.source_set() +hvf_ss.add(files( + 'hvf-all.c', + 'hvf-accel-ops.c', +)) + +specific_ss.add_all(when: 'CONFIG_HVF', if_true: hvf_ss) diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c new file mode 100644 index 000000000..7516c67a3 --- /dev/null +++ b/accel/kvm/kvm-accel-ops.c @@ -0,0 +1,100 @@ +/* + * QEMU KVM support + * + * Copyright IBM, Corp. 2008 + * Red Hat, Inc. 2008 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Glauber Costa <gcosta@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "sysemu/kvm_int.h" +#include "sysemu/runstate.h" +#include "sysemu/cpus.h" +#include "qemu/guest-random.h" +#include "qapi/error.h" + +#include "kvm-cpus.h" + +static void *kvm_vcpu_thread_fn(void *arg) +{ + CPUState *cpu = arg; + int r; + + rcu_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + cpu->thread_id = qemu_get_thread_id(); + cpu->can_do_io = 1; + current_cpu = cpu; + + r = kvm_init_vcpu(cpu, &error_fatal); + kvm_init_cpu_signals(cpu); + + /* signal CPU creation */ + cpu_thread_signal_created(cpu); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + do { + if (cpu_can_run(cpu)) { + r = kvm_cpu_exec(cpu); + if (r == EXCP_DEBUG) { + cpu_handle_guest_debug(cpu); + } + } + qemu_wait_io_event(cpu); + } while (!cpu->unplug || cpu_can_run(cpu)); + + kvm_destroy_vcpu(cpu); + cpu_thread_signal_destroyed(cpu); + qemu_mutex_unlock_iothread(); + rcu_unregister_thread(); + return NULL; +} + +static void kvm_start_vcpu_thread(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM", + cpu->cpu_index); + qemu_thread_create(cpu->thread, thread_name, kvm_vcpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); +} + +static void kvm_accel_ops_class_init(ObjectClass *oc, void *data) +{ + AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); + + ops->create_vcpu_thread = kvm_start_vcpu_thread; + ops->synchronize_post_reset = kvm_cpu_synchronize_post_reset; + ops->synchronize_post_init = kvm_cpu_synchronize_post_init; + ops->synchronize_state = kvm_cpu_synchronize_state; + ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm; +} + +static const TypeInfo kvm_accel_ops_type = { + .name = ACCEL_OPS_NAME("kvm"), + + .parent = TYPE_ACCEL_OPS, + .class_init = kvm_accel_ops_class_init, + .abstract = true, +}; + +static void kvm_accel_ops_register_types(void) +{ + type_register_static(&kvm_accel_ops_type); +} +type_init(kvm_accel_ops_register_types); diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c new file mode 100644 index 000000000..eecd8031c --- /dev/null +++ b/accel/kvm/kvm-all.c @@ -0,0 +1,3669 @@ +/* + * QEMU KVM support + * + * Copyright IBM, Corp. 2008 + * Red Hat, Inc. 2008 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Glauber Costa <gcosta@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include <sys/ioctl.h> +#include <poll.h> + +#include <linux/kvm.h> + +#include "qemu/atomic.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/s390x/adapter.h" +#include "exec/gdbstub.h" +#include "sysemu/kvm_int.h" +#include "sysemu/runstate.h" +#include "sysemu/cpus.h" +#include "qemu/bswap.h" +#include "exec/memory.h" +#include "exec/ram_addr.h" +#include "qemu/event_notifier.h" +#include "qemu/main-loop.h" +#include "trace.h" +#include "hw/irq.h" +#include "qapi/visitor.h" +#include "qapi/qapi-types-common.h" +#include "qapi/qapi-visit-common.h" +#include "sysemu/reset.h" +#include "qemu/guest-random.h" +#include "sysemu/hw_accel.h" +#include "kvm-cpus.h" + +#include "hw/boards.h" + +/* This check must be after config-host.h is included */ +#ifdef CONFIG_EVENTFD +#include <sys/eventfd.h> +#endif + +/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We + * need to use the real host PAGE_SIZE, as that's what KVM will use. + */ +#ifdef PAGE_SIZE +#undef PAGE_SIZE +#endif +#define PAGE_SIZE qemu_real_host_page_size + +//#define DEBUG_KVM + +#ifdef DEBUG_KVM +#define DPRINTF(fmt, ...) \ + do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) \ + do { } while (0) +#endif + +#define KVM_MSI_HASHTAB_SIZE 256 + +struct KVMParkedVcpu { + unsigned long vcpu_id; + int kvm_fd; + QLIST_ENTRY(KVMParkedVcpu) node; +}; + +enum KVMDirtyRingReaperState { + KVM_DIRTY_RING_REAPER_NONE = 0, + /* The reaper is sleeping */ + KVM_DIRTY_RING_REAPER_WAIT, + /* The reaper is reaping for dirty pages */ + KVM_DIRTY_RING_REAPER_REAPING, +}; + +/* + * KVM reaper instance, responsible for collecting the KVM dirty bits + * via the dirty ring. + */ +struct KVMDirtyRingReaper { + /* The reaper thread */ + QemuThread reaper_thr; + volatile uint64_t reaper_iteration; /* iteration number of reaper thr */ + volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */ +}; + +struct KVMState +{ + AccelState parent_obj; + + int nr_slots; + int fd; + int vmfd; + int coalesced_mmio; + int coalesced_pio; + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; + bool coalesced_flush_in_progress; + int vcpu_events; + int robust_singlestep; + int debugregs; +#ifdef KVM_CAP_SET_GUEST_DEBUG + QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints; +#endif + int max_nested_state_len; + int many_ioeventfds; + int intx_set_mask; + int kvm_shadow_mem; + bool kernel_irqchip_allowed; + bool kernel_irqchip_required; + OnOffAuto kernel_irqchip_split; + bool sync_mmu; + uint64_t manual_dirty_log_protect; + /* The man page (and posix) say ioctl numbers are signed int, but + * they're not. 
Linux, glibc and *BSD all treat ioctl numbers as + * unsigned, and treating them as signed here can break things */ + unsigned irq_set_ioctl; + unsigned int sigmask_len; + GHashTable *gsimap; +#ifdef KVM_CAP_IRQ_ROUTING + struct kvm_irq_routing *irq_routes; + int nr_allocated_irq_routes; + unsigned long *used_gsi_bitmap; + unsigned int gsi_count; + QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE]; +#endif + KVMMemoryListener memory_listener; + QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus; + + /* For "info mtree -f" to tell if an MR is registered in KVM */ + int nr_as; + struct KVMAs { + KVMMemoryListener *ml; + AddressSpace *as; + } *as; + uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */ + uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */ + struct KVMDirtyRingReaper reaper; +}; + +KVMState *kvm_state; +bool kvm_kernel_irqchip; +bool kvm_split_irqchip; +bool kvm_async_interrupts_allowed; +bool kvm_halt_in_kernel_allowed; +bool kvm_eventfds_allowed; +bool kvm_irqfds_allowed; +bool kvm_resamplefds_allowed; +bool kvm_msi_via_irqfd_allowed; +bool kvm_gsi_routing_allowed; +bool kvm_gsi_direct_mapping; +bool kvm_allowed; +bool kvm_readonly_mem_allowed; +bool kvm_vm_attributes_allowed; +bool kvm_direct_msi_allowed; +bool kvm_ioeventfd_any_length_allowed; +bool kvm_msi_use_devid; +static bool kvm_immediate_exit; +static hwaddr kvm_max_slot_size = ~0; + +static const KVMCapabilityInfo kvm_required_capabilites[] = { + KVM_CAP_INFO(USER_MEMORY), + KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS), + KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS), + KVM_CAP_LAST_INFO +}; + +static NotifierList kvm_irqchip_change_notifiers = + NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers); + +struct KVMResampleFd { + int gsi; + EventNotifier *resample_event; + QLIST_ENTRY(KVMResampleFd) node; +}; +typedef struct KVMResampleFd KVMResampleFd; + +/* + * Only used with split irqchip where we need to do the resample fd + * kick for the kernel from userspace. 
+ */ +static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list = + QLIST_HEAD_INITIALIZER(kvm_resample_fd_list); + +static QemuMutex kml_slots_lock; + +#define kvm_slots_lock() qemu_mutex_lock(&kml_slots_lock) +#define kvm_slots_unlock() qemu_mutex_unlock(&kml_slots_lock) + +static void kvm_slot_init_dirty_bitmap(KVMSlot *mem); + +static inline void kvm_resample_fd_remove(int gsi) +{ + KVMResampleFd *rfd; + + QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) { + if (rfd->gsi == gsi) { + QLIST_REMOVE(rfd, node); + g_free(rfd); + break; + } + } +} + +static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event) +{ + KVMResampleFd *rfd = g_new0(KVMResampleFd, 1); + + rfd->gsi = gsi; + rfd->resample_event = event; + + QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node); +} + +void kvm_resample_fd_notify(int gsi) +{ + KVMResampleFd *rfd; + + QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) { + if (rfd->gsi == gsi) { + event_notifier_set(rfd->resample_event); + trace_kvm_resample_fd_notify(gsi); + return; + } + } +} + +int kvm_get_max_memslots(void) +{ + KVMState *s = KVM_STATE(current_accel()); + + return s->nr_slots; +} + +/* Called with KVMMemoryListener.slots_lock held */ +static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml) +{ + KVMState *s = kvm_state; + int i; + + for (i = 0; i < s->nr_slots; i++) { + if (kml->slots[i].memory_size == 0) { + return &kml->slots[i]; + } + } + + return NULL; +} + +bool kvm_has_free_slot(MachineState *ms) +{ + KVMState *s = KVM_STATE(ms->accelerator); + bool result; + KVMMemoryListener *kml = &s->memory_listener; + + kvm_slots_lock(); + result = !!kvm_get_free_slot(kml); + kvm_slots_unlock(); + + return result; +} + +/* Called with KVMMemoryListener.slots_lock held */ +static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml) +{ + KVMSlot *slot = kvm_get_free_slot(kml); + + if (slot) { + return slot; + } + + fprintf(stderr, "%s: no free slot available\n", __func__); + abort(); +} + +static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml, + hwaddr start_addr, + hwaddr size) +{ + KVMState *s = kvm_state; + int i; + + for (i = 0; i < s->nr_slots; i++) { + KVMSlot *mem = &kml->slots[i]; + + if (start_addr == mem->start_addr && size == mem->memory_size) { + return mem; + } + } + + return NULL; +} + +/* + * Calculate and align the start address and the size of the section. + * Return the size. If the size is 0, the aligned section is empty. + */ +static hwaddr kvm_align_section(MemoryRegionSection *section, + hwaddr *start) +{ + hwaddr size = int128_get64(section->size); + hwaddr delta, aligned; + + /* kvm works in page size chunks, but the function may be called + with sub-page size and unaligned start address. Pad the start + address to next and truncate size to previous page boundary. 
*/ + aligned = ROUND_UP(section->offset_within_address_space, + qemu_real_host_page_size); + delta = aligned - section->offset_within_address_space; + *start = aligned; + if (delta > size) { + return 0; + } + + return (size - delta) & qemu_real_host_page_mask; +} + +int kvm_physical_memory_addr_from_host(KVMState *s, void *ram, + hwaddr *phys_addr) +{ + KVMMemoryListener *kml = &s->memory_listener; + int i, ret = 0; + + kvm_slots_lock(); + for (i = 0; i < s->nr_slots; i++) { + KVMSlot *mem = &kml->slots[i]; + + if (ram >= mem->ram && ram < mem->ram + mem->memory_size) { + *phys_addr = mem->start_addr + (ram - mem->ram); + ret = 1; + break; + } + } + kvm_slots_unlock(); + + return ret; +} + +static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new) +{ + KVMState *s = kvm_state; + struct kvm_userspace_memory_region mem; + int ret; + + mem.slot = slot->slot | (kml->as_id << 16); + mem.guest_phys_addr = slot->start_addr; + mem.userspace_addr = (unsigned long)slot->ram; + mem.flags = slot->flags; + + if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) { + /* Set the slot size to 0 before setting the slot to the desired + * value. This is needed based on KVM commit 75d61fbc. */ + mem.memory_size = 0; + ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); + if (ret < 0) { + goto err; + } + } + mem.memory_size = slot->memory_size; + ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); + slot->old_flags = mem.flags; +err: + trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr, + mem.memory_size, mem.userspace_addr, ret); + if (ret < 0) { + error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d," + " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s", + __func__, mem.slot, slot->start_addr, + (uint64_t)mem.memory_size, strerror(errno)); + } + return ret; +} + +static int do_kvm_destroy_vcpu(CPUState *cpu) +{ + KVMState *s = kvm_state; + long mmap_size; + struct KVMParkedVcpu *vcpu = NULL; + int ret = 0; + + DPRINTF("kvm_destroy_vcpu\n"); + + ret = kvm_arch_destroy_vcpu(cpu); + if (ret < 0) { + goto err; + } + + mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); + if (mmap_size < 0) { + ret = mmap_size; + DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n"); + goto err; + } + + ret = munmap(cpu->kvm_run, mmap_size); + if (ret < 0) { + goto err; + } + + if (cpu->kvm_dirty_gfns) { + ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes); + if (ret < 0) { + goto err; + } + } + + vcpu = g_malloc0(sizeof(*vcpu)); + vcpu->vcpu_id = kvm_arch_vcpu_id(cpu); + vcpu->kvm_fd = cpu->kvm_fd; + QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node); +err: + return ret; +} + +void kvm_destroy_vcpu(CPUState *cpu) +{ + if (do_kvm_destroy_vcpu(cpu) < 0) { + error_report("kvm_destroy_vcpu failed"); + exit(EXIT_FAILURE); + } +} + +static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id) +{ + struct KVMParkedVcpu *cpu; + + QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) { + if (cpu->vcpu_id == vcpu_id) { + int kvm_fd; + + QLIST_REMOVE(cpu, node); + kvm_fd = cpu->kvm_fd; + g_free(cpu); + return kvm_fd; + } + } + + return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id); +} + +int kvm_init_vcpu(CPUState *cpu, Error **errp) +{ + KVMState *s = kvm_state; + long mmap_size; + int ret; + + trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); + + ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu)); + if (ret < 0) { + error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)", + kvm_arch_vcpu_id(cpu)); + goto err; + } + + cpu->kvm_fd 
= ret; + cpu->kvm_state = s; + cpu->vcpu_dirty = true; + cpu->dirty_pages = 0; + + mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); + if (mmap_size < 0) { + ret = mmap_size; + error_setg_errno(errp, -mmap_size, + "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed"); + goto err; + } + + cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, + cpu->kvm_fd, 0); + if (cpu->kvm_run == MAP_FAILED) { + ret = -errno; + error_setg_errno(errp, ret, + "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)", + kvm_arch_vcpu_id(cpu)); + goto err; + } + + if (s->coalesced_mmio && !s->coalesced_mmio_ring) { + s->coalesced_mmio_ring = + (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE; + } + + if (s->kvm_dirty_ring_size) { + /* Use MAP_SHARED to share pages with the kernel */ + cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes, + PROT_READ | PROT_WRITE, MAP_SHARED, + cpu->kvm_fd, + PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET); + if (cpu->kvm_dirty_gfns == MAP_FAILED) { + ret = -errno; + DPRINTF("mmap'ing vcpu dirty gfns failed: %d\n", ret); + goto err; + } + } + + ret = kvm_arch_init_vcpu(cpu); + if (ret < 0) { + error_setg_errno(errp, -ret, + "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)", + kvm_arch_vcpu_id(cpu)); + } +err: + return ret; +} + +/* + * dirty pages logging control + */ + +static int kvm_mem_flags(MemoryRegion *mr) +{ + bool readonly = mr->readonly || memory_region_is_romd(mr); + int flags = 0; + + if (memory_region_get_dirty_log_mask(mr) != 0) { + flags |= KVM_MEM_LOG_DIRTY_PAGES; + } + if (readonly && kvm_readonly_mem_allowed) { + flags |= KVM_MEM_READONLY; + } + return flags; +} + +/* Called with KVMMemoryListener.slots_lock held */ +static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem, + MemoryRegion *mr) +{ + mem->flags = kvm_mem_flags(mr); + + /* If nothing changed effectively, no need to issue ioctl */ + if (mem->flags == mem->old_flags) { + return 0; + } + + kvm_slot_init_dirty_bitmap(mem); + return kvm_set_user_memory_region(kml, mem, false); +} + +static int kvm_section_update_flags(KVMMemoryListener *kml, + MemoryRegionSection *section) +{ + hwaddr start_addr, size, slot_size; + KVMSlot *mem; + int ret = 0; + + size = kvm_align_section(section, &start_addr); + if (!size) { + return 0; + } + + kvm_slots_lock(); + + while (size && !ret) { + slot_size = MIN(kvm_max_slot_size, size); + mem = kvm_lookup_matching_slot(kml, start_addr, slot_size); + if (!mem) { + /* We don't have a slot if we want to trap every access. 
*/ + goto out; + } + + ret = kvm_slot_update_flags(kml, mem, section->mr); + start_addr += slot_size; + size -= slot_size; + } + +out: + kvm_slots_unlock(); + return ret; +} + +static void kvm_log_start(MemoryListener *listener, + MemoryRegionSection *section, + int old, int new) +{ + KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); + int r; + + if (old != 0) { + return; + } + + r = kvm_section_update_flags(kml, section); + if (r < 0) { + abort(); + } +} + +static void kvm_log_stop(MemoryListener *listener, + MemoryRegionSection *section, + int old, int new) +{ + KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); + int r; + + if (new != 0) { + return; + } + + r = kvm_section_update_flags(kml, section); + if (r < 0) { + abort(); + } +} + +/* get kvm's dirty pages bitmap and update qemu's */ +static void kvm_slot_sync_dirty_pages(KVMSlot *slot) +{ + ram_addr_t start = slot->ram_start_offset; + ram_addr_t pages = slot->memory_size / qemu_real_host_page_size; + + cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages); +} + +static void kvm_slot_reset_dirty_pages(KVMSlot *slot) +{ + memset(slot->dirty_bmap, 0, slot->dirty_bmap_size); +} + +#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1)) + +/* Allocate the dirty bitmap for a slot */ +static void kvm_slot_init_dirty_bitmap(KVMSlot *mem) +{ + if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) { + return; + } + + /* + * XXX bad kernel interface alert + * For dirty bitmap, kernel allocates array of size aligned to + * bits-per-long. But for case when the kernel is 64bits and + * the userspace is 32bits, userspace can't align to the same + * bits-per-long, since sizeof(long) is different between kernel + * and user space. This way, userspace will provide buffer which + * may be 4 bytes less than the kernel will use, resulting in + * userspace memory corruption (which is not detectable by valgrind + * too, in most cases). + * So for now, let's align to 64 instead of HOST_LONG_BITS here, in + * a hope that sizeof(long) won't become >8 any time soon. + * + * Note: the granule of kvm dirty log is qemu_real_host_page_size. + * And mem->memory_size is aligned to it (otherwise this mem can't + * be registered to KVM). + */ + hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size, + /*HOST_LONG_BITS*/ 64) / 8; + mem->dirty_bmap = g_malloc0(bitmap_size); + mem->dirty_bmap_size = bitmap_size; +} + +/* + * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if + * succeeded, false otherwise + */ +static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot) +{ + struct kvm_dirty_log d = {}; + int ret; + + d.dirty_bitmap = slot->dirty_bmap; + d.slot = slot->slot | (slot->as_id << 16); + ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d); + + if (ret == -ENOENT) { + /* kernel does not have dirty bitmap in this slot */ + ret = 0; + } + if (ret) { + error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d", + __func__, ret); + } + return ret == 0; +} + +/* Should be with all slots_lock held for the address spaces. 
*/ +static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id, + uint32_t slot_id, uint64_t offset) +{ + KVMMemoryListener *kml; + KVMSlot *mem; + + if (as_id >= s->nr_as) { + return; + } + + kml = s->as[as_id].ml; + mem = &kml->slots[slot_id]; + + if (!mem->memory_size || offset >= + (mem->memory_size / qemu_real_host_page_size)) { + return; + } + + set_bit(offset, mem->dirty_bmap); +} + +static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn) +{ + return gfn->flags == KVM_DIRTY_GFN_F_DIRTY; +} + +static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn) +{ + gfn->flags = KVM_DIRTY_GFN_F_RESET; +} + +/* + * Should be with all slots_lock held for the address spaces. It returns the + * dirty page we've collected on this dirty ring. + */ +static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu) +{ + struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur; + uint32_t ring_size = s->kvm_dirty_ring_size; + uint32_t count = 0, fetch = cpu->kvm_fetch_index; + + assert(dirty_gfns && ring_size); + trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index); + + while (true) { + cur = &dirty_gfns[fetch % ring_size]; + if (!dirty_gfn_is_dirtied(cur)) { + break; + } + kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff, + cur->offset); + dirty_gfn_set_collected(cur); + trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset); + fetch++; + count++; + } + cpu->kvm_fetch_index = fetch; + cpu->dirty_pages += count; + + return count; +} + +/* Must be with slots_lock held */ +static uint64_t kvm_dirty_ring_reap_locked(KVMState *s) +{ + int ret; + CPUState *cpu; + uint64_t total = 0; + int64_t stamp; + + stamp = get_clock(); + + CPU_FOREACH(cpu) { + total += kvm_dirty_ring_reap_one(s, cpu); + } + + if (total) { + ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS); + assert(ret == total); + } + + stamp = get_clock() - stamp; + + if (total) { + trace_kvm_dirty_ring_reap(total, stamp / 1000); + } + + return total; +} + +/* + * Currently for simplicity, we must hold BQL before calling this. We can + * consider to drop the BQL if we're clear with all the race conditions. + */ +static uint64_t kvm_dirty_ring_reap(KVMState *s) +{ + uint64_t total; + + /* + * We need to lock all kvm slots for all address spaces here, + * because: + * + * (1) We need to mark dirty for dirty bitmaps in multiple slots + * and for tons of pages, so it's better to take the lock here + * once rather than once per page. And more importantly, + * + * (2) We must _NOT_ publish dirty bits to the other threads + * (e.g., the migration thread) via the kvm memory slot dirty + * bitmaps before correctly re-protect those dirtied pages. + * Otherwise we can have potential risk of data corruption if + * the page data is read in the other thread before we do + * reset below. + */ + kvm_slots_lock(); + total = kvm_dirty_ring_reap_locked(s); + kvm_slots_unlock(); + + return total; +} + +static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg) +{ + /* No need to do anything */ +} + +/* + * Kick all vcpus out in a synchronized way. When returned, we + * guarantee that every vcpu has been kicked and at least returned to + * userspace once. + */ +static void kvm_cpu_synchronize_kick_all(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL); + } +} + +/* + * Flush all the existing dirty pages to the KVM slot buffers. 
When + * this call returns, we guarantee that all the touched dirty pages + * before calling this function have been put into the per-kvmslot + * dirty bitmap. + * + * This function must be called with BQL held. + */ +static void kvm_dirty_ring_flush(void) +{ + trace_kvm_dirty_ring_flush(0); + /* + * The function needs to be serialized. Since this function + * should always be with BQL held, serialization is guaranteed. + * However, let's be sure of it. + */ + assert(qemu_mutex_iothread_locked()); + /* + * First make sure to flush the hardware buffers by kicking all + * vcpus out in a synchronous way. + */ + kvm_cpu_synchronize_kick_all(); + kvm_dirty_ring_reap(kvm_state); + trace_kvm_dirty_ring_flush(1); +} + +/** + * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space + * + * This function will first try to fetch dirty bitmap from the kernel, + * and then updates qemu's dirty bitmap. + * + * NOTE: caller must be with kml->slots_lock held. + * + * @kml: the KVM memory listener object + * @section: the memory section to sync the dirty bitmap with + */ +static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml, + MemoryRegionSection *section) +{ + KVMState *s = kvm_state; + KVMSlot *mem; + hwaddr start_addr, size; + hwaddr slot_size; + + size = kvm_align_section(section, &start_addr); + while (size) { + slot_size = MIN(kvm_max_slot_size, size); + mem = kvm_lookup_matching_slot(kml, start_addr, slot_size); + if (!mem) { + /* We don't have a slot if we want to trap every access. */ + return; + } + if (kvm_slot_get_dirty_log(s, mem)) { + kvm_slot_sync_dirty_pages(mem); + } + start_addr += slot_size; + size -= slot_size; + } +} + +/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */ +#define KVM_CLEAR_LOG_SHIFT 6 +#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT) +#define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN) + +static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start, + uint64_t size) +{ + KVMState *s = kvm_state; + uint64_t end, bmap_start, start_delta, bmap_npages; + struct kvm_clear_dirty_log d; + unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size; + int ret; + + /* + * We need to extend either the start or the size or both to + * satisfy the KVM interface requirement. Firstly, do the start + * page alignment on 64 host pages + */ + bmap_start = start & KVM_CLEAR_LOG_MASK; + start_delta = start - bmap_start; + bmap_start /= psize; + + /* + * The kernel interface has restriction on the size too, that either: + * + * (1) the size is 64 host pages aligned (just like the start), or + * (2) the size fills up until the end of the KVM memslot. + */ + bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN) + << KVM_CLEAR_LOG_SHIFT; + end = mem->memory_size / psize; + if (bmap_npages > end - bmap_start) { + bmap_npages = end - bmap_start; + } + start_delta /= psize; + + /* + * Prepare the bitmap to clear dirty bits. Here we must guarantee + * that we won't clear any unknown dirty bits otherwise we might + * accidentally clear some set bits which are not yet synced from + * the kernel into QEMU's bitmap, then we'll lose track of the + * guest modifications upon those pages (which can directly lead + * to guest data loss or panic after migration). 
+ * + * Layout of the KVMSlot.dirty_bmap: + * + * |<-------- bmap_npages -----------..>| + * [1] + * start_delta size + * |----------------|-------------|------------------|------------| + * ^ ^ ^ ^ + * | | | | + * start bmap_start (start) end + * of memslot of memslot + * + * [1] bmap_npages can be aligned to either 64 pages or the end of slot + */ + + assert(bmap_start % BITS_PER_LONG == 0); + /* We should never do log_clear before log_sync */ + assert(mem->dirty_bmap); + if (start_delta || bmap_npages - size / psize) { + /* Slow path - we need to manipulate a temp bitmap */ + bmap_clear = bitmap_new(bmap_npages); + bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap, + bmap_start, start_delta + size / psize); + /* + * We need to fill the holes at start because that was not + * specified by the caller and we extended the bitmap only for + * 64 pages alignment + */ + bitmap_clear(bmap_clear, 0, start_delta); + d.dirty_bitmap = bmap_clear; + } else { + /* + * Fast path - both start and size align well with BITS_PER_LONG + * (or the end of memory slot) + */ + d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start); + } + + d.first_page = bmap_start; + /* It should never overflow. If it happens, say something */ + assert(bmap_npages <= UINT32_MAX); + d.num_pages = bmap_npages; + d.slot = mem->slot | (as_id << 16); + + ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d); + if (ret < 0 && ret != -ENOENT) { + error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, " + "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d", + __func__, d.slot, (uint64_t)d.first_page, + (uint32_t)d.num_pages, ret); + } else { + ret = 0; + trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages); + } + + /* + * After we have updated the remote dirty bitmap, we update the + * cached bitmap as well for the memslot, then if another user + * clears the same region we know we shouldn't clear it again on + * the remote otherwise it's data loss as well. + */ + bitmap_clear(mem->dirty_bmap, bmap_start + start_delta, + size / psize); + /* This handles the NULL case well */ + g_free(bmap_clear); + return ret; +} + + +/** + * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range + * + * NOTE: this will be a no-op if we haven't enabled manual dirty log + * protection in the host kernel because in that case this operation + * will be done within log_sync(). + * + * @kml: the kvm memory listener + * @section: the memory range to clear dirty bitmap + */ +static int kvm_physical_log_clear(KVMMemoryListener *kml, + MemoryRegionSection *section) +{ + KVMState *s = kvm_state; + uint64_t start, size, offset, count; + KVMSlot *mem; + int ret = 0, i; + + if (!s->manual_dirty_log_protect) { + /* No need to do explicit clear */ + return ret; + } + + start = section->offset_within_address_space; + size = int128_get64(section->size); + + if (!size) { + /* Nothing more we can do... */ + return ret; + } + + kvm_slots_lock(); + + for (i = 0; i < s->nr_slots; i++) { + mem = &kml->slots[i]; + /* Discard slots that are empty or do not overlap the section */ + if (!mem->memory_size || + mem->start_addr > start + size - 1 || + start > mem->start_addr + mem->memory_size - 1) { + continue; + } + + if (start >= mem->start_addr) { + /* The slot starts before section or is aligned to it. */ + offset = start - mem->start_addr; + count = MIN(mem->memory_size - offset, size); + } else { + /* The slot starts after section. 
*/ + offset = 0; + count = MIN(mem->memory_size, size - (mem->start_addr - start)); + } + ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count); + if (ret < 0) { + break; + } + } + + kvm_slots_unlock(); + + return ret; +} + +static void kvm_coalesce_mmio_region(MemoryListener *listener, + MemoryRegionSection *secion, + hwaddr start, hwaddr size) +{ + KVMState *s = kvm_state; + + if (s->coalesced_mmio) { + struct kvm_coalesced_mmio_zone zone; + + zone.addr = start; + zone.size = size; + zone.pad = 0; + + (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone); + } +} + +static void kvm_uncoalesce_mmio_region(MemoryListener *listener, + MemoryRegionSection *secion, + hwaddr start, hwaddr size) +{ + KVMState *s = kvm_state; + + if (s->coalesced_mmio) { + struct kvm_coalesced_mmio_zone zone; + + zone.addr = start; + zone.size = size; + zone.pad = 0; + + (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone); + } +} + +static void kvm_coalesce_pio_add(MemoryListener *listener, + MemoryRegionSection *section, + hwaddr start, hwaddr size) +{ + KVMState *s = kvm_state; + + if (s->coalesced_pio) { + struct kvm_coalesced_mmio_zone zone; + + zone.addr = start; + zone.size = size; + zone.pio = 1; + + (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone); + } +} + +static void kvm_coalesce_pio_del(MemoryListener *listener, + MemoryRegionSection *section, + hwaddr start, hwaddr size) +{ + KVMState *s = kvm_state; + + if (s->coalesced_pio) { + struct kvm_coalesced_mmio_zone zone; + + zone.addr = start; + zone.size = size; + zone.pio = 1; + + (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone); + } +} + +static MemoryListener kvm_coalesced_pio_listener = { + .name = "kvm-coalesced-pio", + .coalesced_io_add = kvm_coalesce_pio_add, + .coalesced_io_del = kvm_coalesce_pio_del, +}; + +int kvm_check_extension(KVMState *s, unsigned int extension) +{ + int ret; + + ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension); + if (ret < 0) { + ret = 0; + } + + return ret; +} + +int kvm_vm_check_extension(KVMState *s, unsigned int extension) +{ + int ret; + + ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension); + if (ret < 0) { + /* VM wide version not implemented, use global one instead */ + ret = kvm_check_extension(s, extension); + } + + return ret; +} + +typedef struct HWPoisonPage { + ram_addr_t ram_addr; + QLIST_ENTRY(HWPoisonPage) list; +} HWPoisonPage; + +static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list = + QLIST_HEAD_INITIALIZER(hwpoison_page_list); + +static void kvm_unpoison_all(void *param) +{ + HWPoisonPage *page, *next_page; + + QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) { + QLIST_REMOVE(page, list); + qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE); + g_free(page); + } +} + +void kvm_hwpoison_page_add(ram_addr_t ram_addr) +{ + HWPoisonPage *page; + + QLIST_FOREACH(page, &hwpoison_page_list, list) { + if (page->ram_addr == ram_addr) { + return; + } + } + page = g_new(HWPoisonPage, 1); + page->ram_addr = ram_addr; + QLIST_INSERT_HEAD(&hwpoison_page_list, page, list); +} + +static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size) +{ +#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) + /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN + * endianness, but the memory core hands them in target endianness. + * For example, PPC is always treated as big-endian even if running + * on KVM and on PPC64LE. Correct here. 
+ */ + switch (size) { + case 2: + val = bswap16(val); + break; + case 4: + val = bswap32(val); + break; + } +#endif + return val; +} + +static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val, + bool assign, uint32_t size, bool datamatch) +{ + int ret; + struct kvm_ioeventfd iofd = { + .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0, + .addr = addr, + .len = size, + .flags = 0, + .fd = fd, + }; + + trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size, + datamatch); + if (!kvm_enabled()) { + return -ENOSYS; + } + + if (datamatch) { + iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH; + } + if (!assign) { + iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN; + } + + ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd); + + if (ret < 0) { + return -errno; + } + + return 0; +} + +static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val, + bool assign, uint32_t size, bool datamatch) +{ + struct kvm_ioeventfd kick = { + .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0, + .addr = addr, + .flags = KVM_IOEVENTFD_FLAG_PIO, + .len = size, + .fd = fd, + }; + int r; + trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch); + if (!kvm_enabled()) { + return -ENOSYS; + } + if (datamatch) { + kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH; + } + if (!assign) { + kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN; + } + r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick); + if (r < 0) { + return r; + } + return 0; +} + + +static int kvm_check_many_ioeventfds(void) +{ + /* Userspace can use ioeventfd for io notification. This requires a host + * that supports eventfd(2) and an I/O thread; since eventfd does not + * support SIGIO it cannot interrupt the vcpu. + * + * Older kernels have a 6 device limit on the KVM io bus. Find out so we + * can avoid creating too many ioeventfds. + */ +#if defined(CONFIG_EVENTFD) + int ioeventfds[7]; + int i, ret = 0; + for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) { + ioeventfds[i] = eventfd(0, EFD_CLOEXEC); + if (ioeventfds[i] < 0) { + break; + } + ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true); + if (ret < 0) { + close(ioeventfds[i]); + break; + } + } + + /* Decide whether many devices are supported or not */ + ret = i == ARRAY_SIZE(ioeventfds); + + while (i-- > 0) { + kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true); + close(ioeventfds[i]); + } + return ret; +#else + return 0; +#endif +} + +static const KVMCapabilityInfo * +kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list) +{ + while (list->name) { + if (!kvm_check_extension(s, list->value)) { + return list; + } + list++; + } + return NULL; +} + +void kvm_set_max_memslot_size(hwaddr max_slot_size) +{ + g_assert( + ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size + ); + kvm_max_slot_size = max_slot_size; +} + +static void kvm_set_phys_mem(KVMMemoryListener *kml, + MemoryRegionSection *section, bool add) +{ + KVMSlot *mem; + int err; + MemoryRegion *mr = section->mr; + bool writeable = !mr->readonly && !mr->rom_device; + hwaddr start_addr, size, slot_size, mr_offset; + ram_addr_t ram_start_offset; + void *ram; + + if (!memory_region_is_ram(mr)) { + if (writeable || !kvm_readonly_mem_allowed) { + return; + } else if (!mr->romd_mode) { + /* If the memory device is not in romd_mode, then we actually want + * to remove the kvm memory slot so all accesses will trap. 
*/ + add = false; + } + } + + size = kvm_align_section(section, &start_addr); + if (!size) { + return; + } + + /* The offset of the kvmslot within the memory region */ + mr_offset = section->offset_within_region + start_addr - + section->offset_within_address_space; + + /* use aligned delta to align the ram address and offset */ + ram = memory_region_get_ram_ptr(mr) + mr_offset; + ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset; + + kvm_slots_lock(); + + if (!add) { + do { + slot_size = MIN(kvm_max_slot_size, size); + mem = kvm_lookup_matching_slot(kml, start_addr, slot_size); + if (!mem) { + goto out; + } + if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { + /* + * NOTE: We should be aware of the fact that here we're only + * doing a best effort to sync dirty bits. No matter whether + * we're using dirty log or dirty ring, we ignored two facts: + * + * (1) dirty bits can reside in hardware buffers (PML) + * + * (2) after we collected dirty bits here, pages can be dirtied + * again before we do the final KVM_SET_USER_MEMORY_REGION to + * remove the slot. + * + * Not easy. Let's cross the fingers until it's fixed. + */ + if (kvm_state->kvm_dirty_ring_size) { + kvm_dirty_ring_reap_locked(kvm_state); + } else { + kvm_slot_get_dirty_log(kvm_state, mem); + } + kvm_slot_sync_dirty_pages(mem); + } + + /* unregister the slot */ + g_free(mem->dirty_bmap); + mem->dirty_bmap = NULL; + mem->memory_size = 0; + mem->flags = 0; + err = kvm_set_user_memory_region(kml, mem, false); + if (err) { + fprintf(stderr, "%s: error unregistering slot: %s\n", + __func__, strerror(-err)); + abort(); + } + start_addr += slot_size; + size -= slot_size; + } while (size); + goto out; + } + + /* register the new slot */ + do { + slot_size = MIN(kvm_max_slot_size, size); + mem = kvm_alloc_slot(kml); + mem->as_id = kml->as_id; + mem->memory_size = slot_size; + mem->start_addr = start_addr; + mem->ram_start_offset = ram_start_offset; + mem->ram = ram; + mem->flags = kvm_mem_flags(mr); + kvm_slot_init_dirty_bitmap(mem); + err = kvm_set_user_memory_region(kml, mem, true); + if (err) { + fprintf(stderr, "%s: error registering slot: %s\n", __func__, + strerror(-err)); + abort(); + } + start_addr += slot_size; + ram_start_offset += slot_size; + ram += slot_size; + size -= slot_size; + } while (size); + +out: + kvm_slots_unlock(); +} + +static void *kvm_dirty_ring_reaper_thread(void *data) +{ + KVMState *s = data; + struct KVMDirtyRingReaper *r = &s->reaper; + + rcu_register_thread(); + + trace_kvm_dirty_ring_reaper("init"); + + while (true) { + r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT; + trace_kvm_dirty_ring_reaper("wait"); + /* + * TODO: provide a smarter timeout rather than a constant? 
+ */ + sleep(1); + + trace_kvm_dirty_ring_reaper("wakeup"); + r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING; + + qemu_mutex_lock_iothread(); + kvm_dirty_ring_reap(s); + qemu_mutex_unlock_iothread(); + + r->reaper_iteration++; + } + + trace_kvm_dirty_ring_reaper("exit"); + + rcu_unregister_thread(); + + return NULL; +} + +static int kvm_dirty_ring_reaper_init(KVMState *s) +{ + struct KVMDirtyRingReaper *r = &s->reaper; + + qemu_thread_create(&r->reaper_thr, "kvm-reaper", + kvm_dirty_ring_reaper_thread, + s, QEMU_THREAD_JOINABLE); + + return 0; +} + +static void kvm_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); + + memory_region_ref(section->mr); + kvm_set_phys_mem(kml, section, true); +} + +static void kvm_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); + + kvm_set_phys_mem(kml, section, false); + memory_region_unref(section->mr); +} + +static void kvm_log_sync(MemoryListener *listener, + MemoryRegionSection *section) +{ + KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); + + kvm_slots_lock(); + kvm_physical_sync_dirty_bitmap(kml, section); + kvm_slots_unlock(); +} + +static void kvm_log_sync_global(MemoryListener *l) +{ + KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener); + KVMState *s = kvm_state; + KVMSlot *mem; + int i; + + /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */ + kvm_dirty_ring_flush(); + + /* + * TODO: make this faster when nr_slots is big while there are + * only a few used slots (small VMs). + */ + kvm_slots_lock(); + for (i = 0; i < s->nr_slots; i++) { + mem = &kml->slots[i]; + if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { + kvm_slot_sync_dirty_pages(mem); + /* + * This is not needed by KVM_GET_DIRTY_LOG because the + * ioctl will unconditionally overwrite the whole region. + * However kvm dirty ring has no such side effect. 
+ */ + kvm_slot_reset_dirty_pages(mem); + } + } + kvm_slots_unlock(); +} + +static void kvm_log_clear(MemoryListener *listener, + MemoryRegionSection *section) +{ + KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); + int r; + + r = kvm_physical_log_clear(kml, section); + if (r < 0) { + error_report_once("%s: kvm log clear failed: mr=%s " + "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__, + section->mr->name, section->offset_within_region, + int128_get64(section->size)); + abort(); + } +} + +static void kvm_mem_ioeventfd_add(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, + EventNotifier *e) +{ + int fd = event_notifier_get_fd(e); + int r; + + r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space, + data, true, int128_get64(section->size), + match_data); + if (r < 0) { + fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n", + __func__, strerror(-r), -r); + abort(); + } +} + +static void kvm_mem_ioeventfd_del(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, + EventNotifier *e) +{ + int fd = event_notifier_get_fd(e); + int r; + + r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space, + data, false, int128_get64(section->size), + match_data); + if (r < 0) { + fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n", + __func__, strerror(-r), -r); + abort(); + } +} + +static void kvm_io_ioeventfd_add(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, + EventNotifier *e) +{ + int fd = event_notifier_get_fd(e); + int r; + + r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space, + data, true, int128_get64(section->size), + match_data); + if (r < 0) { + fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n", + __func__, strerror(-r), -r); + abort(); + } +} + +static void kvm_io_ioeventfd_del(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, + EventNotifier *e) + +{ + int fd = event_notifier_get_fd(e); + int r; + + r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space, + data, false, int128_get64(section->size), + match_data); + if (r < 0) { + fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n", + __func__, strerror(-r), -r); + abort(); + } +} + +void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, + AddressSpace *as, int as_id, const char *name) +{ + int i; + + kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot)); + kml->as_id = as_id; + + for (i = 0; i < s->nr_slots; i++) { + kml->slots[i].slot = i; + } + + kml->listener.region_add = kvm_region_add; + kml->listener.region_del = kvm_region_del; + kml->listener.log_start = kvm_log_start; + kml->listener.log_stop = kvm_log_stop; + kml->listener.priority = 10; + kml->listener.name = name; + + if (s->kvm_dirty_ring_size) { + kml->listener.log_sync_global = kvm_log_sync_global; + } else { + kml->listener.log_sync = kvm_log_sync; + kml->listener.log_clear = kvm_log_clear; + } + + memory_listener_register(&kml->listener, as); + + for (i = 0; i < s->nr_as; ++i) { + if (!s->as[i].as) { + s->as[i].as = as; + s->as[i].ml = kml; + break; + } + } +} + +static MemoryListener kvm_io_listener = { + .name = "kvm-io", + .eventfd_add = kvm_io_ioeventfd_add, + .eventfd_del = kvm_io_ioeventfd_del, + .priority = 10, +}; + +int kvm_set_irq(KVMState *s, int irq, int level) +{ + struct kvm_irq_level event; + int ret; + + assert(kvm_async_interrupts_enabled()); + + event.level = level; + event.irq 
= irq; + ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event); + if (ret < 0) { + perror("kvm_set_irq"); + abort(); + } + + return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status; +} + +#ifdef KVM_CAP_IRQ_ROUTING +typedef struct KVMMSIRoute { + struct kvm_irq_routing_entry kroute; + QTAILQ_ENTRY(KVMMSIRoute) entry; +} KVMMSIRoute; + +static void set_gsi(KVMState *s, unsigned int gsi) +{ + set_bit(gsi, s->used_gsi_bitmap); +} + +static void clear_gsi(KVMState *s, unsigned int gsi) +{ + clear_bit(gsi, s->used_gsi_bitmap); +} + +void kvm_init_irq_routing(KVMState *s) +{ + int gsi_count, i; + + gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1; + if (gsi_count > 0) { + /* Round up so we can search ints using ffs */ + s->used_gsi_bitmap = bitmap_new(gsi_count); + s->gsi_count = gsi_count; + } + + s->irq_routes = g_malloc0(sizeof(*s->irq_routes)); + s->nr_allocated_irq_routes = 0; + + if (!kvm_direct_msi_allowed) { + for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) { + QTAILQ_INIT(&s->msi_hashtab[i]); + } + } + + kvm_arch_init_irq_routing(s); +} + +void kvm_irqchip_commit_routes(KVMState *s) +{ + int ret; + + if (kvm_gsi_direct_mapping()) { + return; + } + + if (!kvm_gsi_routing_enabled()) { + return; + } + + s->irq_routes->flags = 0; + trace_kvm_irqchip_commit_routes(); + ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes); + assert(ret == 0); +} + +static void kvm_add_routing_entry(KVMState *s, + struct kvm_irq_routing_entry *entry) +{ + struct kvm_irq_routing_entry *new; + int n, size; + + if (s->irq_routes->nr == s->nr_allocated_irq_routes) { + n = s->nr_allocated_irq_routes * 2; + if (n < 64) { + n = 64; + } + size = sizeof(struct kvm_irq_routing); + size += n * sizeof(*new); + s->irq_routes = g_realloc(s->irq_routes, size); + s->nr_allocated_irq_routes = n; + } + n = s->irq_routes->nr++; + new = &s->irq_routes->entries[n]; + + *new = *entry; + + set_gsi(s, entry->gsi); +} + +static int kvm_update_routing_entry(KVMState *s, + struct kvm_irq_routing_entry *new_entry) +{ + struct kvm_irq_routing_entry *entry; + int n; + + for (n = 0; n < s->irq_routes->nr; n++) { + entry = &s->irq_routes->entries[n]; + if (entry->gsi != new_entry->gsi) { + continue; + } + + if(!memcmp(entry, new_entry, sizeof *entry)) { + return 0; + } + + *entry = *new_entry; + + return 0; + } + + return -ESRCH; +} + +void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin) +{ + struct kvm_irq_routing_entry e = {}; + + assert(pin < s->gsi_count); + + e.gsi = irq; + e.type = KVM_IRQ_ROUTING_IRQCHIP; + e.flags = 0; + e.u.irqchip.irqchip = irqchip; + e.u.irqchip.pin = pin; + kvm_add_routing_entry(s, &e); +} + +void kvm_irqchip_release_virq(KVMState *s, int virq) +{ + struct kvm_irq_routing_entry *e; + int i; + + if (kvm_gsi_direct_mapping()) { + return; + } + + for (i = 0; i < s->irq_routes->nr; i++) { + e = &s->irq_routes->entries[i]; + if (e->gsi == virq) { + s->irq_routes->nr--; + *e = s->irq_routes->entries[s->irq_routes->nr]; + } + } + clear_gsi(s, virq); + kvm_arch_release_virq_post(virq); + trace_kvm_irqchip_release_virq(virq); +} + +void kvm_irqchip_add_change_notifier(Notifier *n) +{ + notifier_list_add(&kvm_irqchip_change_notifiers, n); +} + +void kvm_irqchip_remove_change_notifier(Notifier *n) +{ + notifier_remove(n); +} + +void kvm_irqchip_change_notify(void) +{ + notifier_list_notify(&kvm_irqchip_change_notifiers, NULL); +} + +static unsigned int kvm_hash_msi(uint32_t data) +{ + /* This is optimized for IA32 MSI layout. 
However, no other arch shall + * repeat the mistake of not providing a direct MSI injection API. */ + return data & 0xff; +} + +static void kvm_flush_dynamic_msi_routes(KVMState *s) +{ + KVMMSIRoute *route, *next; + unsigned int hash; + + for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) { + QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) { + kvm_irqchip_release_virq(s, route->kroute.gsi); + QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry); + g_free(route); + } + } +} + +static int kvm_irqchip_get_virq(KVMState *s) +{ + int next_virq; + + /* + * PIC and IOAPIC share the first 16 GSI numbers, thus the available + * GSI numbers are more than the number of IRQ route. Allocating a GSI + * number can succeed even though a new route entry cannot be added. + * When this happens, flush dynamic MSI entries to free IRQ route entries. + */ + if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) { + kvm_flush_dynamic_msi_routes(s); + } + + /* Return the lowest unused GSI in the bitmap */ + next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count); + if (next_virq >= s->gsi_count) { + return -ENOSPC; + } else { + return next_virq; + } +} + +static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg) +{ + unsigned int hash = kvm_hash_msi(msg.data); + KVMMSIRoute *route; + + QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) { + if (route->kroute.u.msi.address_lo == (uint32_t)msg.address && + route->kroute.u.msi.address_hi == (msg.address >> 32) && + route->kroute.u.msi.data == le32_to_cpu(msg.data)) { + return route; + } + } + return NULL; +} + +int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) +{ + struct kvm_msi msi; + KVMMSIRoute *route; + + if (kvm_direct_msi_allowed) { + msi.address_lo = (uint32_t)msg.address; + msi.address_hi = msg.address >> 32; + msi.data = le32_to_cpu(msg.data); + msi.flags = 0; + memset(msi.pad, 0, sizeof(msi.pad)); + + return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi); + } + + route = kvm_lookup_msi_route(s, msg); + if (!route) { + int virq; + + virq = kvm_irqchip_get_virq(s); + if (virq < 0) { + return virq; + } + + route = g_malloc0(sizeof(KVMMSIRoute)); + route->kroute.gsi = virq; + route->kroute.type = KVM_IRQ_ROUTING_MSI; + route->kroute.flags = 0; + route->kroute.u.msi.address_lo = (uint32_t)msg.address; + route->kroute.u.msi.address_hi = msg.address >> 32; + route->kroute.u.msi.data = le32_to_cpu(msg.data); + + kvm_add_routing_entry(s, &route->kroute); + kvm_irqchip_commit_routes(s); + + QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route, + entry); + } + + assert(route->kroute.type == KVM_IRQ_ROUTING_MSI); + + return kvm_set_irq(s, route->kroute.gsi, 1); +} + +int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev) +{ + struct kvm_irq_routing_entry kroute = {}; + int virq; + MSIMessage msg = {0, 0}; + + if (pci_available && dev) { + msg = pci_get_msi_message(dev, vector); + } + + if (kvm_gsi_direct_mapping()) { + return kvm_arch_msi_data_to_gsi(msg.data); + } + + if (!kvm_gsi_routing_enabled()) { + return -ENOSYS; + } + + virq = kvm_irqchip_get_virq(s); + if (virq < 0) { + return virq; + } + + kroute.gsi = virq; + kroute.type = KVM_IRQ_ROUTING_MSI; + kroute.flags = 0; + kroute.u.msi.address_lo = (uint32_t)msg.address; + kroute.u.msi.address_hi = msg.address >> 32; + kroute.u.msi.data = le32_to_cpu(msg.data); + if (pci_available && kvm_msi_devid_required()) { + kroute.flags = KVM_MSI_VALID_DEVID; + kroute.u.msi.devid = pci_requester_id(dev); + } + if (kvm_arch_fixup_msi_route(&kroute, 
msg.address, msg.data, dev)) { + kvm_irqchip_release_virq(s, virq); + return -EINVAL; + } + + trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A", + vector, virq); + + kvm_add_routing_entry(s, &kroute); + kvm_arch_add_msi_route_post(&kroute, vector, dev); + kvm_irqchip_commit_routes(s); + + return virq; +} + +int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, + PCIDevice *dev) +{ + struct kvm_irq_routing_entry kroute = {}; + + if (kvm_gsi_direct_mapping()) { + return 0; + } + + if (!kvm_irqchip_in_kernel()) { + return -ENOSYS; + } + + kroute.gsi = virq; + kroute.type = KVM_IRQ_ROUTING_MSI; + kroute.flags = 0; + kroute.u.msi.address_lo = (uint32_t)msg.address; + kroute.u.msi.address_hi = msg.address >> 32; + kroute.u.msi.data = le32_to_cpu(msg.data); + if (pci_available && kvm_msi_devid_required()) { + kroute.flags = KVM_MSI_VALID_DEVID; + kroute.u.msi.devid = pci_requester_id(dev); + } + if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) { + return -EINVAL; + } + + trace_kvm_irqchip_update_msi_route(virq); + + return kvm_update_routing_entry(s, &kroute); +} + +static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event, + EventNotifier *resample, int virq, + bool assign) +{ + int fd = event_notifier_get_fd(event); + int rfd = resample ? event_notifier_get_fd(resample) : -1; + + struct kvm_irqfd irqfd = { + .fd = fd, + .gsi = virq, + .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN, + }; + + if (rfd != -1) { + assert(assign); + if (kvm_irqchip_is_split()) { + /* + * When the slow irqchip (e.g. IOAPIC) is in the + * userspace, KVM kernel resamplefd will not work because + * the EOI of the interrupt will be delivered to userspace + * instead, so the KVM kernel resamplefd kick will be + * skipped. The userspace here mimics what the kernel + * provides with resamplefd, remember the resamplefd and + * kick it when we receive EOI of this IRQ. + * + * This is hackery because IOAPIC is mostly bypassed + * (except EOI broadcasts) when irqfd is used. However + * this can bring much performance back for split irqchip + * with INTx IRQs (for VFIO, this gives 93% perf of the + * full fast path, which is 46% perf boost comparing to + * the INTx slow path). 
+ */ + kvm_resample_fd_insert(virq, resample); + } else { + irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE; + irqfd.resamplefd = rfd; + } + } else if (!assign) { + if (kvm_irqchip_is_split()) { + kvm_resample_fd_remove(virq); + } + } + + if (!kvm_irqfds_enabled()) { + return -ENOSYS; + } + + return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd); +} + +int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter) +{ + struct kvm_irq_routing_entry kroute = {}; + int virq; + + if (!kvm_gsi_routing_enabled()) { + return -ENOSYS; + } + + virq = kvm_irqchip_get_virq(s); + if (virq < 0) { + return virq; + } + + kroute.gsi = virq; + kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER; + kroute.flags = 0; + kroute.u.adapter.summary_addr = adapter->summary_addr; + kroute.u.adapter.ind_addr = adapter->ind_addr; + kroute.u.adapter.summary_offset = adapter->summary_offset; + kroute.u.adapter.ind_offset = adapter->ind_offset; + kroute.u.adapter.adapter_id = adapter->adapter_id; + + kvm_add_routing_entry(s, &kroute); + + return virq; +} + +int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint) +{ + struct kvm_irq_routing_entry kroute = {}; + int virq; + + if (!kvm_gsi_routing_enabled()) { + return -ENOSYS; + } + if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) { + return -ENOSYS; + } + virq = kvm_irqchip_get_virq(s); + if (virq < 0) { + return virq; + } + + kroute.gsi = virq; + kroute.type = KVM_IRQ_ROUTING_HV_SINT; + kroute.flags = 0; + kroute.u.hv_sint.vcpu = vcpu; + kroute.u.hv_sint.sint = sint; + + kvm_add_routing_entry(s, &kroute); + kvm_irqchip_commit_routes(s); + + return virq; +} + +#else /* !KVM_CAP_IRQ_ROUTING */ + +void kvm_init_irq_routing(KVMState *s) +{ +} + +void kvm_irqchip_release_virq(KVMState *s, int virq) +{ +} + +int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) +{ + abort(); +} + +int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev) +{ + return -ENOSYS; +} + +int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter) +{ + return -ENOSYS; +} + +int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint) +{ + return -ENOSYS; +} + +static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event, + EventNotifier *resample, int virq, + bool assign) +{ + abort(); +} + +int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg) +{ + return -ENOSYS; +} +#endif /* !KVM_CAP_IRQ_ROUTING */ + +int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + EventNotifier *rn, int virq) +{ + return kvm_irqchip_assign_irqfd(s, n, rn, virq, true); +} + +int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + int virq) +{ + return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false); +} + +int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, + EventNotifier *rn, qemu_irq irq) +{ + gpointer key, gsi; + gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); + + if (!found) { + return -ENXIO; + } + return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi)); +} + +int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, + qemu_irq irq) +{ + gpointer key, gsi; + gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); + + if (!found) { + return -ENXIO; + } + return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi)); +} + +void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi) +{ + g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi)); +} + +static void kvm_irqchip_create(KVMState *s) +{ + int ret; + + 
assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO); + if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) { + ; + } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) { + ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0); + if (ret < 0) { + fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret)); + exit(1); + } + } else { + return; + } + + /* First probe and see if there's a arch-specific hook to create the + * in-kernel irqchip for us */ + ret = kvm_arch_irqchip_create(s); + if (ret == 0) { + if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) { + perror("Split IRQ chip mode not supported."); + exit(1); + } else { + ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP); + } + } + if (ret < 0) { + fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret)); + exit(1); + } + + kvm_kernel_irqchip = true; + /* If we have an in-kernel IRQ chip then we must have asynchronous + * interrupt delivery (though the reverse is not necessarily true) + */ + kvm_async_interrupts_allowed = true; + kvm_halt_in_kernel_allowed = true; + + kvm_init_irq_routing(s); + + s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal); +} + +/* Find number of supported CPUs using the recommended + * procedure from the kernel API documentation to cope with + * older kernels that may be missing capabilities. + */ +static int kvm_recommended_vcpus(KVMState *s) +{ + int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS); + return (ret) ? ret : 4; +} + +static int kvm_max_vcpus(KVMState *s) +{ + int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS); + return (ret) ? ret : kvm_recommended_vcpus(s); +} + +static int kvm_max_vcpu_id(KVMState *s) +{ + int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID); + return (ret) ? ret : kvm_max_vcpus(s); +} + +bool kvm_vcpu_id_is_valid(int vcpu_id) +{ + KVMState *s = KVM_STATE(current_accel()); + return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s); +} + +bool kvm_dirty_ring_enabled(void) +{ + return kvm_state->kvm_dirty_ring_size ? true : false; +} + +static int kvm_init(MachineState *ms) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + static const char upgrade_note[] = + "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" + "(see http://sourceforge.net/projects/kvm).\n"; + struct { + const char *name; + int num; + } num_cpus[] = { + { "SMP", ms->smp.cpus }, + { "hotpluggable", ms->smp.max_cpus }, + { NULL, } + }, *nc = num_cpus; + int soft_vcpus_limit, hard_vcpus_limit; + KVMState *s; + const KVMCapabilityInfo *missing_cap; + int ret; + int type = 0; + uint64_t dirty_log_manual_caps; + + qemu_mutex_init(&kml_slots_lock); + + s = KVM_STATE(ms->accelerator); + + /* + * On systems where the kernel can support different base page + * sizes, host page size may be different from TARGET_PAGE_SIZE, + * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum + * page size for the system though. 
+ */ + assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size); + + s->sigmask_len = 8; + +#ifdef KVM_CAP_SET_GUEST_DEBUG + QTAILQ_INIT(&s->kvm_sw_breakpoints); +#endif + QLIST_INIT(&s->kvm_parked_vcpus); + s->fd = qemu_open_old("/dev/kvm", O_RDWR); + if (s->fd == -1) { + fprintf(stderr, "Could not access KVM kernel module: %m\n"); + ret = -errno; + goto err; + } + + ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); + if (ret < KVM_API_VERSION) { + if (ret >= 0) { + ret = -EINVAL; + } + fprintf(stderr, "kvm version too old\n"); + goto err; + } + + if (ret > KVM_API_VERSION) { + ret = -EINVAL; + fprintf(stderr, "kvm version not supported\n"); + goto err; + } + + kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT); + s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); + + /* If unspecified, use the default value */ + if (!s->nr_slots) { + s->nr_slots = 32; + } + + s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE); + if (s->nr_as <= 1) { + s->nr_as = 1; + } + s->as = g_new0(struct KVMAs, s->nr_as); + + if (object_property_find(OBJECT(current_machine), "kvm-type")) { + g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine), + "kvm-type", + &error_abort); + type = mc->kvm_type(ms, kvm_type); + } else if (mc->kvm_type) { + type = mc->kvm_type(ms, NULL); + } + + do { + ret = kvm_ioctl(s, KVM_CREATE_VM, type); + } while (ret == -EINTR); + + if (ret < 0) { + fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret, + strerror(-ret)); + +#ifdef TARGET_S390X + if (ret == -EINVAL) { + fprintf(stderr, + "Host kernel setup problem detected. Please verify:\n"); + fprintf(stderr, "- for kernels supporting the switch_amode or" + " user_mode parameters, whether\n"); + fprintf(stderr, + " user space is running in primary address space\n"); + fprintf(stderr, + "- for kernels supporting the vm.allocate_pgste sysctl, " + "whether it is enabled\n"); + } +#elif defined(TARGET_PPC) + if (ret == -EINVAL) { + fprintf(stderr, + "PPC KVM module is not loaded. Try modprobe kvm_%s.\n", + (type == 2) ? 
"pr" : "hv"); + } +#endif + goto err; + } + + s->vmfd = ret; + + /* check the vcpu limits */ + soft_vcpus_limit = kvm_recommended_vcpus(s); + hard_vcpus_limit = kvm_max_vcpus(s); + + while (nc->name) { + if (nc->num > soft_vcpus_limit) { + warn_report("Number of %s cpus requested (%d) exceeds " + "the recommended cpus supported by KVM (%d)", + nc->name, nc->num, soft_vcpus_limit); + + if (nc->num > hard_vcpus_limit) { + fprintf(stderr, "Number of %s cpus requested (%d) exceeds " + "the maximum cpus supported by KVM (%d)\n", + nc->name, nc->num, hard_vcpus_limit); + exit(1); + } + } + nc++; + } + + missing_cap = kvm_check_extension_list(s, kvm_required_capabilites); + if (!missing_cap) { + missing_cap = + kvm_check_extension_list(s, kvm_arch_required_capabilities); + } + if (missing_cap) { + ret = -EINVAL; + fprintf(stderr, "kvm does not support %s\n%s", + missing_cap->name, upgrade_note); + goto err; + } + + s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); + s->coalesced_pio = s->coalesced_mmio && + kvm_check_extension(s, KVM_CAP_COALESCED_PIO); + + /* + * Enable KVM dirty ring if supported, otherwise fall back to + * dirty logging mode + */ + if (s->kvm_dirty_ring_size > 0) { + uint64_t ring_bytes; + + ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn); + + /* Read the max supported pages */ + ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING); + if (ret > 0) { + if (ring_bytes > ret) { + error_report("KVM dirty ring size %" PRIu32 " too big " + "(maximum is %ld). Please use a smaller value.", + s->kvm_dirty_ring_size, + (long)ret / sizeof(struct kvm_dirty_gfn)); + ret = -EINVAL; + goto err; + } + + ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes); + if (ret) { + error_report("Enabling of KVM dirty ring failed: %s. " + "Suggested minimum value is 1024.", strerror(-ret)); + goto err; + } + + s->kvm_dirty_ring_bytes = ring_bytes; + } else { + warn_report("KVM dirty ring not available, using bitmap method"); + s->kvm_dirty_ring_size = 0; + } + } + + /* + * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is + * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no + * page is wr-protected initially, which is against how kvm dirty ring is + * usage - kvm dirty ring requires all pages are wr-protected at the very + * beginning. Enabling this feature for dirty ring causes data corruption. + * + * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log, + * we may expect a higher stall time when starting the migration. In the + * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too: + * instead of clearing dirty bit, it can be a way to explicitly wr-protect + * guest pages. + */ + if (!s->kvm_dirty_ring_size) { + dirty_log_manual_caps = + kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2); + dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | + KVM_DIRTY_LOG_INITIALLY_SET); + s->manual_dirty_log_protect = dirty_log_manual_caps; + if (dirty_log_manual_caps) { + ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, + dirty_log_manual_caps); + if (ret) { + warn_report("Trying to enable capability %"PRIu64" of " + "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. " + "Falling back to the legacy mode. 
", + dirty_log_manual_caps); + s->manual_dirty_log_protect = 0; + } + } + } + +#ifdef KVM_CAP_VCPU_EVENTS + s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); +#endif + + s->robust_singlestep = + kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP); + +#ifdef KVM_CAP_DEBUGREGS + s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); +#endif + + s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); + +#ifdef KVM_CAP_IRQ_ROUTING + kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0); +#endif + + s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3); + + s->irq_set_ioctl = KVM_IRQ_LINE; + if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { + s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; + } + + kvm_readonly_mem_allowed = + (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); + + kvm_eventfds_allowed = + (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0); + + kvm_irqfds_allowed = + (kvm_check_extension(s, KVM_CAP_IRQFD) > 0); + + kvm_resamplefds_allowed = + (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0); + + kvm_vm_attributes_allowed = + (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0); + + kvm_ioeventfd_any_length_allowed = + (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0); + + kvm_state = s; + + ret = kvm_arch_init(ms, s); + if (ret < 0) { + goto err; + } + + if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) { + s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; + } + + qemu_register_reset(kvm_unpoison_all, NULL); + + if (s->kernel_irqchip_allowed) { + kvm_irqchip_create(s); + } + + if (kvm_eventfds_allowed) { + s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; + s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; + } + s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; + s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; + + kvm_memory_listener_register(s, &s->memory_listener, + &address_space_memory, 0, "kvm-memory"); + if (kvm_eventfds_allowed) { + memory_listener_register(&kvm_io_listener, + &address_space_io); + } + memory_listener_register(&kvm_coalesced_pio_listener, + &address_space_io); + + s->many_ioeventfds = kvm_check_many_ioeventfds(); + + s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); + if (!s->sync_mmu) { + ret = ram_block_discard_disable(true); + assert(!ret); + } + + if (s->kvm_dirty_ring_size) { + ret = kvm_dirty_ring_reaper_init(s); + if (ret) { + goto err; + } + } + + return 0; + +err: + assert(ret < 0); + if (s->vmfd >= 0) { + close(s->vmfd); + } + if (s->fd != -1) { + close(s->fd); + } + g_free(s->memory_listener.slots); + + return ret; +} + +void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len) +{ + s->sigmask_len = sigmask_len; +} + +static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction, + int size, uint32_t count) +{ + int i; + uint8_t *ptr = data; + + for (i = 0; i < count; i++) { + address_space_rw(&address_space_io, port, attrs, + ptr, size, + direction == KVM_EXIT_IO_OUT); + ptr += size; + } +} + +static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run) +{ + fprintf(stderr, "KVM internal error. 
Suberror: %d\n", + run->internal.suberror); + + if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) { + int i; + + for (i = 0; i < run->internal.ndata; ++i) { + fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n", + i, (uint64_t)run->internal.data[i]); + } + } + if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) { + fprintf(stderr, "emulation failure\n"); + if (!kvm_arch_stop_on_emulation_error(cpu)) { + cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); + return EXCP_INTERRUPT; + } + } + /* FIXME: Should trigger a qmp message to let management know + * something went wrong. + */ + return -1; +} + +void kvm_flush_coalesced_mmio_buffer(void) +{ + KVMState *s = kvm_state; + + if (s->coalesced_flush_in_progress) { + return; + } + + s->coalesced_flush_in_progress = true; + + if (s->coalesced_mmio_ring) { + struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring; + while (ring->first != ring->last) { + struct kvm_coalesced_mmio *ent; + + ent = &ring->coalesced_mmio[ring->first]; + + if (ent->pio == 1) { + address_space_write(&address_space_io, ent->phys_addr, + MEMTXATTRS_UNSPECIFIED, ent->data, + ent->len); + } else { + cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len); + } + smp_wmb(); + ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX; + } + } + + s->coalesced_flush_in_progress = false; +} + +bool kvm_cpu_check_are_resettable(void) +{ + return kvm_arch_cpu_check_are_resettable(); +} + +static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) +{ + if (!cpu->vcpu_dirty) { + kvm_arch_get_registers(cpu); + cpu->vcpu_dirty = true; + } +} + +void kvm_cpu_synchronize_state(CPUState *cpu) +{ + if (!cpu->vcpu_dirty) { + run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL); + } +} + +static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) +{ + kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE); + cpu->vcpu_dirty = false; +} + +void kvm_cpu_synchronize_post_reset(CPUState *cpu) +{ + run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL); +} + +static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) +{ + kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + cpu->vcpu_dirty = false; +} + +void kvm_cpu_synchronize_post_init(CPUState *cpu) +{ + run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL); +} + +static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg) +{ + cpu->vcpu_dirty = true; +} + +void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu) +{ + run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL); +} + +#ifdef KVM_HAVE_MCE_INJECTION +static __thread void *pending_sigbus_addr; +static __thread int pending_sigbus_code; +static __thread bool have_sigbus_pending; +#endif + +static void kvm_cpu_kick(CPUState *cpu) +{ + qatomic_set(&cpu->kvm_run->immediate_exit, 1); +} + +static void kvm_cpu_kick_self(void) +{ + if (kvm_immediate_exit) { + kvm_cpu_kick(current_cpu); + } else { + qemu_cpu_kick_self(); + } +} + +static void kvm_eat_signals(CPUState *cpu) +{ + struct timespec ts = { 0, 0 }; + siginfo_t siginfo; + sigset_t waitset; + sigset_t chkset; + int r; + + if (kvm_immediate_exit) { + qatomic_set(&cpu->kvm_run->immediate_exit, 0); + /* Write kvm_run->immediate_exit before the cpu->exit_request + * write in kvm_cpu_exec. 
+ */ + smp_wmb(); + return; + } + + sigemptyset(&waitset); + sigaddset(&waitset, SIG_IPI); + + do { + r = sigtimedwait(&waitset, &siginfo, &ts); + if (r == -1 && !(errno == EAGAIN || errno == EINTR)) { + perror("sigtimedwait"); + exit(1); + } + + r = sigpending(&chkset); + if (r == -1) { + perror("sigpending"); + exit(1); + } + } while (sigismember(&chkset, SIG_IPI)); +} + +int kvm_cpu_exec(CPUState *cpu) +{ + struct kvm_run *run = cpu->kvm_run; + int ret, run_ret; + + DPRINTF("kvm_cpu_exec()\n"); + + if (kvm_arch_process_async_events(cpu)) { + qatomic_set(&cpu->exit_request, 0); + return EXCP_HLT; + } + + qemu_mutex_unlock_iothread(); + cpu_exec_start(cpu); + + do { + MemTxAttrs attrs; + + if (cpu->vcpu_dirty) { + kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE); + cpu->vcpu_dirty = false; + } + + kvm_arch_pre_run(cpu, run); + if (qatomic_read(&cpu->exit_request)) { + DPRINTF("interrupt exit requested\n"); + /* + * KVM requires us to reenter the kernel after IO exits to complete + * instruction emulation. This self-signal will ensure that we + * leave ASAP again. + */ + kvm_cpu_kick_self(); + } + + /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit. + * Matching barrier in kvm_eat_signals. + */ + smp_rmb(); + + run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0); + + attrs = kvm_arch_post_run(cpu, run); + +#ifdef KVM_HAVE_MCE_INJECTION + if (unlikely(have_sigbus_pending)) { + qemu_mutex_lock_iothread(); + kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code, + pending_sigbus_addr); + have_sigbus_pending = false; + qemu_mutex_unlock_iothread(); + } +#endif + + if (run_ret < 0) { + if (run_ret == -EINTR || run_ret == -EAGAIN) { + DPRINTF("io window exit\n"); + kvm_eat_signals(cpu); + ret = EXCP_INTERRUPT; + break; + } + fprintf(stderr, "error: kvm run failed %s\n", + strerror(-run_ret)); +#ifdef TARGET_PPC + if (run_ret == -EBUSY) { + fprintf(stderr, + "This is probably because your SMT is enabled.\n" + "VCPU can only run on primary threads with all " + "secondary threads offline.\n"); + } +#endif + ret = -1; + break; + } + + trace_kvm_run_exit(cpu->cpu_index, run->exit_reason); + switch (run->exit_reason) { + case KVM_EXIT_IO: + DPRINTF("handle_io\n"); + /* Called outside BQL */ + kvm_handle_io(run->io.port, attrs, + (uint8_t *)run + run->io.data_offset, + run->io.direction, + run->io.size, + run->io.count); + ret = 0; + break; + case KVM_EXIT_MMIO: + DPRINTF("handle_mmio\n"); + /* Called outside BQL */ + address_space_rw(&address_space_memory, + run->mmio.phys_addr, attrs, + run->mmio.data, + run->mmio.len, + run->mmio.is_write); + ret = 0; + break; + case KVM_EXIT_IRQ_WINDOW_OPEN: + DPRINTF("irq_window_open\n"); + ret = EXCP_INTERRUPT; + break; + case KVM_EXIT_SHUTDOWN: + DPRINTF("shutdown\n"); + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + ret = EXCP_INTERRUPT; + break; + case KVM_EXIT_UNKNOWN: + fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n", + (uint64_t)run->hw.hardware_exit_reason); + ret = -1; + break; + case KVM_EXIT_INTERNAL_ERROR: + ret = kvm_handle_internal_error(cpu, run); + break; + case KVM_EXIT_DIRTY_RING_FULL: + /* + * We shouldn't continue if the dirty ring of this vcpu is + * still full. Got kicked by KVM_RESET_DIRTY_RINGS. 
+ */ + trace_kvm_dirty_ring_full(cpu->cpu_index); + qemu_mutex_lock_iothread(); + kvm_dirty_ring_reap(kvm_state); + qemu_mutex_unlock_iothread(); + ret = 0; + break; + case KVM_EXIT_SYSTEM_EVENT: + switch (run->system_event.type) { + case KVM_SYSTEM_EVENT_SHUTDOWN: + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + ret = EXCP_INTERRUPT; + break; + case KVM_SYSTEM_EVENT_RESET: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + ret = EXCP_INTERRUPT; + break; + case KVM_SYSTEM_EVENT_CRASH: + kvm_cpu_synchronize_state(cpu); + qemu_mutex_lock_iothread(); + qemu_system_guest_panicked(cpu_get_crash_info(cpu)); + qemu_mutex_unlock_iothread(); + ret = 0; + break; + default: + DPRINTF("kvm_arch_handle_exit\n"); + ret = kvm_arch_handle_exit(cpu, run); + break; + } + break; + default: + DPRINTF("kvm_arch_handle_exit\n"); + ret = kvm_arch_handle_exit(cpu, run); + break; + } + } while (ret == 0); + + cpu_exec_end(cpu); + qemu_mutex_lock_iothread(); + + if (ret < 0) { + cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); + vm_stop(RUN_STATE_INTERNAL_ERROR); + } + + qatomic_set(&cpu->exit_request, 0); + return ret; +} + +int kvm_ioctl(KVMState *s, int type, ...) +{ + int ret; + void *arg; + va_list ap; + + va_start(ap, type); + arg = va_arg(ap, void *); + va_end(ap); + + trace_kvm_ioctl(type, arg); + ret = ioctl(s->fd, type, arg); + if (ret == -1) { + ret = -errno; + } + return ret; +} + +int kvm_vm_ioctl(KVMState *s, int type, ...) +{ + int ret; + void *arg; + va_list ap; + + va_start(ap, type); + arg = va_arg(ap, void *); + va_end(ap); + + trace_kvm_vm_ioctl(type, arg); + ret = ioctl(s->vmfd, type, arg); + if (ret == -1) { + ret = -errno; + } + return ret; +} + +int kvm_vcpu_ioctl(CPUState *cpu, int type, ...) +{ + int ret; + void *arg; + va_list ap; + + va_start(ap, type); + arg = va_arg(ap, void *); + va_end(ap); + + trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg); + ret = ioctl(cpu->kvm_fd, type, arg); + if (ret == -1) { + ret = -errno; + } + return ret; +} + +int kvm_device_ioctl(int fd, int type, ...) +{ + int ret; + void *arg; + va_list ap; + + va_start(ap, type); + arg = va_arg(ap, void *); + va_end(ap); + + trace_kvm_device_ioctl(fd, type, arg); + ret = ioctl(fd, type, arg); + if (ret == -1) { + ret = -errno; + } + return ret; +} + +int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr) +{ + int ret; + struct kvm_device_attr attribute = { + .group = group, + .attr = attr, + }; + + if (!kvm_vm_attributes_allowed) { + return 0; + } + + ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute); + /* kvm returns 0 on success for HAS_DEVICE_ATTR */ + return ret ? 0 : 1; +} + +int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr) +{ + struct kvm_device_attr attribute = { + .group = group, + .attr = attr, + .flags = 0, + }; + + return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1; +} + +int kvm_device_access(int fd, int group, uint64_t attr, + void *val, bool write, Error **errp) +{ + struct kvm_device_attr kvmattr; + int err; + + kvmattr.flags = 0; + kvmattr.group = group; + kvmattr.attr = attr; + kvmattr.addr = (uintptr_t)val; + + err = kvm_device_ioctl(fd, + write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR, + &kvmattr); + if (err < 0) { + error_setg_errno(errp, -err, + "KVM_%s_DEVICE_ATTR failed: Group %d " + "attr 0x%016" PRIx64, + write ? 
"SET" : "GET", group, attr); + } + return err; +} + +bool kvm_has_sync_mmu(void) +{ + return kvm_state->sync_mmu; +} + +int kvm_has_vcpu_events(void) +{ + return kvm_state->vcpu_events; +} + +int kvm_has_robust_singlestep(void) +{ + return kvm_state->robust_singlestep; +} + +int kvm_has_debugregs(void) +{ + return kvm_state->debugregs; +} + +int kvm_max_nested_state_length(void) +{ + return kvm_state->max_nested_state_len; +} + +int kvm_has_many_ioeventfds(void) +{ + if (!kvm_enabled()) { + return 0; + } + return kvm_state->many_ioeventfds; +} + +int kvm_has_gsi_routing(void) +{ +#ifdef KVM_CAP_IRQ_ROUTING + return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING); +#else + return false; +#endif +} + +int kvm_has_intx_set_mask(void) +{ + return kvm_state->intx_set_mask; +} + +bool kvm_arm_supports_user_irq(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ); +} + +#ifdef KVM_CAP_SET_GUEST_DEBUG +struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, + target_ulong pc) +{ + struct kvm_sw_breakpoint *bp; + + QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) { + if (bp->pc == pc) { + return bp; + } + } + return NULL; +} + +int kvm_sw_breakpoints_active(CPUState *cpu) +{ + return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints); +} + +struct kvm_set_guest_debug_data { + struct kvm_guest_debug dbg; + int err; +}; + +static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data) +{ + struct kvm_set_guest_debug_data *dbg_data = + (struct kvm_set_guest_debug_data *) data.host_ptr; + + dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG, + &dbg_data->dbg); +} + +int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) +{ + struct kvm_set_guest_debug_data data; + + data.dbg.control = reinject_trap; + + if (cpu->singlestep_enabled) { + data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP; + } + kvm_arch_update_guest_debug(cpu, &data.dbg); + + run_on_cpu(cpu, kvm_invoke_set_guest_debug, + RUN_ON_CPU_HOST_PTR(&data)); + return data.err; +} + +int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + struct kvm_sw_breakpoint *bp; + int err; + + if (type == GDB_BREAKPOINT_SW) { + bp = kvm_find_sw_breakpoint(cpu, addr); + if (bp) { + bp->use_count++; + return 0; + } + + bp = g_malloc(sizeof(struct kvm_sw_breakpoint)); + bp->pc = addr; + bp->use_count = 1; + err = kvm_arch_insert_sw_breakpoint(cpu, bp); + if (err) { + g_free(bp); + return err; + } + + QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); + } else { + err = kvm_arch_insert_hw_breakpoint(addr, len, type); + if (err) { + return err; + } + } + + CPU_FOREACH(cpu) { + err = kvm_update_guest_debug(cpu, 0); + if (err) { + return err; + } + } + return 0; +} + +int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + struct kvm_sw_breakpoint *bp; + int err; + + if (type == GDB_BREAKPOINT_SW) { + bp = kvm_find_sw_breakpoint(cpu, addr); + if (!bp) { + return -ENOENT; + } + + if (bp->use_count > 1) { + bp->use_count--; + return 0; + } + + err = kvm_arch_remove_sw_breakpoint(cpu, bp); + if (err) { + return err; + } + + QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); + g_free(bp); + } else { + err = kvm_arch_remove_hw_breakpoint(addr, len, type); + if (err) { + return err; + } + } + + CPU_FOREACH(cpu) { + err = kvm_update_guest_debug(cpu, 0); + if (err) { + return err; + } + } + return 0; +} + +void kvm_remove_all_breakpoints(CPUState *cpu) +{ + struct 
kvm_sw_breakpoint *bp, *next; + KVMState *s = cpu->kvm_state; + CPUState *tmpcpu; + + QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) { + if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) { + /* Try harder to find a CPU that currently sees the breakpoint. */ + CPU_FOREACH(tmpcpu) { + if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) { + break; + } + } + } + QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry); + g_free(bp); + } + kvm_arch_remove_all_hw_breakpoints(); + + CPU_FOREACH(cpu) { + kvm_update_guest_debug(cpu, 0); + } +} + +#else /* !KVM_CAP_SET_GUEST_DEBUG */ + +int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) +{ + return -EINVAL; +} + +int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + return -EINVAL; +} + +int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + return -EINVAL; +} + +void kvm_remove_all_breakpoints(CPUState *cpu) +{ +} +#endif /* !KVM_CAP_SET_GUEST_DEBUG */ + +static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset) +{ + KVMState *s = kvm_state; + struct kvm_signal_mask *sigmask; + int r; + + sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset)); + + sigmask->len = s->sigmask_len; + memcpy(sigmask->sigset, sigset, sizeof(*sigset)); + r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask); + g_free(sigmask); + + return r; +} + +static void kvm_ipi_signal(int sig) +{ + if (current_cpu) { + assert(kvm_immediate_exit); + kvm_cpu_kick(current_cpu); + } +} + +void kvm_init_cpu_signals(CPUState *cpu) +{ + int r; + sigset_t set; + struct sigaction sigact; + + memset(&sigact, 0, sizeof(sigact)); + sigact.sa_handler = kvm_ipi_signal; + sigaction(SIG_IPI, &sigact, NULL); + + pthread_sigmask(SIG_BLOCK, NULL, &set); +#if defined KVM_HAVE_MCE_INJECTION + sigdelset(&set, SIGBUS); + pthread_sigmask(SIG_SETMASK, &set, NULL); +#endif + sigdelset(&set, SIG_IPI); + if (kvm_immediate_exit) { + r = pthread_sigmask(SIG_SETMASK, &set, NULL); + } else { + r = kvm_set_signal_mask(cpu, &set); + } + if (r) { + fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r)); + exit(1); + } +} + +/* Called asynchronously in VCPU thread. */ +int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) +{ +#ifdef KVM_HAVE_MCE_INJECTION + if (have_sigbus_pending) { + return 1; + } + have_sigbus_pending = true; + pending_sigbus_addr = addr; + pending_sigbus_code = code; + qatomic_set(&cpu->exit_request, 1); + return 0; +#else + return 1; +#endif +} + +/* Called synchronously (via signalfd) in main thread. */ +int kvm_on_sigbus(int code, void *addr) +{ +#ifdef KVM_HAVE_MCE_INJECTION + /* Action required MCE kills the process if SIGBUS is blocked. Because + * that's what happens in the I/O thread, where we handle MCE via signalfd, + * we can only get action optional here. + */ + assert(code != BUS_MCEERR_AR); + kvm_arch_on_sigbus_vcpu(first_cpu, code, addr); + return 0; +#else + return 1; +#endif +} + +int kvm_create_device(KVMState *s, uint64_t type, bool test) +{ + int ret; + struct kvm_create_device create_dev; + + create_dev.type = type; + create_dev.fd = -1; + create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0; + + if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) { + return -ENOTSUP; + } + + ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev); + if (ret) { + return ret; + } + + return test ? 
0 : create_dev.fd;
+}
+
+bool kvm_device_supported(int vmfd, uint64_t type)
+{
+    struct kvm_create_device create_dev = {
+        .type = type,
+        .fd = -1,
+        .flags = KVM_CREATE_DEVICE_TEST,
+    };
+
+    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
+        return false;
+    }
+
+    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
+}
+
+int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
+{
+    struct kvm_one_reg reg;
+    int r;
+
+    reg.id = id;
+    reg.addr = (uintptr_t) source;
+    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+    if (r) {
+        trace_kvm_failed_reg_set(id, strerror(-r));
+    }
+    return r;
+}
+
+int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
+{
+    struct kvm_one_reg reg;
+    int r;
+
+    reg.id = id;
+    reg.addr = (uintptr_t) target;
+    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    if (r) {
+        trace_kvm_failed_reg_get(id, strerror(-r));
+    }
+    return r;
+}
+
+static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
+                                 hwaddr start_addr, hwaddr size)
+{
+    KVMState *kvm = KVM_STATE(ms->accelerator);
+    int i;
+
+    for (i = 0; i < kvm->nr_as; ++i) {
+        if (kvm->as[i].as == as && kvm->as[i].ml) {
+            size = MIN(kvm_max_slot_size, size);
+            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
+                                                    start_addr, size);
+        }
+    }
+
+    return false;
+}
+
+static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
+                                   const char *name, void *opaque,
+                                   Error **errp)
+{
+    KVMState *s = KVM_STATE(obj);
+    int64_t value = s->kvm_shadow_mem;
+
+    visit_type_int(v, name, &value, errp);
+}
+
+static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
+                                   const char *name, void *opaque,
+                                   Error **errp)
+{
+    KVMState *s = KVM_STATE(obj);
+    int64_t value;
+
+    if (s->fd != -1) {
+        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
+        return;
+    }
+
+    if (!visit_type_int(v, name, &value, errp)) {
+        return;
+    }
+
+    s->kvm_shadow_mem = value;
+}
+
+static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
+                                   const char *name, void *opaque,
+                                   Error **errp)
+{
+    KVMState *s = KVM_STATE(obj);
+    OnOffSplit mode;
+
+    if (s->fd != -1) {
+        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
+        return;
+    }
+
+    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
+        return;
+    }
+    switch (mode) {
+    case ON_OFF_SPLIT_ON:
+        s->kernel_irqchip_allowed = true;
+        s->kernel_irqchip_required = true;
+        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
+        break;
+    case ON_OFF_SPLIT_OFF:
+        s->kernel_irqchip_allowed = false;
+        s->kernel_irqchip_required = false;
+        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
+        break;
+    case ON_OFF_SPLIT_SPLIT:
+        s->kernel_irqchip_allowed = true;
+        s->kernel_irqchip_required = true;
+        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
+        break;
+    default:
+        /* The value was checked in visit_type_OnOffSplit() above. If
+         * we get here, then something is wrong in QEMU.
+ */ + abort(); + } +} + +bool kvm_kernel_irqchip_allowed(void) +{ + return kvm_state->kernel_irqchip_allowed; +} + +bool kvm_kernel_irqchip_required(void) +{ + return kvm_state->kernel_irqchip_required; +} + +bool kvm_kernel_irqchip_split(void) +{ + return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON; +} + +static void kvm_get_dirty_ring_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + uint32_t value = s->kvm_dirty_ring_size; + + visit_type_uint32(v, name, &value, errp); +} + +static void kvm_set_dirty_ring_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + Error *error = NULL; + uint32_t value; + + if (s->fd != -1) { + error_setg(errp, "Cannot set properties after the accelerator has been initialized"); + return; + } + + visit_type_uint32(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + if (value & (value - 1)) { + error_setg(errp, "dirty-ring-size must be a power of two."); + return; + } + + s->kvm_dirty_ring_size = value; +} + +static void kvm_accel_instance_init(Object *obj) +{ + KVMState *s = KVM_STATE(obj); + + s->fd = -1; + s->vmfd = -1; + s->kvm_shadow_mem = -1; + s->kernel_irqchip_allowed = true; + s->kernel_irqchip_split = ON_OFF_AUTO_AUTO; + /* KVM dirty ring is by default off */ + s->kvm_dirty_ring_size = 0; +} + +static void kvm_accel_class_init(ObjectClass *oc, void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "KVM"; + ac->init_machine = kvm_init; + ac->has_memory = kvm_accel_has_memory; + ac->allowed = &kvm_allowed; + + object_class_property_add(oc, "kernel-irqchip", "on|off|split", + NULL, kvm_set_kernel_irqchip, + NULL, NULL); + object_class_property_set_description(oc, "kernel-irqchip", + "Configure KVM in-kernel irqchip"); + + object_class_property_add(oc, "kvm-shadow-mem", "int", + kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem, + NULL, NULL); + object_class_property_set_description(oc, "kvm-shadow-mem", + "KVM shadow MMU size"); + + object_class_property_add(oc, "dirty-ring-size", "uint32", + kvm_get_dirty_ring_size, kvm_set_dirty_ring_size, + NULL, NULL); + object_class_property_set_description(oc, "dirty-ring-size", + "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)"); +} + +static const TypeInfo kvm_accel_type = { + .name = TYPE_KVM_ACCEL, + .parent = TYPE_ACCEL, + .instance_init = kvm_accel_instance_init, + .class_init = kvm_accel_class_init, + .instance_size = sizeof(KVMState), +}; + +static void kvm_type_init(void) +{ + type_register_static(&kvm_accel_type); +} + +type_init(kvm_type_init); diff --git a/accel/kvm/kvm-cpus.h b/accel/kvm/kvm-cpus.h new file mode 100644 index 000000000..bf0bd1bee --- /dev/null +++ b/accel/kvm/kvm-cpus.h @@ -0,0 +1,22 @@ +/* + * Accelerator CPUS Interface + * + * Copyright 2020 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef KVM_CPUS_H +#define KVM_CPUS_H + +#include "sysemu/cpus.h" + +int kvm_init_vcpu(CPUState *cpu, Error **errp); +int kvm_cpu_exec(CPUState *cpu); +void kvm_destroy_vcpu(CPUState *cpu); +void kvm_cpu_synchronize_post_reset(CPUState *cpu); +void kvm_cpu_synchronize_post_init(CPUState *cpu); +void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu); + +#endif /* KVM_CPUS_H */ diff --git a/accel/kvm/meson.build b/accel/kvm/meson.build new file mode 100644 index 000000000..397a1fe1f --- /dev/null +++ b/accel/kvm/meson.build @@ -0,0 +1,7 @@ +kvm_ss = ss.source_set() +kvm_ss.add(files( + 'kvm-all.c', + 'kvm-accel-ops.c', +)) + +specific_ss.add_all(when: 'CONFIG_KVM', if_true: kvm_ss) diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events new file mode 100644 index 000000000..399aaeb0e --- /dev/null +++ b/accel/kvm/trace-events @@ -0,0 +1,28 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# kvm-all.c +kvm_ioctl(int type, void *arg) "type 0x%x, arg %p" +kvm_vm_ioctl(int type, void *arg) "type 0x%x, arg %p" +kvm_vcpu_ioctl(int cpu_index, int type, void *arg) "cpu_index %d, type 0x%x, arg %p" +kvm_run_exit(int cpu_index, uint32_t reason) "cpu_index %d, reason %d" +kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p" +kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s" +kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s" +kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu" +kvm_irqchip_commit_routes(void) "" +kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d" +kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d" +kvm_irqchip_release_virq(int virq) "virq %d" +kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%" PRIx64 " val=0x%x assign: %d size: %d match: %d" +kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d" +kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d" +kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32 +kvm_resample_fd_notify(int gsi) "gsi %d" +kvm_dirty_ring_full(int id) "vcpu %d" +kvm_dirty_ring_reap_vcpu(int id) "vcpu %d" +kvm_dirty_ring_page(int vcpu, uint32_t slot, uint64_t offset) "vcpu %d fetch %"PRIu32" offset 0x%"PRIx64 +kvm_dirty_ring_reaper(const char *s) "%s" +kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)" +kvm_dirty_ring_reaper_kick(const char *reason) "%s" +kvm_dirty_ring_flush(int finished) "%d" + diff --git a/accel/kvm/trace.h b/accel/kvm/trace.h new file mode 100644 index 000000000..67c935a6f --- /dev/null +++ b/accel/kvm/trace.h @@ -0,0 +1 @@ +#include "trace/trace-accel_kvm.h" diff --git a/accel/meson.build b/accel/meson.build new file mode 100644 index 000000000..dfd808d2c --- /dev/null +++ b/accel/meson.build @@ -0,0 +1,18 @@ +specific_ss.add(files('accel-common.c')) +softmmu_ss.add(files('accel-softmmu.c')) +user_ss.add(files('accel-user.c')) + +subdir('hvf') +subdir('qtest') +subdir('kvm') +subdir('tcg') +subdir('xen') +subdir('stubs') + +dummy_ss = ss.source_set() +dummy_ss.add(files( + 'dummy-cpus.c', 
+)) + +specific_ss.add_all(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'], if_true: dummy_ss) +specific_ss.add_all(when: ['CONFIG_XEN'], if_true: dummy_ss) diff --git a/accel/qtest/meson.build b/accel/qtest/meson.build new file mode 100644 index 000000000..4c6560029 --- /dev/null +++ b/accel/qtest/meson.build @@ -0,0 +1,2 @@ +qtest_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'], + if_true: files('qtest.c')) diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c new file mode 100644 index 000000000..7e6b8110d --- /dev/null +++ b/accel/qtest/qtest.c @@ -0,0 +1,73 @@ +/* + * QTest accelerator code + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/rcu.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "qemu/accel.h" +#include "sysemu/qtest.h" +#include "sysemu/cpus.h" +#include "sysemu/cpu-timers.h" +#include "qemu/guest-random.h" +#include "qemu/main-loop.h" +#include "hw/core/cpu.h" + +static int qtest_init_accel(MachineState *ms) +{ + return 0; +} + +static void qtest_accel_class_init(ObjectClass *oc, void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "QTest"; + ac->init_machine = qtest_init_accel; + ac->allowed = &qtest_allowed; +} + +#define TYPE_QTEST_ACCEL ACCEL_CLASS_NAME("qtest") + +static const TypeInfo qtest_accel_type = { + .name = TYPE_QTEST_ACCEL, + .parent = TYPE_ACCEL, + .class_init = qtest_accel_class_init, +}; +module_obj(TYPE_QTEST_ACCEL); + +static void qtest_accel_ops_class_init(ObjectClass *oc, void *data) +{ + AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); + + ops->create_vcpu_thread = dummy_start_vcpu_thread; + ops->get_virtual_clock = qtest_get_virtual_clock; +}; + +static const TypeInfo qtest_accel_ops_type = { + .name = ACCEL_OPS_NAME("qtest"), + + .parent = TYPE_ACCEL_OPS, + .class_init = qtest_accel_ops_class_init, + .abstract = true, +}; +module_obj(ACCEL_OPS_NAME("qtest")); + +static void qtest_type_init(void) +{ + type_register_static(&qtest_accel_type); + type_register_static(&qtest_accel_ops_type); +} + +type_init(qtest_type_init); diff --git a/accel/stubs/hax-stub.c b/accel/stubs/hax-stub.c new file mode 100644 index 000000000..49077f88e --- /dev/null +++ b/accel/stubs/hax-stub.c @@ -0,0 +1,22 @@ +/* + * QEMU HAXM support + * + * Copyright (c) 2015, Intel Corporation + * + * Copyright 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "sysemu/hax.h" + +int hax_sync_vcpus(void) +{ + return 0; +} diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c new file mode 100644 index 000000000..5319573e0 --- /dev/null +++ b/accel/stubs/kvm-stub.c @@ -0,0 +1,155 @@ +/* + * QEMU KVM stub + * + * Copyright Red Hat, Inc. 2010 + * + * Author: Paolo Bonzini <pbonzini@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "sysemu/kvm.h" + +#ifndef CONFIG_USER_ONLY +#include "hw/pci/msi.h" +#endif + +KVMState *kvm_state; +bool kvm_kernel_irqchip; +bool kvm_async_interrupts_allowed; +bool kvm_eventfds_allowed; +bool kvm_irqfds_allowed; +bool kvm_resamplefds_allowed; +bool kvm_msi_via_irqfd_allowed; +bool kvm_gsi_routing_allowed; +bool kvm_gsi_direct_mapping; +bool kvm_allowed; +bool kvm_readonly_mem_allowed; +bool kvm_ioeventfd_any_length_allowed; +bool kvm_msi_use_devid; + +void kvm_flush_coalesced_mmio_buffer(void) +{ +} + +void kvm_cpu_synchronize_state(CPUState *cpu) +{ +} + +bool kvm_has_sync_mmu(void) +{ + return false; +} + +int kvm_has_many_ioeventfds(void) +{ + return 0; +} + +int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) +{ + return -ENOSYS; +} + +int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + return -EINVAL; +} + +int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + return -EINVAL; +} + +void kvm_remove_all_breakpoints(CPUState *cpu) +{ +} + +int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) +{ + return 1; +} + +int kvm_on_sigbus(int code, void *addr) +{ + return 1; +} + +#ifndef CONFIG_USER_ONLY +int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev) +{ + return -ENOSYS; +} + +void kvm_init_irq_routing(KVMState *s) +{ +} + +void kvm_irqchip_release_virq(KVMState *s, int virq) +{ +} + +int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, + PCIDevice *dev) +{ + return -ENOSYS; +} + +void kvm_irqchip_commit_routes(KVMState *s) +{ +} + +void kvm_irqchip_add_change_notifier(Notifier *n) +{ +} + +void kvm_irqchip_remove_change_notifier(Notifier *n) +{ +} + +void kvm_irqchip_change_notify(void) +{ +} + +int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter) +{ + return -ENOSYS; +} + +int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + EventNotifier *rn, int virq) +{ + return -ENOSYS; +} + +int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + int virq) +{ + return -ENOSYS; +} + +bool kvm_has_free_slot(MachineState *ms) +{ + return false; +} + +void kvm_init_cpu_signals(CPUState *cpu) +{ + abort(); +} + +bool kvm_arm_supports_user_irq(void) +{ + return false; +} + +bool kvm_dirty_ring_enabled(void) +{ + return false; +} +#endif diff --git a/accel/stubs/meson.build b/accel/stubs/meson.build new file mode 100644 index 000000000..12dd1539a --- /dev/null +++ b/accel/stubs/meson.build @@ -0,0 +1,4 @@ +specific_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c')) +specific_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c')) +specific_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) +specific_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c')) diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c new file mode 100644 index 000000000..d8162673a --- /dev/null +++ b/accel/stubs/tcg-stub.c @@ -0,0 +1,39 @@ +/* + * QEMU TCG accelerator stub + * + * Copyright Red Hat, Inc. 2013 + * + * Author: Paolo Bonzini <pbonzini@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
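
The stub files above exist so that common code can call the kvm_*() entry points unconditionally: accel/stubs/meson.build compiles the stub translation unit only when the corresponding accelerator is configured out (the if_false: rules), and each stub returns a harmless value such as -ENOSYS or false. A minimal sketch of the same link-time substitution, using hypothetical names (feature.h, feature.c, feature-stub.c), not QEMU files:

    /* feature.h - shared interface, hypothetical names throughout */
    int feature_sync(void);

    /* feature.c - compiled only when the feature is configured in */
    #include "feature.h"
    int feature_sync(void)
    {
        /* ... talk to the real backend ... */
        return 0;
    }

    /* feature-stub.c - compiled only when the feature is configured out */
    #include <errno.h>
    #include "feature.h"
    int feature_sync(void)
    {
        return -ENOSYS;         /* callers read this as "not available" */
    }

    /* main.c - common code needs no #ifdef either way */
    #include <stdio.h>
    #include "feature.h"
    int main(void)
    {
        printf("feature_sync() -> %d\n", feature_sync());
        return 0;
    }

Exactly one of feature.c and feature-stub.c is linked into the final binary, which is the decision the when:/if_false: meson rules above encode.
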
+ * + */ + +#include "qemu/osdep.h" +#include "exec/exec-all.h" + +void tb_flush(CPUState *cpu) +{ +} + +void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) +{ +} + +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) +{ + /* Handled by hardware accelerator. */ + g_assert_not_reached(); +} + +void QEMU_NORETURN cpu_loop_exit(CPUState *cpu) +{ + g_assert_not_reached(); +} + +void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) +{ + g_assert_not_reached(); +} diff --git a/accel/stubs/xen-stub.c b/accel/stubs/xen-stub.c new file mode 100644 index 000000000..7054965c4 --- /dev/null +++ b/accel/stubs/xen-stub.c @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2014 Citrix Systems UK Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "sysemu/xen.h" +#include "qapi/qapi-commands-migration.h" + +bool xen_allowed; + +void qmp_xen_set_global_dirty_log(bool enable, Error **errp) +{ +} diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc new file mode 100644 index 000000000..1df1f243e --- /dev/null +++ b/accel/tcg/atomic_common.c.inc @@ -0,0 +1,124 @@ +/* + * Common Atomic Helper Functions + * + * This file should be included before the various instantiations of + * the atomic_template.h helpers. + * + * Copyright (c) 2019 Linaro + * Written by Alex BennĂ©e <alex.bennee@linaro.org> + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +static void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + CPUState *cpu = env_cpu(env); + + trace_guest_rmw_before_exec(cpu, addr, oi); +} + +static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW); +} + +#if HAVE_ATOMIC128 +static void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + trace_guest_ld_before_exec(env_cpu(env), addr, oi); +} + +static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); +} + +static void atomic_trace_st_pre(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + trace_guest_st_before_exec(env_cpu(env), addr, oi); +} + +static void atomic_trace_st_post(CPUArchState *env, target_ulong addr, + MemOpIdx oi) +{ + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} +#endif + +/* + * Atomic helpers callable from TCG. + * These have a common interface and all defer to cpu_atomic_* + * using the host return address from GETPC(). 
+ */ + +#define CMPXCHG_HELPER(OP, TYPE) \ + TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \ + TYPE oldv, TYPE newv, uint32_t oi) \ + { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); } + +CMPXCHG_HELPER(cmpxchgb, uint32_t) +CMPXCHG_HELPER(cmpxchgw_be, uint32_t) +CMPXCHG_HELPER(cmpxchgw_le, uint32_t) +CMPXCHG_HELPER(cmpxchgl_be, uint32_t) +CMPXCHG_HELPER(cmpxchgl_le, uint32_t) + +#ifdef CONFIG_ATOMIC64 +CMPXCHG_HELPER(cmpxchgq_be, uint64_t) +CMPXCHG_HELPER(cmpxchgq_le, uint64_t) +#endif + +#undef CMPXCHG_HELPER + +#define ATOMIC_HELPER(OP, TYPE) \ + TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \ + TYPE val, uint32_t oi) \ + { return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); } + +#ifdef CONFIG_ATOMIC64 +#define GEN_ATOMIC_HELPERS(OP) \ + ATOMIC_HELPER(glue(OP,b), uint32_t) \ + ATOMIC_HELPER(glue(OP,w_be), uint32_t) \ + ATOMIC_HELPER(glue(OP,w_le), uint32_t) \ + ATOMIC_HELPER(glue(OP,l_be), uint32_t) \ + ATOMIC_HELPER(glue(OP,l_le), uint32_t) \ + ATOMIC_HELPER(glue(OP,q_be), uint64_t) \ + ATOMIC_HELPER(glue(OP,q_le), uint64_t) +#else +#define GEN_ATOMIC_HELPERS(OP) \ + ATOMIC_HELPER(glue(OP,b), uint32_t) \ + ATOMIC_HELPER(glue(OP,w_be), uint32_t) \ + ATOMIC_HELPER(glue(OP,w_le), uint32_t) \ + ATOMIC_HELPER(glue(OP,l_be), uint32_t) \ + ATOMIC_HELPER(glue(OP,l_le), uint32_t) +#endif + +GEN_ATOMIC_HELPERS(fetch_add) +GEN_ATOMIC_HELPERS(fetch_and) +GEN_ATOMIC_HELPERS(fetch_or) +GEN_ATOMIC_HELPERS(fetch_xor) +GEN_ATOMIC_HELPERS(fetch_smin) +GEN_ATOMIC_HELPERS(fetch_umin) +GEN_ATOMIC_HELPERS(fetch_smax) +GEN_ATOMIC_HELPERS(fetch_umax) + +GEN_ATOMIC_HELPERS(add_fetch) +GEN_ATOMIC_HELPERS(and_fetch) +GEN_ATOMIC_HELPERS(or_fetch) +GEN_ATOMIC_HELPERS(xor_fetch) +GEN_ATOMIC_HELPERS(smin_fetch) +GEN_ATOMIC_HELPERS(umin_fetch) +GEN_ATOMIC_HELPERS(smax_fetch) +GEN_ATOMIC_HELPERS(umax_fetch) + +GEN_ATOMIC_HELPERS(xchg) + +#undef ATOMIC_HELPER +#undef GEN_ATOMIC_HELPERS diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h new file mode 100644 index 000000000..2d917b6b1 --- /dev/null +++ b/accel/tcg/atomic_template.h @@ -0,0 +1,352 @@ +/* + * Atomic helper templates + * Included from tcg-runtime.c and cputlb.c. + * + * Copyright (c) 2016 Red Hat, Inc + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
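
The CMPXCHG_HELPER and ATOMIC_HELPER macros above only marshal arguments and forward to the cpu_atomic_*_mmu() functions together with the host return address from GETPC(); once the guest address has been translated to a host pointer, each helper is a single host atomic operation. A rough C11 <stdatomic.h> analogue of one cmpxchg helper, as a standalone sketch (helper_cmpxchgl is an illustrative name, not a QEMU symbol):

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns the value found at the location, like the guest cmpxchg. */
    static uint32_t helper_cmpxchgl(_Atomic uint32_t *haddr,
                                    uint32_t oldv, uint32_t newv)
    {
        uint32_t expected = oldv;

        /* On failure, "expected" is overwritten with the current value. */
        atomic_compare_exchange_strong(haddr, &expected, newv);
        return expected;
    }

    int main(void)
    {
        _Atomic uint32_t word = 41;
        uint32_t seen = helper_cmpxchgl(&word, 41, 42);

        printf("seen %" PRIu32 ", now %" PRIu32 "\n",
               seen, (uint32_t)atomic_load(&word));
        return 0;
    }
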
+ */ + +#include "qemu/plugin.h" + +#if DATA_SIZE == 16 +# define SUFFIX o +# define DATA_TYPE Int128 +# define BSWAP bswap128 +# define SHIFT 4 +#elif DATA_SIZE == 8 +# define SUFFIX q +# define DATA_TYPE aligned_uint64_t +# define SDATA_TYPE aligned_int64_t +# define BSWAP bswap64 +# define SHIFT 3 +#elif DATA_SIZE == 4 +# define SUFFIX l +# define DATA_TYPE uint32_t +# define SDATA_TYPE int32_t +# define BSWAP bswap32 +# define SHIFT 2 +#elif DATA_SIZE == 2 +# define SUFFIX w +# define DATA_TYPE uint16_t +# define SDATA_TYPE int16_t +# define BSWAP bswap16 +# define SHIFT 1 +#elif DATA_SIZE == 1 +# define SUFFIX b +# define DATA_TYPE uint8_t +# define SDATA_TYPE int8_t +# define BSWAP +# define SHIFT 0 +#else +# error unsupported data size +#endif + +#if DATA_SIZE >= 4 +# define ABI_TYPE DATA_TYPE +#else +# define ABI_TYPE uint32_t +#endif + +/* Define host-endian atomic operations. Note that END is used within + the ATOMIC_NAME macro, and redefined below. */ +#if DATA_SIZE == 1 +# define END +#elif defined(HOST_WORDS_BIGENDIAN) +# define END _be +#else +# define END _le +#endif + +ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, + ABI_TYPE cmpv, ABI_TYPE newv, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ | PAGE_WRITE, retaddr); + DATA_TYPE ret; + + atomic_trace_rmw_pre(env, addr, oi); +#if DATA_SIZE == 16 + ret = atomic16_cmpxchg(haddr, cmpv, newv); +#else + ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv); +#endif + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, oi); + return ret; +} + +#if DATA_SIZE >= 16 +#if HAVE_ATOMIC128 +ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ, retaddr); + DATA_TYPE val; + + atomic_trace_ld_pre(env, addr, oi); + val = atomic16_read(haddr); + ATOMIC_MMU_CLEANUP; + atomic_trace_ld_post(env, addr, oi); + return val; +} + +void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_WRITE, retaddr); + + atomic_trace_st_pre(env, addr, oi); + atomic16_set(haddr, val); + ATOMIC_MMU_CLEANUP; + atomic_trace_st_post(env, addr, oi); +} +#endif +#else +ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ | PAGE_WRITE, retaddr); + DATA_TYPE ret; + + atomic_trace_rmw_pre(env, addr, oi); + ret = qatomic_xchg__nocheck(haddr, val); + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, oi); + return ret; +} + +#define GEN_ATOMIC_HELPER(X) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ +{ \ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ + PAGE_READ | PAGE_WRITE, retaddr); \ + DATA_TYPE ret; \ + atomic_trace_rmw_pre(env, addr, oi); \ + ret = qatomic_##X(haddr, val); \ + ATOMIC_MMU_CLEANUP; \ + atomic_trace_rmw_post(env, addr, oi); \ + return ret; \ +} + +GEN_ATOMIC_HELPER(fetch_add) +GEN_ATOMIC_HELPER(fetch_and) +GEN_ATOMIC_HELPER(fetch_or) +GEN_ATOMIC_HELPER(fetch_xor) +GEN_ATOMIC_HELPER(add_fetch) +GEN_ATOMIC_HELPER(and_fetch) +GEN_ATOMIC_HELPER(or_fetch) +GEN_ATOMIC_HELPER(xor_fetch) + +#undef GEN_ATOMIC_HELPER + +/* + * These helpers are, as a whole, full barriers. 
Within the helper, + * the leading barrier is explicit and the trailing barrier is within + * cmpxchg primitive. + * + * Trace this load + RMW loop as a single RMW op. This way, regardless + * of CF_PARALLEL's value, we'll trace just a read and a write. + */ +#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ +{ \ + XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ + PAGE_READ | PAGE_WRITE, retaddr); \ + XDATA_TYPE cmp, old, new, val = xval; \ + atomic_trace_rmw_pre(env, addr, oi); \ + smp_mb(); \ + cmp = qatomic_read__nocheck(haddr); \ + do { \ + old = cmp; new = FN(old, val); \ + cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \ + } while (cmp != old); \ + ATOMIC_MMU_CLEANUP; \ + atomic_trace_rmw_post(env, addr, oi); \ + return RET; \ +} + +GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old) + +GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) + +#undef GEN_ATOMIC_HELPER_FN +#endif /* DATA SIZE >= 16 */ + +#undef END + +#if DATA_SIZE > 1 + +/* Define reverse-host-endian atomic operations. Note that END is used + within the ATOMIC_NAME macro. */ +#ifdef HOST_WORDS_BIGENDIAN +# define END _le +#else +# define END _be +#endif + +ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, + ABI_TYPE cmpv, ABI_TYPE newv, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ | PAGE_WRITE, retaddr); + DATA_TYPE ret; + + atomic_trace_rmw_pre(env, addr, oi); +#if DATA_SIZE == 16 + ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv)); +#else + ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv)); +#endif + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, oi); + return BSWAP(ret); +} + +#if DATA_SIZE >= 16 +#if HAVE_ATOMIC128 +ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ, retaddr); + DATA_TYPE val; + + atomic_trace_ld_pre(env, addr, oi); + val = atomic16_read(haddr); + ATOMIC_MMU_CLEANUP; + atomic_trace_ld_post(env, addr, oi); + return BSWAP(val); +} + +void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_WRITE, retaddr); + + atomic_trace_st_pre(env, addr, oi); + val = BSWAP(val); + atomic16_set(haddr, val); + ATOMIC_MMU_CLEANUP; + atomic_trace_st_post(env, addr, oi); +} +#endif +#else +ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, + PAGE_READ | PAGE_WRITE, retaddr); + ABI_TYPE ret; + + atomic_trace_rmw_pre(env, addr, oi); + ret = qatomic_xchg__nocheck(haddr, BSWAP(val)); + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, oi); + return BSWAP(ret); +} + +#define GEN_ATOMIC_HELPER(X) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ +{ \ + DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, 
oi, DATA_SIZE, \ + PAGE_READ | PAGE_WRITE, retaddr); \ + DATA_TYPE ret; \ + atomic_trace_rmw_pre(env, addr, oi); \ + ret = qatomic_##X(haddr, BSWAP(val)); \ + ATOMIC_MMU_CLEANUP; \ + atomic_trace_rmw_post(env, addr, oi); \ + return BSWAP(ret); \ +} + +GEN_ATOMIC_HELPER(fetch_and) +GEN_ATOMIC_HELPER(fetch_or) +GEN_ATOMIC_HELPER(fetch_xor) +GEN_ATOMIC_HELPER(and_fetch) +GEN_ATOMIC_HELPER(or_fetch) +GEN_ATOMIC_HELPER(xor_fetch) + +#undef GEN_ATOMIC_HELPER + +/* These helpers are, as a whole, full barriers. Within the helper, + * the leading barrier is explicit and the trailing barrier is within + * cmpxchg primitive. + * + * Trace this load + RMW loop as a single RMW op. This way, regardless + * of CF_PARALLEL's value, we'll trace just a read and a write. + */ +#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ +{ \ + XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ + PAGE_READ | PAGE_WRITE, retaddr); \ + XDATA_TYPE ldo, ldn, old, new, val = xval; \ + atomic_trace_rmw_pre(env, addr, oi); \ + smp_mb(); \ + ldn = qatomic_read__nocheck(haddr); \ + do { \ + ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \ + ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \ + } while (ldo != ldn); \ + ATOMIC_MMU_CLEANUP; \ + atomic_trace_rmw_post(env, addr, oi); \ + return RET; \ +} + +GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old) + +GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) + +/* Note that for addition, we need to use a separate cmpxchg loop instead + of bswaps for the reverse-host-endian helpers. */ +#define ADD(X, Y) (X + Y) +GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new) +#undef ADD + +#undef GEN_ATOMIC_HELPER_FN +#endif /* DATA_SIZE >= 16 */ + +#undef END +#endif /* DATA_SIZE > 1 */ + +#undef BSWAP +#undef ABI_TYPE +#undef DATA_TYPE +#undef SDATA_TYPE +#undef SUFFIX +#undef DATA_SIZE +#undef SHIFT diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c new file mode 100644 index 000000000..be6fe45aa --- /dev/null +++ b/accel/tcg/cpu-exec-common.c @@ -0,0 +1,83 @@ +/* + * emulator main execution loop + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
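
For operations with no single host primitive (the signed and unsigned min/max helpers, and the byte-swapped add above), GEN_ATOMIC_HELPER_FN falls back to a plain load followed by a compare-and-swap retry loop. A portable C11 sketch of that loop for an unsigned fetch-min, assuming the default seq_cst compare-exchange is an acceptable stand-in for the explicit smp_mb() plus cmpxchg barriers described in the comment:

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Like the generated fetch_umin helpers: returns the previous value. */
    static uint32_t fetch_umin_u32(_Atomic uint32_t *haddr, uint32_t val)
    {
        uint32_t old = atomic_load_explicit(haddr, memory_order_relaxed);
        uint32_t new;

        /* On failure, "old" is refreshed with the value currently stored,
         * so the minimum is recomputed against fresh data each time. */
        do {
            new = old < val ? old : val;
        } while (!atomic_compare_exchange_strong(haddr, &old, new));

        return old;
    }

    int main(void)
    {
        _Atomic uint32_t word = 100;
        uint32_t old = fetch_umin_u32(&word, 7);

        printf("old %" PRIu32 ", now %" PRIu32 "\n", old, atomic_load(&word));
        return 0;
    }
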
+ */ + +#include "qemu/osdep.h" +#include "sysemu/cpus.h" +#include "sysemu/tcg.h" +#include "exec/exec-all.h" + +bool tcg_allowed; + +/* exit the current TB, but without causing any exception to be raised */ +void cpu_loop_exit_noexc(CPUState *cpu) +{ + cpu->exception_index = -1; + cpu_loop_exit(cpu); +} + +#if defined(CONFIG_SOFTMMU) +void cpu_reloading_memory_map(void) +{ + if (qemu_in_vcpu_thread() && current_cpu->running) { + /* The guest can in theory prolong the RCU critical section as long + * as it feels like. The major problem with this is that because it + * can do multiple reconfigurations of the memory map within the + * critical section, we could potentially accumulate an unbounded + * collection of memory data structures awaiting reclamation. + * + * Because the only thing we're currently protecting with RCU is the + * memory data structures, it's sufficient to break the critical section + * in this callback, which we know will get called every time the + * memory map is rearranged. + * + * (If we add anything else in the system that uses RCU to protect + * its data structures, we will need to implement some other mechanism + * to force TCG CPUs to exit the critical section, at which point this + * part of this callback might become unnecessary.) + * + * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which + * only protects cpu->as->dispatch. Since we know our caller is about + * to reload it, it's safe to split the critical section. + */ + rcu_read_unlock(); + rcu_read_lock(); + } +} +#endif + +void cpu_loop_exit(CPUState *cpu) +{ + /* Undo the setting in cpu_tb_exec. */ + cpu->can_do_io = 1; + siglongjmp(cpu->jmp_env, 1); +} + +void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) +{ + if (pc) { + cpu_restore_state(cpu, pc, true); + } + cpu_loop_exit(cpu); +} + +void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc) +{ + cpu->exception_index = EXCP_ATOMIC; + cpu_loop_exit_restore(cpu, pc); +} diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c new file mode 100644 index 000000000..409ec8c38 --- /dev/null +++ b/accel/tcg/cpu-exec.c @@ -0,0 +1,1093 @@ +/* + * emulator main execution loop + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
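
cpu_loop_exit() above never returns to its caller: it siglongjmp()s straight back to the sigsetjmp() that the execution loop establishes, unwinding out of whatever helper or generated code raised the exception. A reduced sketch of that control flow, with illustrative names only:

    #include <setjmp.h>
    #include <stdio.h>

    static sigjmp_buf jmp_env;

    /* Deep inside "generated code": bail out without returning normally. */
    static void loop_exit(void)
    {
        siglongjmp(jmp_env, 1);
    }

    static void run_one_block(int n)
    {
        printf("block %d\n", n);
        if (n == 2) {
            loop_exit();        /* e.g. an exception became pending */
        }
    }

    int main(void)
    {
        if (sigsetjmp(jmp_env, 0) == 0) {
            for (int n = 0; ; n++) {
                run_one_block(n);
            }
        }
        puts("back at the outer loop; handle the pending event here");
        return 0;
    }
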
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/qemu-print.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/type-helpers.h" +#include "hw/core/tcg-cpu-ops.h" +#include "trace.h" +#include "disas/disas.h" +#include "exec/exec-all.h" +#include "tcg/tcg.h" +#include "qemu/atomic.h" +#include "qemu/compiler.h" +#include "qemu/timer.h" +#include "qemu/rcu.h" +#include "exec/log.h" +#include "qemu/main-loop.h" +#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY) +#include "hw/i386/apic.h" +#endif +#include "sysemu/cpus.h" +#include "exec/cpu-all.h" +#include "sysemu/cpu-timers.h" +#include "sysemu/replay.h" +#include "sysemu/tcg.h" +#include "exec/helper-proto.h" +#include "tb-hash.h" +#include "tb-context.h" +#include "internal.h" + +/* -icount align implementation. */ + +typedef struct SyncClocks { + int64_t diff_clk; + int64_t last_cpu_icount; + int64_t realtime_clock; +} SyncClocks; + +#if !defined(CONFIG_USER_ONLY) +/* Allow the guest to have a max 3ms advance. + * The difference between the 2 clocks could therefore + * oscillate around 0. + */ +#define VM_CLOCK_ADVANCE 3000000 +#define THRESHOLD_REDUCE 1.5 +#define MAX_DELAY_PRINT_RATE 2000000000LL +#define MAX_NB_PRINTS 100 + +static int64_t max_delay; +static int64_t max_advance; + +static void align_clocks(SyncClocks *sc, CPUState *cpu) +{ + int64_t cpu_icount; + + if (!icount_align_option) { + return; + } + + cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low; + sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount); + sc->last_cpu_icount = cpu_icount; + + if (sc->diff_clk > VM_CLOCK_ADVANCE) { +#ifndef _WIN32 + struct timespec sleep_delay, rem_delay; + sleep_delay.tv_sec = sc->diff_clk / 1000000000LL; + sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL; + if (nanosleep(&sleep_delay, &rem_delay) < 0) { + sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec; + } else { + sc->diff_clk = 0; + } +#else + Sleep(sc->diff_clk / SCALE_MS); + sc->diff_clk = 0; +#endif + } +} + +static void print_delay(const SyncClocks *sc) +{ + static float threshold_delay; + static int64_t last_realtime_clock; + static int nb_prints; + + if (icount_align_option && + sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE && + nb_prints < MAX_NB_PRINTS) { + if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) || + (-sc->diff_clk / (float)1000000000LL < + (threshold_delay - THRESHOLD_REDUCE))) { + threshold_delay = (-sc->diff_clk / 1000000000LL) + 1; + qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n", + threshold_delay - 1, + threshold_delay); + nb_prints++; + last_realtime_clock = sc->realtime_clock; + } + } +} + +static void init_delay_params(SyncClocks *sc, CPUState *cpu) +{ + if (!icount_align_option) { + return; + } + sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT); + sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock; + sc->last_cpu_icount + = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low; + if (sc->diff_clk < max_delay) { + max_delay = sc->diff_clk; + } + if (sc->diff_clk > max_advance) { + max_advance = sc->diff_clk; + } + + /* Print every 2s max if the guest is late. 
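
align_clocks() above keeps the guest from running ahead of real time by sleeping off the accumulated difference, and an interrupted nanosleep() feeds its remaining time back into the budget. A standalone sketch of the same idea (CLOCK_MONOTONIC and the helper name are illustrative, not what QEMU uses internally):

    #include <stdio.h>
    #include <time.h>

    /* Sleep until "deadline" on CLOCK_MONOTONIC, if it is still ahead. */
    static void align_to(const struct timespec *deadline)
    {
        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);

        long long diff_ns = (deadline->tv_sec - now.tv_sec) * 1000000000LL
                            + (deadline->tv_nsec - now.tv_nsec);
        if (diff_ns <= 0) {
            return;             /* on time or behind: nothing to give back */
        }

        struct timespec delay = {
            .tv_sec = diff_ns / 1000000000LL,
            .tv_nsec = diff_ns % 1000000000LL,
        };
        while (nanosleep(&delay, &delay) != 0) {
            /* Interrupted: "delay" now holds the remainder, so retry. */
        }
    }

    int main(void)
    {
        struct timespec deadline;
        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += 1;   /* pretend the guest ran 1s ahead */
        align_to(&deadline);
        puts("caught up");
        return 0;
    }
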
We limit the number + of printed messages to NB_PRINT_MAX(currently 100) */ + print_delay(sc); +} +#else +static void align_clocks(SyncClocks *sc, const CPUState *cpu) +{ +} + +static void init_delay_params(SyncClocks *sc, const CPUState *cpu) +{ +} +#endif /* CONFIG USER ONLY */ + +uint32_t curr_cflags(CPUState *cpu) +{ + uint32_t cflags = cpu->tcg_cflags; + + /* + * Record gdb single-step. We should be exiting the TB by raising + * EXCP_DEBUG, but to simplify other tests, disable chaining too. + * + * For singlestep and -d nochain, suppress goto_tb so that + * we can log -d cpu,exec after every TB. + */ + if (unlikely(cpu->singlestep_enabled)) { + cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1; + } else if (singlestep) { + cflags |= CF_NO_GOTO_TB | 1; + } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { + cflags |= CF_NO_GOTO_TB; + } + + return cflags; +} + +/* Might cause an exception, so have a longjmp destination ready */ +static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, + target_ulong cs_base, + uint32_t flags, uint32_t cflags) +{ + TranslationBlock *tb; + uint32_t hash; + + /* we should never be trying to look up an INVALID tb */ + tcg_debug_assert(!(cflags & CF_INVALID)); + + hash = tb_jmp_cache_hash_func(pc); + tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); + + if (likely(tb && + tb->pc == pc && + tb->cs_base == cs_base && + tb->flags == flags && + tb->trace_vcpu_dstate == *cpu->trace_dstate && + tb_cflags(tb) == cflags)) { + return tb; + } + tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); + if (tb == NULL) { + return NULL; + } + qatomic_set(&cpu->tb_jmp_cache[hash], tb); + return tb; +} + +static inline void log_cpu_exec(target_ulong pc, CPUState *cpu, + const TranslationBlock *tb) +{ + if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) + && qemu_log_in_addr_range(pc)) { + + qemu_log_mask(CPU_LOG_EXEC, + "Trace %d: %p [" TARGET_FMT_lx + "/" TARGET_FMT_lx "/%08x/%08x] %s\n", + cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc, + tb->flags, tb->cflags, lookup_symbol(pc)); + +#if defined(DEBUG_DISAS) + if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) { + FILE *logfile = qemu_log_lock(); + int flags = 0; + + if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) { + flags |= CPU_DUMP_FPU; + } +#if defined(TARGET_I386) + flags |= CPU_DUMP_CCOP; +#endif + log_cpu_state(cpu, flags); + qemu_log_unlock(logfile); + } +#endif /* DEBUG_DISAS */ + } +} + +static bool check_for_breakpoints(CPUState *cpu, target_ulong pc, + uint32_t *cflags) +{ + CPUBreakpoint *bp; + bool match_page = false; + + if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) { + return false; + } + + /* + * Singlestep overrides breakpoints. + * This requirement is visible in the record-replay tests, where + * we would fail to make forward progress in reverse-continue. + * + * TODO: gdb singlestep should only override gdb breakpoints, + * so that one could (gdb) singlestep into the guest kernel's + * architectural breakpoint handler. + */ + if (cpu->singlestep_enabled) { + return false; + } + + QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { + /* + * If we have an exact pc match, trigger the breakpoint. + * Otherwise, note matches within the page. 
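
tb_lookup() above consults cpu->tb_jmp_cache first: a direct-mapped, lossy cache indexed by a hash of the guest PC and read with qatomic_rcu_read(), and it only falls back to the global qht hash table (and ultimately the translator) on a miss. A reduced, single-threaded sketch of that fast path; the hash function, cache size and TB contents are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define JMP_CACHE_BITS 12
    #define JMP_CACHE_SIZE (1u << JMP_CACHE_BITS)

    typedef struct TB {
        uint64_t pc;
        /* ... host code pointer, flags, ... */
    } TB;

    static TB *jmp_cache[JMP_CACHE_SIZE];

    static unsigned jmp_cache_hash(uint64_t pc)
    {
        return (pc >> 2) & (JMP_CACHE_SIZE - 1);   /* cheap placeholder hash */
    }

    /* Stand-in for the slow path: hash-table lookup or fresh translation. */
    static TB *slow_lookup_or_translate(uint64_t pc)
    {
        static TB tb;
        tb.pc = pc;
        return &tb;
    }

    static TB *tb_lookup_fast(uint64_t pc)
    {
        unsigned h = jmp_cache_hash(pc);
        TB *tb = jmp_cache[h];

        if (tb && tb->pc == pc) {
            return tb;           /* hit: no locking, no full-state hashing */
        }
        tb = slow_lookup_or_translate(pc);
        jmp_cache[h] = tb;       /* lossy: may evict an unrelated entry */
        return tb;
    }

    int main(void)
    {
        printf("%p\n%p\n", (void *)tb_lookup_fast(0x1000),
                           (void *)tb_lookup_fast(0x1000));
        return 0;
    }
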
+ */ + if (pc == bp->pc) { + bool match_bp = false; + + if (bp->flags & BP_GDB) { + match_bp = true; + } else if (bp->flags & BP_CPU) { +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + CPUClass *cc = CPU_GET_CLASS(cpu); + assert(cc->tcg_ops->debug_check_breakpoint); + match_bp = cc->tcg_ops->debug_check_breakpoint(cpu); +#endif + } + + if (match_bp) { + cpu->exception_index = EXCP_DEBUG; + return true; + } + } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) { + match_page = true; + } + } + + /* + * Within the same page as a breakpoint, single-step, + * returning to helper_lookup_tb_ptr after each insn looking + * for the actual breakpoint. + * + * TODO: Perhaps better to record all of the TBs associated + * with a given virtual page that contains a breakpoint, and + * then invalidate them when a new overlapping breakpoint is + * set on the page. Non-overlapping TBs would not be + * invalidated, nor would any TB need to be invalidated as + * breakpoints are removed. + */ + if (match_page) { + *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1; + } + return false; +} + +/** + * helper_lookup_tb_ptr: quick check for next tb + * @env: current cpu state + * + * Look for an existing TB matching the current cpu state. + * If found, return the code pointer. If not found, return + * the tcg epilogue so that we return into cpu_tb_exec. + */ +const void *HELPER(lookup_tb_ptr)(CPUArchState *env) +{ + CPUState *cpu = env_cpu(env); + TranslationBlock *tb; + target_ulong cs_base, pc; + uint32_t flags, cflags; + + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + + cflags = curr_cflags(cpu); + if (check_for_breakpoints(cpu, pc, &cflags)) { + cpu_loop_exit(cpu); + } + + tb = tb_lookup(cpu, pc, cs_base, flags, cflags); + if (tb == NULL) { + return tcg_code_gen_epilogue; + } + + log_cpu_exec(pc, cpu, tb); + + return tb->tc.ptr; +} + +/* Execute a TB, and fix up the CPU state afterwards if necessary */ +/* + * Disable CFI checks. + * TCG creates binary blobs at runtime, with the transformed code. + * A TB is a blob of binary code, created at runtime and called with an + * indirect function call. Since such function did not exist at compile time, + * the CFI runtime has no way to verify its signature and would fail. + * TCG is not considered a security-sensitive part of QEMU so this does not + * affect the impact of CFI in environment with high security requirements + */ +static inline TranslationBlock * QEMU_DISABLE_CFI +cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) +{ + CPUArchState *env = cpu->env_ptr; + uintptr_t ret; + TranslationBlock *last_tb; + const void *tb_ptr = itb->tc.ptr; + + log_cpu_exec(itb->pc, cpu, itb); + + qemu_thread_jit_execute(); + ret = tcg_qemu_tb_exec(env, tb_ptr); + cpu->can_do_io = 1; + /* + * TODO: Delay swapping back to the read-write region of the TB + * until we actually need to modify the TB. The read-only copy, + * coming from the rx region, shares the same host TLB entry as + * the code that executed the exit_tb opcode that arrived here. + * If we insist on touching both the RX and the RW pages, we + * double the host TLB pressure. + */ + last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK)); + *tb_exit = ret & TB_EXIT_MASK; + + trace_exec_tb_exit(last_tb, *tb_exit); + + if (*tb_exit > TB_EXIT_IDX1) { + /* We didn't start executing this TB (eg because the instruction + * counter hit zero); we must restore the guest PC to the address + * of the start of the TB. 
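
cpu_tb_exec() above recovers two values from the single uintptr_t returned by the generated code: the low TB_EXIT_MASK bits carry the exit index, and masking them off yields the pointer to the last executed TB, which works because TB pointers leave those low bits free. A small sketch of that low-bit tagging scheme (the mask and alignment here are illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXIT_MASK ((uintptr_t)3)     /* two low bits for the exit index */

    typedef struct TB { int dummy; } TB;

    static uintptr_t pack(TB *tb, unsigned exit_idx)
    {
        assert(((uintptr_t)tb & EXIT_MASK) == 0);   /* relies on alignment */
        return (uintptr_t)tb | exit_idx;
    }

    int main(void)
    {
        static _Alignas(4) TB tb;
        uintptr_t ret = pack(&tb, 2);

        TB *last_tb = (TB *)(ret & ~EXIT_MASK);
        unsigned exit_idx = (unsigned)(ret & EXIT_MASK);

        printf("same tb? %s, exit index %u\n",
               last_tb == &tb ? "yes" : "no", exit_idx);
        return 0;
    }
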
+ */ + CPUClass *cc = CPU_GET_CLASS(cpu); + qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc, + "Stopped execution of TB chain before %p [" + TARGET_FMT_lx "] %s\n", + last_tb->tc.ptr, last_tb->pc, + lookup_symbol(last_tb->pc)); + if (cc->tcg_ops->synchronize_from_tb) { + cc->tcg_ops->synchronize_from_tb(cpu, last_tb); + } else { + assert(cc->set_pc); + cc->set_pc(cpu, last_tb->pc); + } + } + + /* + * If gdb single-step, and we haven't raised another exception, + * raise a debug exception. Single-step with another exception + * is handled in cpu_handle_exception. + */ + if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) { + cpu->exception_index = EXCP_DEBUG; + cpu_loop_exit(cpu); + } + + return last_tb; +} + + +static void cpu_exec_enter(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->tcg_ops->cpu_exec_enter) { + cc->tcg_ops->cpu_exec_enter(cpu); + } +} + +static void cpu_exec_exit(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->tcg_ops->cpu_exec_exit) { + cc->tcg_ops->cpu_exec_exit(cpu); + } +} + +void cpu_exec_step_atomic(CPUState *cpu) +{ + CPUArchState *env = (CPUArchState *)cpu->env_ptr; + TranslationBlock *tb; + target_ulong cs_base, pc; + uint32_t flags, cflags; + int tb_exit; + + if (sigsetjmp(cpu->jmp_env, 0) == 0) { + start_exclusive(); + g_assert(cpu == current_cpu); + g_assert(!cpu->running); + cpu->running = true; + + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + + cflags = curr_cflags(cpu); + /* Execute in a serial context. */ + cflags &= ~CF_PARALLEL; + /* After 1 insn, return and release the exclusive lock. */ + cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1; + /* + * No need to check_for_breakpoints here. + * We only arrive in cpu_exec_step_atomic after beginning execution + * of an insn that includes an atomic operation we can't handle. + * Any breakpoint for this insn will have been recognized earlier. + */ + + tb = tb_lookup(cpu, pc, cs_base, flags, cflags); + if (tb == NULL) { + mmap_lock(); + tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); + mmap_unlock(); + } + + cpu_exec_enter(cpu); + /* execute the generated code */ + trace_exec_tb(tb, pc); + cpu_tb_exec(cpu, tb, &tb_exit); + cpu_exec_exit(cpu); + } else { + /* + * The mmap_lock is dropped by tb_gen_code if it runs out of + * memory. + */ +#ifndef CONFIG_SOFTMMU + clear_helper_retaddr(); + tcg_debug_assert(!have_mmap_lock()); +#endif + if (qemu_mutex_iothread_locked()) { + qemu_mutex_unlock_iothread(); + } + assert_no_pages_locked(); + qemu_plugin_disable_mem_helpers(cpu); + } + + /* + * As we start the exclusive region before codegen we must still + * be in the region if we longjump out of either the codegen or + * the execution. 
+ */ + g_assert(cpu_in_exclusive_context(cpu)); + cpu->running = false; + end_exclusive(); +} + +struct tb_desc { + target_ulong pc; + target_ulong cs_base; + CPUArchState *env; + tb_page_addr_t phys_page1; + uint32_t flags; + uint32_t cflags; + uint32_t trace_vcpu_dstate; +}; + +static bool tb_lookup_cmp(const void *p, const void *d) +{ + const TranslationBlock *tb = p; + const struct tb_desc *desc = d; + + if (tb->pc == desc->pc && + tb->page_addr[0] == desc->phys_page1 && + tb->cs_base == desc->cs_base && + tb->flags == desc->flags && + tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && + tb_cflags(tb) == desc->cflags) { + /* check next page if needed */ + if (tb->page_addr[1] == -1) { + return true; + } else { + tb_page_addr_t phys_page2; + target_ulong virt_page2; + + virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + phys_page2 = get_page_addr_code(desc->env, virt_page2); + if (tb->page_addr[1] == phys_page2) { + return true; + } + } + } + return false; +} + +TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, + target_ulong cs_base, uint32_t flags, + uint32_t cflags) +{ + tb_page_addr_t phys_pc; + struct tb_desc desc; + uint32_t h; + + desc.env = (CPUArchState *)cpu->env_ptr; + desc.cs_base = cs_base; + desc.flags = flags; + desc.cflags = cflags; + desc.trace_vcpu_dstate = *cpu->trace_dstate; + desc.pc = pc; + phys_pc = get_page_addr_code(desc.env, pc); + if (phys_pc == -1) { + return NULL; + } + desc.phys_page1 = phys_pc & TARGET_PAGE_MASK; + h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate); + return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp); +} + +void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr) +{ + if (TCG_TARGET_HAS_direct_jump) { + uintptr_t offset = tb->jmp_target_arg[n]; + uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr; + uintptr_t jmp_rx = tc_ptr + offset; + uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff; + tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr); + } else { + tb->jmp_target_arg[n] = addr; + } +} + +static inline void tb_add_jump(TranslationBlock *tb, int n, + TranslationBlock *tb_next) +{ + uintptr_t old; + + qemu_thread_jit_write(); + assert(n < ARRAY_SIZE(tb->jmp_list_next)); + qemu_spin_lock(&tb_next->jmp_lock); + + /* make sure the destination TB is valid */ + if (tb_next->cflags & CF_INVALID) { + goto out_unlock_next; + } + /* Atomically claim the jump destination slot only if it was NULL */ + old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, + (uintptr_t)tb_next); + if (old) { + goto out_unlock_next; + } + + /* patch the native jump address */ + tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr); + + /* add in TB jmp list */ + tb->jmp_list_next[n] = tb_next->jmp_list_head; + tb_next->jmp_list_head = (uintptr_t)tb | n; + + qemu_spin_unlock(&tb_next->jmp_lock); + + qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, + "Linking TBs %p [" TARGET_FMT_lx + "] index %d -> %p [" TARGET_FMT_lx "]\n", + tb->tc.ptr, tb->pc, n, + tb_next->tc.ptr, tb_next->pc); + return; + + out_unlock_next: + qemu_spin_unlock(&tb_next->jmp_lock); + return; +} + +static inline bool cpu_handle_halt(CPUState *cpu) +{ +#ifndef CONFIG_USER_ONLY + if (cpu->halted) { +#if defined(TARGET_I386) + if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { + X86CPU *x86_cpu = X86_CPU(cpu); + qemu_mutex_lock_iothread(); + apic_poll_irq(x86_cpu->apic_state); + cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); + qemu_mutex_unlock_iothread(); + } +#endif /* TARGET_I386 */ + if (!cpu_has_work(cpu)) { + return true; + } + + 
cpu->halted = 0; + } +#endif /* !CONFIG_USER_ONLY */ + + return false; +} + +static inline void cpu_handle_debug_exception(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUWatchpoint *wp; + + if (!cpu->watchpoint_hit) { + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + wp->flags &= ~BP_WATCHPOINT_HIT; + } + } + + if (cc->tcg_ops->debug_excp_handler) { + cc->tcg_ops->debug_excp_handler(cpu); + } +} + +static inline bool cpu_handle_exception(CPUState *cpu, int *ret) +{ + if (cpu->exception_index < 0) { +#ifndef CONFIG_USER_ONLY + if (replay_has_exception() + && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) { + /* Execute just one insn to trigger exception pending in the log */ + cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1; + } +#endif + return false; + } + if (cpu->exception_index >= EXCP_INTERRUPT) { + /* exit request from the cpu execution loop */ + *ret = cpu->exception_index; + if (*ret == EXCP_DEBUG) { + cpu_handle_debug_exception(cpu); + } + cpu->exception_index = -1; + return true; + } else { +#if defined(CONFIG_USER_ONLY) + /* if user mode only, we simulate a fake exception + which will be handled outside the cpu execution + loop */ +#if defined(TARGET_I386) + CPUClass *cc = CPU_GET_CLASS(cpu); + cc->tcg_ops->fake_user_interrupt(cpu); +#endif /* TARGET_I386 */ + *ret = cpu->exception_index; + cpu->exception_index = -1; + return true; +#else + if (replay_exception()) { + CPUClass *cc = CPU_GET_CLASS(cpu); + qemu_mutex_lock_iothread(); + cc->tcg_ops->do_interrupt(cpu); + qemu_mutex_unlock_iothread(); + cpu->exception_index = -1; + + if (unlikely(cpu->singlestep_enabled)) { + /* + * After processing the exception, ensure an EXCP_DEBUG is + * raised when single-stepping so that GDB doesn't miss the + * next instruction. + */ + *ret = EXCP_DEBUG; + cpu_handle_debug_exception(cpu); + return true; + } + } else if (!replay_has_interrupt()) { + /* give a chance to iothread in replay mode */ + *ret = EXCP_INTERRUPT; + return true; + } +#endif + } + + return false; +} + +#ifndef CONFIG_USER_ONLY +/* + * CPU_INTERRUPT_POLL is a virtual event which gets converted into a + * "real" interrupt event later. It does not need to be recorded for + * replay purposes. + */ +static inline bool need_replay_interrupt(int interrupt_request) +{ +#if defined(TARGET_I386) + return !(interrupt_request & CPU_INTERRUPT_POLL); +#else + return true; +#endif +} +#endif /* !CONFIG_USER_ONLY */ + +static inline bool cpu_handle_interrupt(CPUState *cpu, + TranslationBlock **last_tb) +{ + /* + * If we have requested custom cflags with CF_NOIRQ we should + * skip checking here. Any pending interrupts will get picked up + * by the next TB we execute under normal cflags. + */ + if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) { + return false; + } + + /* Clear the interrupt flag now since we're processing + * cpu->interrupt_request and cpu->exit_request. + * Ensure zeroing happens before reading cpu->exit_request or + * cpu->interrupt_request (see also smp_wmb in cpu_exit()) + */ + qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0); + + if (unlikely(qatomic_read(&cpu->interrupt_request))) { + int interrupt_request; + qemu_mutex_lock_iothread(); + interrupt_request = cpu->interrupt_request; + if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { + /* Mask out external interrupts for this step. 
*/ + interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; + } + if (interrupt_request & CPU_INTERRUPT_DEBUG) { + cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; + cpu->exception_index = EXCP_DEBUG; + qemu_mutex_unlock_iothread(); + return true; + } +#if !defined(CONFIG_USER_ONLY) + if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) { + /* Do nothing */ + } else if (interrupt_request & CPU_INTERRUPT_HALT) { + replay_interrupt(); + cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; + cpu->halted = 1; + cpu->exception_index = EXCP_HLT; + qemu_mutex_unlock_iothread(); + return true; + } +#if defined(TARGET_I386) + else if (interrupt_request & CPU_INTERRUPT_INIT) { + X86CPU *x86_cpu = X86_CPU(cpu); + CPUArchState *env = &x86_cpu->env; + replay_interrupt(); + cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0); + do_cpu_init(x86_cpu); + cpu->exception_index = EXCP_HALTED; + qemu_mutex_unlock_iothread(); + return true; + } +#else + else if (interrupt_request & CPU_INTERRUPT_RESET) { + replay_interrupt(); + cpu_reset(cpu); + qemu_mutex_unlock_iothread(); + return true; + } +#endif /* !TARGET_I386 */ + /* The target hook has 3 exit conditions: + False when the interrupt isn't processed, + True when it is, and we should restart on a new TB, + and via longjmp via cpu_loop_exit. */ + else { + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->tcg_ops->cpu_exec_interrupt && + cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) { + if (need_replay_interrupt(interrupt_request)) { + replay_interrupt(); + } + /* + * After processing the interrupt, ensure an EXCP_DEBUG is + * raised when single-stepping so that GDB doesn't miss the + * next instruction. + */ + cpu->exception_index = + (cpu->singlestep_enabled ? EXCP_DEBUG : -1); + *last_tb = NULL; + } + /* The target hook may have updated the 'cpu->interrupt_request'; + * reload the 'interrupt_request' value */ + interrupt_request = cpu->interrupt_request; + } +#endif /* !CONFIG_USER_ONLY */ + if (interrupt_request & CPU_INTERRUPT_EXITTB) { + cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; + /* ensure that no TB jump will be modified as + the program flow was changed */ + *last_tb = NULL; + } + + /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */ + qemu_mutex_unlock_iothread(); + } + + /* Finally, check if we need to exit to the main loop. */ + if (unlikely(qatomic_read(&cpu->exit_request)) + || (icount_enabled() + && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT) + && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) { + qatomic_set(&cpu->exit_request, 0); + if (cpu->exception_index == -1) { + cpu->exception_index = EXCP_INTERRUPT; + } + return true; + } + + return false; +} + +static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, + TranslationBlock **last_tb, int *tb_exit) +{ + int32_t insns_left; + + trace_exec_tb(tb, tb->pc); + tb = cpu_tb_exec(cpu, tb, tb_exit); + if (*tb_exit != TB_EXIT_REQUESTED) { + *last_tb = tb; + return; + } + + *last_tb = NULL; + insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32); + if (insns_left < 0) { + /* Something asked us to stop executing chained TBs; just + * continue round the main loop. Whatever requested the exit + * will also have set something else (eg exit_request or + * interrupt_request) which will be handled by + * cpu_handle_interrupt. cpu_handle_interrupt will also + * clear cpu->icount_decr.u16.high. + */ + return; + } + + /* Instruction counter expired. 
*/ + assert(icount_enabled()); +#ifndef CONFIG_USER_ONLY + /* Ensure global icount has gone forward */ + icount_update(cpu); + /* Refill decrementer and continue execution. */ + insns_left = MIN(0xffff, cpu->icount_budget); + cpu_neg(cpu)->icount_decr.u16.low = insns_left; + cpu->icount_extra = cpu->icount_budget - insns_left; + + /* + * If the next tb has more instructions than we have left to + * execute we need to ensure we find/generate a TB with exactly + * insns_left instructions in it. + */ + if (insns_left > 0 && insns_left < tb->icount) { + assert(insns_left <= CF_COUNT_MASK); + assert(cpu->icount_extra == 0); + cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left; + } +#endif +} + +/* main execution loop */ + +int cpu_exec(CPUState *cpu) +{ + int ret; + SyncClocks sc = { 0 }; + + /* replay_interrupt may need current_cpu */ + current_cpu = cpu; + + if (cpu_handle_halt(cpu)) { + return EXCP_HALTED; + } + + rcu_read_lock(); + + cpu_exec_enter(cpu); + + /* Calculate difference between guest clock and host clock. + * This delay includes the delay of the last cycle, so + * what we have to do is sleep until it is 0. As for the + * advance/delay we gain here, we try to fix it next time. + */ + init_delay_params(&sc, cpu); + + /* prepare setjmp context for exception handling */ + if (sigsetjmp(cpu->jmp_env, 0) != 0) { +#if defined(__clang__) + /* + * Some compilers wrongly smash all local variables after + * siglongjmp (the spec requires that only non-volatile locals + * which are changed between the sigsetjmp and siglongjmp are + * permitted to be trashed). There were bug reports for gcc + * 4.5.0 and clang. The bug is fixed in all versions of gcc + * that we support, but is still unfixed in clang: + * https://bugs.llvm.org/show_bug.cgi?id=21183 + * + * Reload an essential local variable here for those compilers. + * Newer versions of gcc would complain about this code (-Wclobbered), + * so we only perform the workaround for clang. + */ + cpu = current_cpu; +#else + /* Non-buggy compilers preserve this; assert the correct value. */ + g_assert(cpu == current_cpu); +#endif + +#ifndef CONFIG_SOFTMMU + clear_helper_retaddr(); + tcg_debug_assert(!have_mmap_lock()); +#endif + if (qemu_mutex_iothread_locked()) { + qemu_mutex_unlock_iothread(); + } + qemu_plugin_disable_mem_helpers(cpu); + + assert_no_pages_locked(); + } + + /* if an exception is pending, we execute it here */ + while (!cpu_handle_exception(cpu, &ret)) { + TranslationBlock *last_tb = NULL; + int tb_exit = 0; + + while (!cpu_handle_interrupt(cpu, &last_tb)) { + TranslationBlock *tb; + target_ulong cs_base, pc; + uint32_t flags, cflags; + + cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags); + + /* + * When requested, use an exact setting for cflags for the next + * execution. This is used for icount, precise smc, and stop- + * after-access watchpoints. Since this request should never + * have CF_INVALID set, -1 is a convenient invalid value that + * does not require tcg headers for cpu_common_reset. 
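
When icount is enabled, the refill path above splits the remaining instruction budget between the 16-bit decrementer and icount_extra, and clamps the next TB's instruction count via cflags when fewer instructions remain than the TB would execute. A small worked sketch of that bookkeeping with simplified names and constants:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b)   ((a) < (b) ? (a) : (b))
    #define COUNT_MASK  0xffffu          /* stand-in for CF_COUNT_MASK */

    int main(void)
    {
        uint64_t budget = 5;             /* instructions left in this slice */
        uint32_t tb_icount = 7;          /* instructions in the next TB */
        uint32_t cflags = 0;

        /* Only 16 bits fit in the decrementer; the rest waits in "extra". */
        uint32_t insns_left = MIN(0xffff, budget);
        uint64_t icount_extra = budget - insns_left;

        /* The next TB is longer than the budget: request a shorter one. */
        if (insns_left > 0 && insns_left < tb_icount) {
            cflags = (cflags & ~COUNT_MASK) | insns_left;
        }

        printf("decr %" PRIu32 ", extra %" PRIu64 ", cflags 0x%" PRIx32 "\n",
               insns_left, icount_extra, cflags);
        return 0;
    }
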
+ */ + cflags = cpu->cflags_next_tb; + if (cflags == -1) { + cflags = curr_cflags(cpu); + } else { + cpu->cflags_next_tb = -1; + } + + if (check_for_breakpoints(cpu, pc, &cflags)) { + break; + } + + tb = tb_lookup(cpu, pc, cs_base, flags, cflags); + if (tb == NULL) { + mmap_lock(); + tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); + mmap_unlock(); + /* + * We add the TB in the virtual pc hash table + * for the fast lookup + */ + qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); + } + +#ifndef CONFIG_USER_ONLY + /* + * We don't take care of direct jumps when address mapping + * changes in system emulation. So it's not safe to make a + * direct jump to a TB spanning two pages because the mapping + * for the second page can change. + */ + if (tb->page_addr[1] != -1) { + last_tb = NULL; + } +#endif + /* See if we can patch the calling TB. */ + if (last_tb) { + tb_add_jump(last_tb, tb_exit, tb); + } + + cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit); + + /* Try to align the host and virtual clocks + if the guest is in advance */ + align_clocks(&sc, cpu); + } + } + + cpu_exec_exit(cpu); + rcu_read_unlock(); + + return ret; +} + +void tcg_exec_realizefn(CPUState *cpu, Error **errp) +{ + static bool tcg_target_initialized; + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (!tcg_target_initialized) { + cc->tcg_ops->initialize(); + tcg_target_initialized = true; + } + tlb_init(cpu); + qemu_plugin_vcpu_init_hook(cpu); + +#ifndef CONFIG_USER_ONLY + tcg_iommu_init_notifier_list(cpu); +#endif /* !CONFIG_USER_ONLY */ +} + +/* undo the initializations in reverse order */ +void tcg_exec_unrealizefn(CPUState *cpu) +{ +#ifndef CONFIG_USER_ONLY + tcg_iommu_free_notifier_list(cpu); +#endif /* !CONFIG_USER_ONLY */ + + qemu_plugin_vcpu_exit_hook(cpu); + tlb_destroy(cpu); +} + +#ifndef CONFIG_USER_ONLY + +void dump_drift_info(GString *buf) +{ + if (!icount_enabled()) { + return; + } + + g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n", + (cpu_get_clock() - icount_get()) / SCALE_MS); + if (icount_align_option) { + g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n", + -max_delay / SCALE_MS); + g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n", + max_advance / SCALE_MS); + } else { + g_string_append_printf(buf, "Max guest delay NA\n"); + g_string_append_printf(buf, "Max guest advance NA\n"); + } +} + +HumanReadableText *qmp_x_query_jit(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + if (!tcg_enabled()) { + error_setg(errp, "JIT information is only available with accel=tcg"); + return NULL; + } + + dump_exec_info(buf); + dump_drift_info(buf); + + return human_readable_text_from_str(buf); +} + +HumanReadableText *qmp_x_query_opcount(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + if (!tcg_enabled()) { + error_setg(errp, "Opcode count information is only available with accel=tcg"); + return NULL; + } + + dump_opcount_info(buf); + + return human_readable_text_from_str(buf); +} + +#endif /* !CONFIG_USER_ONLY */ diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c new file mode 100644 index 000000000..b69a95344 --- /dev/null +++ b/accel/tcg/cputlb.c @@ -0,0 +1,2619 @@ +/* + * Common CPU TLB handling + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "hw/core/tcg-cpu-ops.h" +#include "exec/exec-all.h" +#include "exec/memory.h" +#include "exec/cpu_ldst.h" +#include "exec/cputlb.h" +#include "exec/memory-internal.h" +#include "exec/ram_addr.h" +#include "tcg/tcg.h" +#include "qemu/error-report.h" +#include "exec/log.h" +#include "exec/helper-proto.h" +#include "qemu/atomic.h" +#include "qemu/atomic128.h" +#include "exec/translate-all.h" +#include "trace/trace-root.h" +#include "tb-hash.h" +#include "internal.h" +#ifdef CONFIG_PLUGIN +#include "qemu/plugin-memory.h" +#endif +#include "tcg/tcg-ldst.h" + +/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ +/* #define DEBUG_TLB */ +/* #define DEBUG_TLB_LOG */ + +#ifdef DEBUG_TLB +# define DEBUG_TLB_GATE 1 +# ifdef DEBUG_TLB_LOG +# define DEBUG_TLB_LOG_GATE 1 +# else +# define DEBUG_TLB_LOG_GATE 0 +# endif +#else +# define DEBUG_TLB_GATE 0 +# define DEBUG_TLB_LOG_GATE 0 +#endif + +#define tlb_debug(fmt, ...) do { \ + if (DEBUG_TLB_LOG_GATE) { \ + qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \ + ## __VA_ARGS__); \ + } else if (DEBUG_TLB_GATE) { \ + fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ + } \ +} while (0) + +#define assert_cpu_is_self(cpu) do { \ + if (DEBUG_TLB_GATE) { \ + g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \ + } \ + } while (0) + +/* run_on_cpu_data.target_ptr should always be big enough for a + * target_ulong even on 32 bit builds */ +QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); + +/* We currently can't handle more than 16 bits in the MMUIDX bitmask. + */ +QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); +#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) + +static inline size_t tlb_n_entries(CPUTLBDescFast *fast) +{ + return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; +} + +static inline size_t sizeof_tlb(CPUTLBDescFast *fast) +{ + return fast->mask + (1 << CPU_TLB_ENTRY_BITS); +} + +static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, + size_t max_entries) +{ + desc->window_begin_ns = ns; + desc->window_max_entries = max_entries; +} + +static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) +{ + unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr); + + for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { + qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL); + } +} + +static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) +{ + /* Discard jump cache entries for any tb which might potentially + overlap the flushed page. */ + tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); + tb_jmp_cache_clear_page(cpu, addr); +} + +/** + * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary + * @desc: The CPUTLBDesc portion of the TLB + * @fast: The CPUTLBDescFast portion of the same TLB + * + * Called with tlb_lock_held. 
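+ * Its only caller is tlb_flush_one_mmuidx_locked(), which immediately
+ * wipes the (possibly reallocated) table via tlb_mmu_flush_locked(), so
+ * no existing entries need to be preserved across a resize.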
+ * + * We have two main constraints when resizing a TLB: (1) we only resize it + * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing + * the array or unnecessarily flushing it), which means we do not control how + * frequently the resizing can occur; (2) we don't have access to the guest's + * future scheduling decisions, and therefore have to decide the magnitude of + * the resize based on past observations. + * + * In general, a memory-hungry process can benefit greatly from an appropriately + * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that + * we just have to make the TLB as large as possible; while an oversized TLB + * results in minimal TLB miss rates, it also takes longer to be flushed + * (flushes can be _very_ frequent), and the reduced locality can also hurt + * performance. + * + * To achieve near-optimal performance for all kinds of workloads, we: + * + * 1. Aggressively increase the size of the TLB when the use rate of the + * TLB being flushed is high, since it is likely that in the near future this + * memory-hungry process will execute again, and its memory hungriness will + * probably be similar. + * + * 2. Slowly reduce the size of the TLB as the use rate declines over a + * reasonably large time window. The rationale is that if in such a time window + * we have not observed a high TLB use rate, it is likely that we won't observe + * it in the near future. In that case, once a time window expires we downsize + * the TLB to match the maximum use rate observed in the window. + * + * 3. Try to keep the maximum use rate in a time window in the 30-70% range, + * since in that range performance is likely near-optimal. Recall that the TLB + * is direct mapped, so we want the use rate to be low (or at least not too + * high), since otherwise we are likely to have a significant amount of + * conflict misses. + */ +static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast, + int64_t now) +{ + size_t old_size = tlb_n_entries(fast); + size_t rate; + size_t new_size = old_size; + int64_t window_len_ms = 100; + int64_t window_len_ns = window_len_ms * 1000 * 1000; + bool window_expired = now > desc->window_begin_ns + window_len_ns; + + if (desc->n_used_entries > desc->window_max_entries) { + desc->window_max_entries = desc->n_used_entries; + } + rate = desc->window_max_entries * 100 / old_size; + + if (rate > 70) { + new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS); + } else if (rate < 30 && window_expired) { + size_t ceil = pow2ceil(desc->window_max_entries); + size_t expected_rate = desc->window_max_entries * 100 / ceil; + + /* + * Avoid undersizing when the max number of entries seen is just below + * a pow2. For instance, if max_entries == 1025, the expected use rate + * would be 1025/2048==50%. However, if max_entries == 1023, we'd get + * 1023/1024==99.9% use rate, so we'd likely end up doubling the size + * later. Thus, make sure that the expected use rate remains below 70%. + * (and since we double the size, that means the lowest rate we'd + * expect to get is 35%, which is still in the 30-70% range where + * we consider that the size is appropriate.) 
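+ * (For max_entries == 1023 this doubles ceil to 2048, for an expected
+ * rate of 1023/2048 == 49% -- back inside the target window.)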
+ */ + if (expected_rate > 70) { + ceil *= 2; + } + new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS); + } + + if (new_size == old_size) { + if (window_expired) { + tlb_window_reset(desc, now, desc->n_used_entries); + } + return; + } + + g_free(fast->table); + g_free(desc->iotlb); + + tlb_window_reset(desc, now, 0); + /* desc->n_used_entries is cleared by the caller */ + fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; + fast->table = g_try_new(CPUTLBEntry, new_size); + desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); + + /* + * If the allocations fail, try smaller sizes. We just freed some + * memory, so going back to half of new_size has a good chance of working. + * Increased memory pressure elsewhere in the system might cause the + * allocations to fail though, so we progressively reduce the allocation + * size, aborting if we cannot even allocate the smallest TLB we support. + */ + while (fast->table == NULL || desc->iotlb == NULL) { + if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) { + error_report("%s: %s", __func__, strerror(errno)); + abort(); + } + new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS); + fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; + + g_free(fast->table); + g_free(desc->iotlb); + fast->table = g_try_new(CPUTLBEntry, new_size); + desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); + } +} + +static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast) +{ + desc->n_used_entries = 0; + desc->large_page_addr = -1; + desc->large_page_mask = -1; + desc->vindex = 0; + memset(fast->table, -1, sizeof_tlb(fast)); + memset(desc->vtable, -1, sizeof(desc->vtable)); +} + +static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx, + int64_t now) +{ + CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx]; + CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx]; + + tlb_mmu_resize_locked(desc, fast, now); + tlb_mmu_flush_locked(desc, fast); +} + +static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) +{ + size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; + + tlb_window_reset(desc, now, 0); + desc->n_used_entries = 0; + fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; + fast->table = g_new(CPUTLBEntry, n_entries); + desc->iotlb = g_new(CPUIOTLBEntry, n_entries); + tlb_mmu_flush_locked(desc, fast); +} + +static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) +{ + env_tlb(env)->d[mmu_idx].n_used_entries++; +} + +static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx) +{ + env_tlb(env)->d[mmu_idx].n_used_entries--; +} + +void tlb_init(CPUState *cpu) +{ + CPUArchState *env = cpu->env_ptr; + int64_t now = get_clock_realtime(); + int i; + + qemu_spin_init(&env_tlb(env)->c.lock); + + /* All tlbs are initialized flushed. */ + env_tlb(env)->c.dirty = 0; + + for (i = 0; i < NB_MMU_MODES; i++) { + tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now); + } +} + +void tlb_destroy(CPUState *cpu) +{ + CPUArchState *env = cpu->env_ptr; + int i; + + qemu_spin_destroy(&env_tlb(env)->c.lock); + for (i = 0; i < NB_MMU_MODES; i++) { + CPUTLBDesc *desc = &env_tlb(env)->d[i]; + CPUTLBDescFast *fast = &env_tlb(env)->f[i]; + + g_free(fast->table); + g_free(desc->iotlb); + } +} + +/* flush_all_helper: run fn across all cpus + * + * If the wait flag is set then the src cpu's helper will be queued as + * "safe" work and the loop exited creating a synchronisation point + * where all queued work will be finished before execution starts + * again. 
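+ *
+ * This helper itself only queues asynchronous work on the other vCPUs;
+ * the *_all_cpus_synced variants below obtain the synchronisation point
+ * by also queueing the source CPU's flush as async "safe" work.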
+ */ +static void flush_all_helper(CPUState *src, run_on_cpu_func fn, + run_on_cpu_data d) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (cpu != src) { + async_run_on_cpu(cpu, fn, d); + } + } +} + +void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) +{ + CPUState *cpu; + size_t full = 0, part = 0, elide = 0; + + CPU_FOREACH(cpu) { + CPUArchState *env = cpu->env_ptr; + + full += qatomic_read(&env_tlb(env)->c.full_flush_count); + part += qatomic_read(&env_tlb(env)->c.part_flush_count); + elide += qatomic_read(&env_tlb(env)->c.elide_flush_count); + } + *pfull = full; + *ppart = part; + *pelide = elide; +} + +static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) +{ + CPUArchState *env = cpu->env_ptr; + uint16_t asked = data.host_int; + uint16_t all_dirty, work, to_clean; + int64_t now = get_clock_realtime(); + + assert_cpu_is_self(cpu); + + tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked); + + qemu_spin_lock(&env_tlb(env)->c.lock); + + all_dirty = env_tlb(env)->c.dirty; + to_clean = asked & all_dirty; + all_dirty &= ~to_clean; + env_tlb(env)->c.dirty = all_dirty; + + for (work = to_clean; work != 0; work &= work - 1) { + int mmu_idx = ctz32(work); + tlb_flush_one_mmuidx_locked(env, mmu_idx, now); + } + + qemu_spin_unlock(&env_tlb(env)->c.lock); + + cpu_tb_jmp_cache_clear(cpu); + + if (to_clean == ALL_MMUIDX_BITS) { + qatomic_set(&env_tlb(env)->c.full_flush_count, + env_tlb(env)->c.full_flush_count + 1); + } else { + qatomic_set(&env_tlb(env)->c.part_flush_count, + env_tlb(env)->c.part_flush_count + ctpop16(to_clean)); + if (to_clean != asked) { + qatomic_set(&env_tlb(env)->c.elide_flush_count, + env_tlb(env)->c.elide_flush_count + + ctpop16(asked & ~to_clean)); + } + } +} + +void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) +{ + tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); + + if (cpu->created && !qemu_cpu_is_self(cpu)) { + async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work, + RUN_ON_CPU_HOST_INT(idxmap)); + } else { + tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); + } +} + +void tlb_flush(CPUState *cpu) +{ + tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); +} + +void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) +{ + const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; + + tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); + + flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); + fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); +} + +void tlb_flush_all_cpus(CPUState *src_cpu) +{ + tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS); +} + +void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap) +{ + const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; + + tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); + + flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); + async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); +} + +void tlb_flush_all_cpus_synced(CPUState *src_cpu) +{ + tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS); +} + +static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry, + target_ulong page, target_ulong mask) +{ + page &= mask; + mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK; + + return (page == (tlb_entry->addr_read & mask) || + page == (tlb_addr_write(tlb_entry) & mask) || + page == (tlb_entry->addr_code & mask)); +} + +static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, + target_ulong page) +{ + return tlb_hit_page_mask_anyprot(tlb_entry, page, -1); +} + +/** + * tlb_entry_is_empty - return true if the entry is not in use 
+ * @te: pointer to CPUTLBEntry + */ +static inline bool tlb_entry_is_empty(const CPUTLBEntry *te) +{ + return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1; +} + +/* Called with tlb_c.lock held */ +static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry, + target_ulong page, + target_ulong mask) +{ + if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) { + memset(tlb_entry, -1, sizeof(*tlb_entry)); + return true; + } + return false; +} + +static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, + target_ulong page) +{ + return tlb_flush_entry_mask_locked(tlb_entry, page, -1); +} + +/* Called with tlb_c.lock held */ +static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx, + target_ulong page, + target_ulong mask) +{ + CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx]; + int k; + + assert_cpu_is_self(env_cpu(env)); + for (k = 0; k < CPU_VTLB_SIZE; k++) { + if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) { + tlb_n_used_entries_dec(env, mmu_idx); + } + } +} + +static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx, + target_ulong page) +{ + tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1); +} + +static void tlb_flush_page_locked(CPUArchState *env, int midx, + target_ulong page) +{ + target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr; + target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask; + + /* Check if we need to flush due to large pages. */ + if ((page & lp_mask) == lp_addr) { + tlb_debug("forcing full flush midx %d (" + TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", + midx, lp_addr, lp_mask); + tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); + } else { + if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) { + tlb_n_used_entries_dec(env, midx); + } + tlb_flush_vtlb_page_locked(env, midx, page); + } +} + +/** + * tlb_flush_page_by_mmuidx_async_0: + * @cpu: cpu on which to flush + * @addr: page of virtual address to flush + * @idxmap: set of mmu_idx to flush + * + * Helper for tlb_flush_page_by_mmuidx and friends, flush one page + * at @addr from the tlbs indicated by @idxmap from @cpu. + */ +static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, + target_ulong addr, + uint16_t idxmap) +{ + CPUArchState *env = cpu->env_ptr; + int mmu_idx; + + assert_cpu_is_self(cpu); + + tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap); + + qemu_spin_lock(&env_tlb(env)->c.lock); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + if ((idxmap >> mmu_idx) & 1) { + tlb_flush_page_locked(env, mmu_idx, addr); + } + } + qemu_spin_unlock(&env_tlb(env)->c.lock); + + tb_flush_jmp_cache(cpu, addr); +} + +/** + * tlb_flush_page_by_mmuidx_async_1: + * @cpu: cpu on which to flush + * @data: encoded addr + idxmap + * + * Helper for tlb_flush_page_by_mmuidx and friends, called through + * async_run_on_cpu. The idxmap parameter is encoded in the page + * offset of the target_ptr field. This limits the set of mmu_idx + * that can be passed via this method. 
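+ * (It must fit in the low TARGET_PAGE_BITS -- e.g. 12 bits for a 4 KiB
+ * target page; larger maps go through tlb_flush_page_by_mmuidx_async_2
+ * with a heap-allocated structure instead.)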
+ */ +static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu, + run_on_cpu_data data) +{ + target_ulong addr_and_idxmap = (target_ulong) data.target_ptr; + target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK; + uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK; + + tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); +} + +typedef struct { + target_ulong addr; + uint16_t idxmap; +} TLBFlushPageByMMUIdxData; + +/** + * tlb_flush_page_by_mmuidx_async_2: + * @cpu: cpu on which to flush + * @data: allocated addr + idxmap + * + * Helper for tlb_flush_page_by_mmuidx and friends, called through + * async_run_on_cpu. The addr+idxmap parameters are stored in a + * TLBFlushPageByMMUIdxData structure that has been allocated + * specifically for this helper. Free the structure when done. + */ +static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu, + run_on_cpu_data data) +{ + TLBFlushPageByMMUIdxData *d = data.host_ptr; + + tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap); + g_free(d); +} + +void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap) +{ + tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap); + + /* This should already be page aligned */ + addr &= TARGET_PAGE_MASK; + + if (qemu_cpu_is_self(cpu)) { + tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); + } else if (idxmap < TARGET_PAGE_SIZE) { + /* + * Most targets have only a few mmu_idx. In the case where + * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid + * allocating memory for this operation. + */ + async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1, + RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + } else { + TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1); + + /* Otherwise allocate a structure, freed by the worker. */ + d->addr = addr; + d->idxmap = idxmap; + async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2, + RUN_ON_CPU_HOST_PTR(d)); + } +} + +void tlb_flush_page(CPUState *cpu, target_ulong addr) +{ + tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); +} + +void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr, + uint16_t idxmap) +{ + tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); + + /* This should already be page aligned */ + addr &= TARGET_PAGE_MASK; + + /* + * Allocate memory to hold addr+idxmap only when needed. + * See tlb_flush_page_by_mmuidx for details. + */ + if (idxmap < TARGET_PAGE_SIZE) { + flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, + RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + } else { + CPUState *dst_cpu; + + /* Allocate a separate data block for each destination cpu. */ + CPU_FOREACH(dst_cpu) { + if (dst_cpu != src_cpu) { + TLBFlushPageByMMUIdxData *d + = g_new(TLBFlushPageByMMUIdxData, 1); + + d->addr = addr; + d->idxmap = idxmap; + async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, + RUN_ON_CPU_HOST_PTR(d)); + } + } + } + + tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap); +} + +void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) +{ + tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS); +} + +void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, + target_ulong addr, + uint16_t idxmap) +{ + tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); + + /* This should already be page aligned */ + addr &= TARGET_PAGE_MASK; + + /* + * Allocate memory to hold addr+idxmap only when needed. + * See tlb_flush_page_by_mmuidx for details. 
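+ * Unlike the non-synced variant, the source CPU's own flush is queued
+ * as async "safe" work here, so it only runs once every vCPU has
+ * reached a synchronisation point.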
+ */ + if (idxmap < TARGET_PAGE_SIZE) { + flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, + RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1, + RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + } else { + CPUState *dst_cpu; + TLBFlushPageByMMUIdxData *d; + + /* Allocate a separate data block for each destination cpu. */ + CPU_FOREACH(dst_cpu) { + if (dst_cpu != src_cpu) { + d = g_new(TLBFlushPageByMMUIdxData, 1); + d->addr = addr; + d->idxmap = idxmap; + async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, + RUN_ON_CPU_HOST_PTR(d)); + } + } + + d = g_new(TLBFlushPageByMMUIdxData, 1); + d->addr = addr; + d->idxmap = idxmap; + async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2, + RUN_ON_CPU_HOST_PTR(d)); + } +} + +void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr) +{ + tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS); +} + +static void tlb_flush_range_locked(CPUArchState *env, int midx, + target_ulong addr, target_ulong len, + unsigned bits) +{ + CPUTLBDesc *d = &env_tlb(env)->d[midx]; + CPUTLBDescFast *f = &env_tlb(env)->f[midx]; + target_ulong mask = MAKE_64BIT_MASK(0, bits); + + /* + * If @bits is smaller than the tlb size, there may be multiple entries + * within the TLB; otherwise all addresses that match under @mask hit + * the same TLB entry. + * TODO: Perhaps allow bits to be a few bits less than the size. + * For now, just flush the entire TLB. + * + * If @len is larger than the tlb size, then it will take longer to + * test all of the entries in the TLB than it will to flush it all. + */ + if (mask < f->mask || len > f->mask) { + tlb_debug("forcing full flush midx %d (" + TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n", + midx, addr, mask, len); + tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); + return; + } + + /* + * Check if we need to flush due to large pages. + * Because large_page_mask contains all 1's from the msb, + * we only need to test the end of the range. 
+ */ + if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) { + tlb_debug("forcing full flush midx %d (" + TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", + midx, d->large_page_addr, d->large_page_mask); + tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); + return; + } + + for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) { + target_ulong page = addr + i; + CPUTLBEntry *entry = tlb_entry(env, midx, page); + + if (tlb_flush_entry_mask_locked(entry, page, mask)) { + tlb_n_used_entries_dec(env, midx); + } + tlb_flush_vtlb_page_mask_locked(env, midx, page, mask); + } +} + +typedef struct { + target_ulong addr; + target_ulong len; + uint16_t idxmap; + uint16_t bits; +} TLBFlushRangeData; + +static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, + TLBFlushRangeData d) +{ + CPUArchState *env = cpu->env_ptr; + int mmu_idx; + + assert_cpu_is_self(cpu); + + tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n", + d.addr, d.bits, d.len, d.idxmap); + + qemu_spin_lock(&env_tlb(env)->c.lock); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + if ((d.idxmap >> mmu_idx) & 1) { + tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits); + } + } + qemu_spin_unlock(&env_tlb(env)->c.lock); + + for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) { + tb_flush_jmp_cache(cpu, d.addr + i); + } +} + +static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu, + run_on_cpu_data data) +{ + TLBFlushRangeData *d = data.host_ptr; + tlb_flush_range_by_mmuidx_async_0(cpu, *d); + g_free(d); +} + +void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, + target_ulong len, uint16_t idxmap, + unsigned bits) +{ + TLBFlushRangeData d; + + /* + * If all bits are significant, and len is small, + * this devolves to tlb_flush_page. + */ + if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { + tlb_flush_page_by_mmuidx(cpu, addr, idxmap); + return; + } + /* If no page bits are significant, this devolves to tlb_flush. */ + if (bits < TARGET_PAGE_BITS) { + tlb_flush_by_mmuidx(cpu, idxmap); + return; + } + + /* This should already be page aligned */ + d.addr = addr & TARGET_PAGE_MASK; + d.len = len; + d.idxmap = idxmap; + d.bits = bits; + + if (qemu_cpu_is_self(cpu)) { + tlb_flush_range_by_mmuidx_async_0(cpu, d); + } else { + /* Otherwise allocate a structure, freed by the worker. */ + TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); + async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1, + RUN_ON_CPU_HOST_PTR(p)); + } +} + +void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, + uint16_t idxmap, unsigned bits) +{ + tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); +} + +void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, + target_ulong addr, target_ulong len, + uint16_t idxmap, unsigned bits) +{ + TLBFlushRangeData d; + CPUState *dst_cpu; + + /* + * If all bits are significant, and len is small, + * this devolves to tlb_flush_page. + */ + if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { + tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap); + return; + } + /* If no page bits are significant, this devolves to tlb_flush. */ + if (bits < TARGET_PAGE_BITS) { + tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap); + return; + } + + /* This should already be page aligned */ + d.addr = addr & TARGET_PAGE_MASK; + d.len = len; + d.idxmap = idxmap; + d.bits = bits; + + /* Allocate a separate data block for each destination cpu. 
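+ * Each worker frees its private copy in
+ * tlb_flush_range_by_mmuidx_async_1(), so one block cannot be shared.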
*/ + CPU_FOREACH(dst_cpu) { + if (dst_cpu != src_cpu) { + TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); + async_run_on_cpu(dst_cpu, + tlb_flush_range_by_mmuidx_async_1, + RUN_ON_CPU_HOST_PTR(p)); + } + } + + tlb_flush_range_by_mmuidx_async_0(src_cpu, d); +} + +void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, + target_ulong addr, + uint16_t idxmap, unsigned bits) +{ + tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE, + idxmap, bits); +} + +void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, + target_ulong addr, + target_ulong len, + uint16_t idxmap, + unsigned bits) +{ + TLBFlushRangeData d, *p; + CPUState *dst_cpu; + + /* + * If all bits are significant, and len is small, + * this devolves to tlb_flush_page. + */ + if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { + tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap); + return; + } + /* If no page bits are significant, this devolves to tlb_flush. */ + if (bits < TARGET_PAGE_BITS) { + tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap); + return; + } + + /* This should already be page aligned */ + d.addr = addr & TARGET_PAGE_MASK; + d.len = len; + d.idxmap = idxmap; + d.bits = bits; + + /* Allocate a separate data block for each destination cpu. */ + CPU_FOREACH(dst_cpu) { + if (dst_cpu != src_cpu) { + p = g_memdup(&d, sizeof(d)); + async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1, + RUN_ON_CPU_HOST_PTR(p)); + } + } + + p = g_memdup(&d, sizeof(d)); + async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1, + RUN_ON_CPU_HOST_PTR(p)); +} + +void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, + target_ulong addr, + uint16_t idxmap, + unsigned bits) +{ + tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE, + idxmap, bits); +} + +/* update the TLBs so that writes to code in the virtual page 'addr' + can be detected */ +void tlb_protect_code(ram_addr_t ram_addr) +{ + cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, + DIRTY_MEMORY_CODE); +} + +/* update the TLB so that writes in physical page 'phys_addr' are no longer + tested for self modifying code */ +void tlb_unprotect_code(ram_addr_t ram_addr) +{ + cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); +} + + +/* + * Dirty write flag handling + * + * When the TCG code writes to a location it looks up the address in + * the TLB and uses that data to compute the final address. If any of + * the lower bits of the address are set then the slow path is forced. + * There are a number of reasons to do this but for normal RAM the + * most usual is detecting writes to code regions which may invalidate + * generated code. + * + * Other vCPUs might be reading their TLBs during guest execution, so we update + * te->addr_write with qatomic_set. We don't need to worry about this for + * oversized guests as MTTCG is disabled for them. + * + * Called with tlb_c.lock held. + */ +static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, + uintptr_t start, uintptr_t length) +{ + uintptr_t addr = tlb_entry->addr_write; + + if ((addr & (TLB_INVALID_MASK | TLB_MMIO | + TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) { + addr &= TARGET_PAGE_MASK; + addr += tlb_entry->addend; + if ((addr - start) < length) { +#if TCG_OVERSIZED_GUEST + tlb_entry->addr_write |= TLB_NOTDIRTY; +#else + qatomic_set(&tlb_entry->addr_write, + tlb_entry->addr_write | TLB_NOTDIRTY); +#endif + } + } +} + +/* + * Called with tlb_c.lock held. + * Called only from the vCPU context, i.e. 
the TLB's owner thread. + */ +static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s) +{ + *d = *s; +} + +/* This is a cross vCPU call (i.e. another vCPU resetting the flags of + * the target vCPU). + * We must take tlb_c.lock to avoid racing with another vCPU update. The only + * thing actually updated is the target TLB entry ->addr_write flags. + */ +void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) +{ + CPUArchState *env; + + int mmu_idx; + + env = cpu->env_ptr; + qemu_spin_lock(&env_tlb(env)->c.lock); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + unsigned int i; + unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]); + + for (i = 0; i < n; i++) { + tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i], + start1, length); + } + + for (i = 0; i < CPU_VTLB_SIZE; i++) { + tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i], + start1, length); + } + } + qemu_spin_unlock(&env_tlb(env)->c.lock); +} + +/* Called with tlb_c.lock held */ +static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, + target_ulong vaddr) +{ + if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { + tlb_entry->addr_write = vaddr; + } +} + +/* update the TLB corresponding to virtual page vaddr + so that it is no longer dirty */ +void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) +{ + CPUArchState *env = cpu->env_ptr; + int mmu_idx; + + assert_cpu_is_self(cpu); + + vaddr &= TARGET_PAGE_MASK; + qemu_spin_lock(&env_tlb(env)->c.lock); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); + } + + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + int k; + for (k = 0; k < CPU_VTLB_SIZE; k++) { + tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); + } + } + qemu_spin_unlock(&env_tlb(env)->c.lock); +} + +/* Our TLB does not support large pages, so remember the area covered by + large pages and trigger a full TLB flush if these are invalidated. */ +static void tlb_add_large_page(CPUArchState *env, int mmu_idx, + target_ulong vaddr, target_ulong size) +{ + target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; + target_ulong lp_mask = ~(size - 1); + + if (lp_addr == (target_ulong)-1) { + /* No previous large page. */ + lp_addr = vaddr; + } else { + /* Extend the existing region to include the new page. + This is a compromise between unnecessary flushes and + the cost of maintaining a full variable size TLB. */ + lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; + while (((lp_addr ^ vaddr) & lp_mask) != 0) { + lp_mask <<= 1; + } + } + env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; + env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; +} + +/* Add a new TLB entry. At most one entry for a given virtual address + * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the + * supplied size is only used by tlb_flush_page. + * + * Called from TCG-generated code, which is under an RCU read-side + * critical section. 
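+ * Entries evicted here are pushed into the victim TLB, and
+ * PAGE_WRITE_INV installs a write entry that must re-fill via tlb_fill
+ * on its first use (see TLB_INVALID_MASK below).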
+ */ +void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, MemTxAttrs attrs, int prot, + int mmu_idx, target_ulong size) +{ + CPUArchState *env = cpu->env_ptr; + CPUTLB *tlb = env_tlb(env); + CPUTLBDesc *desc = &tlb->d[mmu_idx]; + MemoryRegionSection *section; + unsigned int index; + target_ulong address; + target_ulong write_address; + uintptr_t addend; + CPUTLBEntry *te, tn; + hwaddr iotlb, xlat, sz, paddr_page; + target_ulong vaddr_page; + int asidx = cpu_asidx_from_attrs(cpu, attrs); + int wp_flags; + bool is_ram, is_romd; + + assert_cpu_is_self(cpu); + + if (size <= TARGET_PAGE_SIZE) { + sz = TARGET_PAGE_SIZE; + } else { + tlb_add_large_page(env, mmu_idx, vaddr, size); + sz = size; + } + vaddr_page = vaddr & TARGET_PAGE_MASK; + paddr_page = paddr & TARGET_PAGE_MASK; + + section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, + &xlat, &sz, attrs, &prot); + assert(sz >= TARGET_PAGE_SIZE); + + tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx + " prot=%x idx=%d\n", + vaddr, paddr, prot, mmu_idx); + + address = vaddr_page; + if (size < TARGET_PAGE_SIZE) { + /* Repeat the MMU check and TLB fill on every access. */ + address |= TLB_INVALID_MASK; + } + if (attrs.byte_swap) { + address |= TLB_BSWAP; + } + + is_ram = memory_region_is_ram(section->mr); + is_romd = memory_region_is_romd(section->mr); + + if (is_ram || is_romd) { + /* RAM and ROMD both have associated host memory. */ + addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; + } else { + /* I/O does not; force the host address to NULL. */ + addend = 0; + } + + write_address = address; + if (is_ram) { + iotlb = memory_region_get_ram_addr(section->mr) + xlat; + /* + * Computing is_clean is expensive; avoid all that unless + * the page is actually writable. + */ + if (prot & PAGE_WRITE) { + if (section->readonly) { + write_address |= TLB_DISCARD_WRITE; + } else if (cpu_physical_memory_is_clean(iotlb)) { + write_address |= TLB_NOTDIRTY; + } + } + } else { + /* I/O or ROMD */ + iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; + /* + * Writes to romd devices must go through MMIO to enable write. + * Reads to romd devices go through the ram_ptr found above, + * but of course reads to I/O must go through MMIO. + */ + write_address |= TLB_MMIO; + if (!is_romd) { + address = write_address; + } + } + + wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, + TARGET_PAGE_SIZE); + + index = tlb_index(env, mmu_idx, vaddr_page); + te = tlb_entry(env, mmu_idx, vaddr_page); + + /* + * Hold the TLB lock for the rest of the function. We could acquire/release + * the lock several times in the function, but it is faster to amortize the + * acquisition cost by acquiring it just once. Note that this leads to + * a longer critical section, but this is not a concern since the TLB lock + * is unlikely to be contended. + */ + qemu_spin_lock(&tlb->c.lock); + + /* Note that the tlb is no longer clean. */ + tlb->c.dirty |= 1 << mmu_idx; + + /* Make sure there's no cached translation for the new page. */ + tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); + + /* + * Only evict the old entry to the victim tlb if it's for a + * different page; otherwise just overwrite the stale data. + */ + if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { + unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; + CPUTLBEntry *tv = &desc->vtable[vidx]; + + /* Evict the old entry into the victim tlb. 
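+ * The victim TLB is a CPU_VTLB_SIZE-entry ring indexed by desc->vindex;
+ * victim_tlb_hit() swaps an entry back into the main table on a hit.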
*/ + copy_tlb_helper_locked(tv, te); + desc->viotlb[vidx] = desc->iotlb[index]; + tlb_n_used_entries_dec(env, mmu_idx); + } + + /* refill the tlb */ + /* + * At this point iotlb contains a physical section number in the lower + * TARGET_PAGE_BITS, and either + * + the ram_addr_t of the page base of the target RAM (RAM) + * + the offset within section->mr of the page base (I/O, ROMD) + * We subtract the vaddr_page (which is page aligned and thus won't + * disturb the low bits) to give an offset which can be added to the + * (non-page-aligned) vaddr of the eventual memory access to get + * the MemoryRegion offset for the access. Note that the vaddr we + * subtract here is that of the page base, and not the same as the + * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). + */ + desc->iotlb[index].addr = iotlb - vaddr_page; + desc->iotlb[index].attrs = attrs; + + /* Now calculate the new entry */ + tn.addend = addend - vaddr_page; + if (prot & PAGE_READ) { + tn.addr_read = address; + if (wp_flags & BP_MEM_READ) { + tn.addr_read |= TLB_WATCHPOINT; + } + } else { + tn.addr_read = -1; + } + + if (prot & PAGE_EXEC) { + tn.addr_code = address; + } else { + tn.addr_code = -1; + } + + tn.addr_write = -1; + if (prot & PAGE_WRITE) { + tn.addr_write = write_address; + if (prot & PAGE_WRITE_INV) { + tn.addr_write |= TLB_INVALID_MASK; + } + if (wp_flags & BP_MEM_WRITE) { + tn.addr_write |= TLB_WATCHPOINT; + } + } + + copy_tlb_helper_locked(te, &tn); + tlb_n_used_entries_inc(env, mmu_idx); + qemu_spin_unlock(&tlb->c.lock); +} + +/* Add a new TLB entry, but without specifying the memory + * transaction attributes to be used. + */ +void tlb_set_page(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, int prot, + int mmu_idx, target_ulong size) +{ + tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, + prot, mmu_idx, size); +} + +static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) +{ + ram_addr_t ram_addr; + + ram_addr = qemu_ram_addr_from_host(ptr); + if (ram_addr == RAM_ADDR_INVALID) { + error_report("Bad ram pointer %p", ptr); + abort(); + } + return ram_addr; +} + +/* + * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the + * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must + * be discarded and looked up again (e.g. via tlb_entry()). + */ +static void tlb_fill(CPUState *cpu, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + bool ok; + + /* + * This is not a probe, so only valid return is success; failure + * should result in exception + longjmp to the cpu loop. 
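+ * (Non-faulting probes do not come through here; probe_access_internal()
+ * calls tcg_ops->tlb_fill directly so that it can pass nonfault through.)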
+ */ + ok = cc->tcg_ops->tlb_fill(cpu, addr, size, + access_type, mmu_idx, false, retaddr); + assert(ok); +} + +static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr); +} + +static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, + uintptr_t retaddr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (!cpu->ignore_memory_transaction_failures && + cc->tcg_ops->do_transaction_failed) { + cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, + access_type, mmu_idx, attrs, + response, retaddr); + } +} + +static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, + int mmu_idx, target_ulong addr, uintptr_t retaddr, + MMUAccessType access_type, MemOp op) +{ + CPUState *cpu = env_cpu(env); + hwaddr mr_offset; + MemoryRegionSection *section; + MemoryRegion *mr; + uint64_t val; + bool locked = false; + MemTxResult r; + + section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); + mr = section->mr; + mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + cpu->mem_io_pc = retaddr; + if (!cpu->can_do_io) { + cpu_io_recompile(cpu, retaddr); + } + + if (!qemu_mutex_iothread_locked()) { + qemu_mutex_lock_iothread(); + locked = true; + } + r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); + if (r != MEMTX_OK) { + hwaddr physaddr = mr_offset + + section->offset_within_address_space - + section->offset_within_region; + + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, + mmu_idx, iotlbentry->attrs, r, retaddr); + } + if (locked) { + qemu_mutex_unlock_iothread(); + } + + return val; +} + +/* + * Save a potentially trashed IOTLB entry for later lookup by plugin. + * This is read by tlb_plugin_lookup if the iotlb entry doesn't match + * because of the side effect of io_writex changing memory layout. + */ +static void save_iotlb_data(CPUState *cs, hwaddr addr, + MemoryRegionSection *section, hwaddr mr_offset) +{ +#ifdef CONFIG_PLUGIN + SavedIOTLB *saved = &cs->saved_iotlb; + saved->addr = addr; + saved->section = section; + saved->mr_offset = mr_offset; +#endif +} + +static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, + int mmu_idx, uint64_t val, target_ulong addr, + uintptr_t retaddr, MemOp op) +{ + CPUState *cpu = env_cpu(env); + hwaddr mr_offset; + MemoryRegionSection *section; + MemoryRegion *mr; + bool locked = false; + MemTxResult r; + + section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); + mr = section->mr; + mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + if (!cpu->can_do_io) { + cpu_io_recompile(cpu, retaddr); + } + cpu->mem_io_pc = retaddr; + + /* + * The memory_region_dispatch may trigger a flush/resize + * so for plugins we save the iotlb_data just in case. 
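+ * tlb_plugin_lookup() can then fall back to the saved copy; this is
+ * safe because io_writex and the plugin memory callback both run on
+ * the vCPU's own thread.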
+ */ + save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset); + + if (!qemu_mutex_iothread_locked()) { + qemu_mutex_lock_iothread(); + locked = true; + } + r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); + if (r != MEMTX_OK) { + hwaddr physaddr = mr_offset + + section->offset_within_address_space - + section->offset_within_region; + + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), + MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, + retaddr); + } + if (locked) { + qemu_mutex_unlock_iothread(); + } +} + +static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) +{ +#if TCG_OVERSIZED_GUEST + return *(target_ulong *)((uintptr_t)entry + ofs); +#else + /* ofs might correspond to .addr_write, so use qatomic_read */ + return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); +#endif +} + +/* Return true if ADDR is present in the victim tlb, and has been copied + back to the main tlb. */ +static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, + size_t elt_ofs, target_ulong page) +{ + size_t vidx; + + assert_cpu_is_self(env_cpu(env)); + for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { + CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; + target_ulong cmp; + + /* elt_ofs might correspond to .addr_write, so use qatomic_read */ +#if TCG_OVERSIZED_GUEST + cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); +#else + cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); +#endif + + if (cmp == page) { + /* Found entry in victim tlb, swap tlb and iotlb. */ + CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; + + qemu_spin_lock(&env_tlb(env)->c.lock); + copy_tlb_helper_locked(&tmptlb, tlb); + copy_tlb_helper_locked(tlb, vtlb); + copy_tlb_helper_locked(vtlb, &tmptlb); + qemu_spin_unlock(&env_tlb(env)->c.lock); + + CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; + CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; + tmpio = *io; *io = *vio; *vio = tmpio; + return true; + } + } + return false; +} + +/* Macro to call the above, with local variables from the use context. */ +#define VICTIM_TLB_HIT(TY, ADDR) \ + victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ + (ADDR) & TARGET_PAGE_MASK) + +/* + * Return a ram_addr_t for the virtual address for execution. + * + * Return -1 if we can't translate and execute from an entire page + * of RAM. This will force us to execute by loading and translating + * one insn at a time, without caching. + * + * NOTE: This function will trigger an exception if the page is + * not executable. + */ +tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, + void **hostp) +{ + uintptr_t mmu_idx = cpu_mmu_index(env, true); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + void *p; + + if (unlikely(!tlb_hit(entry->addr_code, addr))) { + if (!VICTIM_TLB_HIT(addr_code, addr)) { + tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + + if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { + /* + * The MMU protection covers a smaller range than a target + * page, so we must redo the MMU check for every insn. + */ + return -1; + } + } + assert(tlb_hit(entry->addr_code, addr)); + } + + if (unlikely(entry->addr_code & TLB_MMIO)) { + /* The region is not backed by RAM. 
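+ * Returning -1 makes the caller translate and execute one insn at a
+ * time without caching, as described above, since there is no host
+ * address to hand back for MMIO.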
*/ + if (hostp) { + *hostp = NULL; + } + return -1; + } + + p = (void *)((uintptr_t)addr + entry->addend); + if (hostp) { + *hostp = p; + } + return qemu_ram_addr_from_host_nofail(p); +} + +tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) +{ + return get_page_addr_code_hostp(env, addr, NULL); +} + +static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, + CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) +{ + ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; + + trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); + + if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { + struct page_collection *pages + = page_collection_lock(ram_addr, ram_addr + size); + tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr); + page_collection_unlock(pages); + } + + /* + * Set both VGA and migration bits for simplicity and to remove + * the notdirty callback faster. + */ + cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); + + /* We remove the notdirty callback only if the code has been flushed. */ + if (!cpu_physical_memory_is_clean(ram_addr)) { + trace_memory_notdirty_set_dirty(mem_vaddr); + tlb_set_dirty(cpu, mem_vaddr); + } +} + +static int probe_access_internal(CPUArchState *env, target_ulong addr, + int fault_size, MMUAccessType access_type, + int mmu_idx, bool nonfault, + void **phost, uintptr_t retaddr) +{ + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr, page_addr; + size_t elt_ofs; + int flags; + + switch (access_type) { + case MMU_DATA_LOAD: + elt_ofs = offsetof(CPUTLBEntry, addr_read); + break; + case MMU_DATA_STORE: + elt_ofs = offsetof(CPUTLBEntry, addr_write); + break; + case MMU_INST_FETCH: + elt_ofs = offsetof(CPUTLBEntry, addr_code); + break; + default: + g_assert_not_reached(); + } + tlb_addr = tlb_read_ofs(entry, elt_ofs); + + page_addr = addr & TARGET_PAGE_MASK; + if (!tlb_hit_page(tlb_addr, page_addr)) { + if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { + CPUState *cs = env_cpu(env); + CPUClass *cc = CPU_GET_CLASS(cs); + + if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, + mmu_idx, nonfault, retaddr)) { + /* Non-faulting page table read failed. */ + *phost = NULL; + return TLB_INVALID_MASK; + } + + /* TLB resize via tlb_fill may have moved the entry. */ + entry = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlb_read_ofs(entry, elt_ofs); + } + flags = tlb_addr & TLB_FLAGS_MASK; + + /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ + if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { + *phost = NULL; + return TLB_MMIO; + } + + /* Everything else is RAM. */ + *phost = (void *)((uintptr_t)addr + entry->addend); + return flags; +} + +int probe_access_flags(CPUArchState *env, target_ulong addr, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, uintptr_t retaddr) +{ + int flags; + + flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, + nonfault, phost, retaddr); + + /* Handle clean RAM pages. 
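+ * TLB_NOTDIRTY only means the dirty-tracking callback has not run for
+ * this page yet; after notdirty_write() it is ordinary RAM, so drop
+ * the bit from the returned flags.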
*/ + if (unlikely(flags & TLB_NOTDIRTY)) { + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); + flags &= ~TLB_NOTDIRTY; + } + + return flags; +} + +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) +{ + void *host; + int flags; + + g_assert(-(addr | TARGET_PAGE_MASK) >= size); + + flags = probe_access_internal(env, addr, size, access_type, mmu_idx, + false, &host, retaddr); + + /* Per the interface, size == 0 merely faults the access. */ + if (size == 0) { + return NULL; + } + + if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + /* Handle watchpoints. */ + if (flags & TLB_WATCHPOINT) { + int wp_access = (access_type == MMU_DATA_STORE + ? BP_MEM_WRITE : BP_MEM_READ); + cpu_check_watchpoint(env_cpu(env), addr, size, + iotlbentry->attrs, wp_access, retaddr); + } + + /* Handle clean RAM pages. */ + if (flags & TLB_NOTDIRTY) { + notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); + } + } + + return host; +} + +void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, + MMUAccessType access_type, int mmu_idx) +{ + void *host; + int flags; + + flags = probe_access_internal(env, addr, 0, access_type, + mmu_idx, true, &host, 0); + + /* No combination of flags are expected by the caller. */ + return flags ? NULL : host; +} + +#ifdef CONFIG_PLUGIN +/* + * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. + * This should be a hot path as we will have just looked this path up + * in the softmmu lookup code (or helper). We don't handle re-fills or + * checking the victim table. This is purely informational. + * + * This almost never fails as the memory access being instrumented + * should have just filled the TLB. The one corner case is io_writex + * which can cause TLB flushes and potential resizing of the TLBs + * losing the information we need. In those cases we need to recover + * data from a copy of the iotlbentry. As long as this always occurs + * from the same thread (which a mem callback will be) this is safe. + */ + +bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, + bool is_store, struct qemu_plugin_hwaddr *data) +{ + CPUArchState *env = cpu->env_ptr; + CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); + uintptr_t index = tlb_index(env, mmu_idx, addr); + target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read; + + if (likely(tlb_hit(tlb_addr, addr))) { + /* We must have an iotlb entry for MMIO */ + if (tlb_addr & TLB_MMIO) { + CPUIOTLBEntry *iotlbentry; + iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + data->is_io = true; + data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); + data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + } else { + data->is_io = false; + data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); + } + return true; + } else { + SavedIOTLB *saved = &cpu->saved_iotlb; + data->is_io = true; + data->v.io.section = saved->section; + data->v.io.offset = saved->mr_offset; + return true; + } +} + +#endif + +/* + * Probe for an atomic operation. Do not allow unaligned operations, + * or io operations to proceed. Return the host address. + * + * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. 
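+ * Accesses that cannot be handled inline (unaligned, or hitting MMIO)
+ * exit via cpu_loop_exit_atomic() so the operation can be retried with
+ * all other vCPUs stopped.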
+ */ +static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, + MemOpIdx oi, int size, int prot, + uintptr_t retaddr) +{ + size_t mmu_idx = get_mmuidx(oi); + MemOp mop = get_memop(oi); + int a_bits = get_alignment_bits(mop); + uintptr_t index; + CPUTLBEntry *tlbe; + target_ulong tlb_addr; + void *hostaddr; + + /* Adjust the given return address. */ + retaddr -= GETPC_ADJ; + + /* Enforce guest required alignment. */ + if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { + /* ??? Maybe indicate atomic op to cpu_unaligned_access */ + cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + } + + /* Enforce qemu required alignment. */ + if (unlikely(addr & (size - 1))) { + /* We get here if guest alignment was not requested, + or was not enforced by cpu_unaligned_access above. + We might widen the access and emulate, but for now + mark an exception and exit the cpu loop. */ + goto stop_the_world; + } + + index = tlb_index(env, mmu_idx, addr); + tlbe = tlb_entry(env, mmu_idx, addr); + + /* Check TLB entry and enforce page permissions. */ + if (prot & PAGE_WRITE) { + tlb_addr = tlb_addr_write(tlbe); + if (!tlb_hit(tlb_addr, addr)) { + if (!VICTIM_TLB_HIT(addr_write, addr)) { + tlb_fill(env_cpu(env), addr, size, + MMU_DATA_STORE, mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + tlbe = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; + } + + /* Let the guest notice RMW on a write-only page. */ + if ((prot & PAGE_READ) && + unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { + tlb_fill(env_cpu(env), addr, size, + MMU_DATA_LOAD, mmu_idx, retaddr); + /* + * Since we don't support reads and writes to different addresses, + * and we do have the proper page loaded for write, this shouldn't + * ever return. But just in case, handle via stop-the-world. + */ + goto stop_the_world; + } + } else /* if (prot & PAGE_READ) */ { + tlb_addr = tlbe->addr_read; + if (!tlb_hit(tlb_addr, addr)) { + if (!VICTIM_TLB_HIT(addr_write, addr)) { + tlb_fill(env_cpu(env), addr, size, + MMU_DATA_LOAD, mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + tlbe = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK; + } + } + + /* Notice an IO access or a needs-MMU-lookup access */ + if (unlikely(tlb_addr & TLB_MMIO)) { + /* There's really nothing that can be done to + support this apart from stop-the-world. */ + goto stop_the_world; + } + + hostaddr = (void *)((uintptr_t)addr + tlbe->addend); + + if (unlikely(tlb_addr & TLB_NOTDIRTY)) { + notdirty_write(env_cpu(env), addr, size, + &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); + } + + return hostaddr; + + stop_the_world: + cpu_loop_exit_atomic(env_cpu(env), retaddr); +} + +/* + * Verify that we have passed the correct MemOp to the correct function. + * + * In the case of the helper_*_mmu functions, we will have done this by + * using the MemOp to look up the helper during code generation. + * + * In the case of the cpu_*_mmu functions, this is up to the caller. + * We could present one function to target code, and dispatch based on + * the MemOp, but so far we have worked hard to avoid an indirect function + * call along the memory path. + */ +static void validate_memop(MemOpIdx oi, MemOp expected) +{ +#ifdef CONFIG_DEBUG_TCG + MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); + assert(have == expected); +#endif +} + +/* + * Load Helpers + * + * We support two different access types. 
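+ * Ordinary data loads arrive through the helper_*_mmu and cpu_ld*_mmu
+ * entry points below.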
SOFTMMU_CODE_ACCESS is + * specifically for reading instructions from system memory. It is + * called by the translation loop and in some helpers where the code + * is disassembled. It shouldn't be called directly by guest code. + */ + +typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); + +static inline uint64_t QEMU_ALWAYS_INLINE +load_memop(const void *haddr, MemOp op) +{ + switch (op) { + case MO_UB: + return ldub_p(haddr); + case MO_BEUW: + return lduw_be_p(haddr); + case MO_LEUW: + return lduw_le_p(haddr); + case MO_BEUL: + return (uint32_t)ldl_be_p(haddr); + case MO_LEUL: + return (uint32_t)ldl_le_p(haddr); + case MO_BEQ: + return ldq_be_p(haddr); + case MO_LEQ: + return ldq_le_p(haddr); + default: + qemu_build_not_reached(); + } +} + +static inline uint64_t QEMU_ALWAYS_INLINE +load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi, + uintptr_t retaddr, MemOp op, bool code_read, + FullLoadHelper *full_load) +{ + uintptr_t mmu_idx = get_mmuidx(oi); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; + const size_t tlb_off = code_read ? + offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); + const MMUAccessType access_type = + code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; + unsigned a_bits = get_alignment_bits(get_memop(oi)); + void *haddr; + uint64_t res; + size_t size = memop_size(op); + + /* Handle CPU specific unaligned behaviour */ + if (addr & ((1 << a_bits) - 1)) { + cpu_unaligned_access(env_cpu(env), addr, access_type, + mmu_idx, retaddr); + } + + /* If the TLB entry is for a different page, reload and try again. */ + if (!tlb_hit(tlb_addr, addr)) { + if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, + addr & TARGET_PAGE_MASK)) { + tlb_fill(env_cpu(env), addr, size, + access_type, mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = code_read ? entry->addr_code : entry->addr_read; + tlb_addr &= ~TLB_INVALID_MASK; + } + + /* Handle anything that isn't just a straight memory access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + CPUIOTLBEntry *iotlbentry; + bool need_swap; + + /* For anything that is unaligned, recurse through full_load. */ + if ((addr & (size - 1)) != 0) { + goto do_unaligned_access; + } + + iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + /* Handle watchpoints. */ + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + /* On watchpoint hit, this will longjmp out. */ + cpu_check_watchpoint(env_cpu(env), addr, size, + iotlbentry->attrs, BP_MEM_READ, retaddr); + } + + need_swap = size > 1 && (tlb_addr & TLB_BSWAP); + + /* Handle I/O access. */ + if (likely(tlb_addr & TLB_MMIO)) { + return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, + access_type, op ^ (need_swap * MO_BSWAP)); + } + + haddr = (void *)((uintptr_t)addr + entry->addend); + + /* + * Keep these two load_memop separate to ensure that the compiler + * is able to fold the entire function to a single instruction. + * There is a build-time assert inside to remind you of this. ;-) + */ + if (unlikely(need_swap)) { + return load_memop(haddr, op ^ MO_BSWAP); + } + return load_memop(haddr, op); + } + + /* Handle slow unaligned access (it spans two pages or IO). 
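+ * E.g. with a 4 KiB target page, a 4-byte load at page offset 0xffe
+ * issues full loads of the aligned words at 0xffc and 0x1000 and
+ * recombines the bytes below according to the memop's endianness.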
*/ + if (size > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 + >= TARGET_PAGE_SIZE)) { + target_ulong addr1, addr2; + uint64_t r1, r2; + unsigned shift; + do_unaligned_access: + addr1 = addr & ~((target_ulong)size - 1); + addr2 = addr1 + size; + r1 = full_load(env, addr1, oi, retaddr); + r2 = full_load(env, addr2, oi, retaddr); + shift = (addr & (size - 1)) * 8; + + if (memop_big_endian(op)) { + /* Big-endian combine. */ + res = (r1 << shift) | (r2 >> ((size * 8) - shift)); + } else { + /* Little-endian combine. */ + res = (r1 >> shift) | (r2 << ((size * 8) - shift)); + } + return res & MAKE_64BIT_MASK(0, size * 8); + } + + haddr = (void *)((uintptr_t)addr + entry->addend); + return load_memop(haddr, op); +} + +/* + * For the benefit of TCG generated code, we want to avoid the + * complication of ABI-specific return type promotion and always + * return a value extended to the register size of the host. This is + * tcg_target_long, except in the case of a 32-bit host and 64-bit + * data, and for that we always have uint64_t. + * + * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. + */ + +static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_UB); + return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); +} + +tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return full_ldub_mmu(env, addr, oi, retaddr); +} + +static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUW); + return load_helper(env, addr, oi, retaddr, MO_LEUW, false, + full_le_lduw_mmu); +} + +tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return full_le_lduw_mmu(env, addr, oi, retaddr); +} + +static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUW); + return load_helper(env, addr, oi, retaddr, MO_BEUW, false, + full_be_lduw_mmu); +} + +tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return full_be_lduw_mmu(env, addr, oi, retaddr); +} + +static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUL); + return load_helper(env, addr, oi, retaddr, MO_LEUL, false, + full_le_ldul_mmu); +} + +tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return full_le_ldul_mmu(env, addr, oi, retaddr); +} + +static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUL); + return load_helper(env, addr, oi, retaddr, MO_BEUL, false, + full_be_ldul_mmu); +} + +tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return full_be_ldul_mmu(env, addr, oi, retaddr); +} + +uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEQ); + return load_helper(env, addr, oi, retaddr, MO_LEQ, false, + helper_le_ldq_mmu); +} + +uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEQ); + return load_helper(env, addr, oi, retaddr, MO_BEQ, false, + helper_be_ldq_mmu); +} + +/* + * Provide signed 
versions of the load routines as well. We can of course + * avoid this for 64-bit data, or for 32-bit data on 32-bit host. + */ + + +tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, +                                     MemOpIdx oi, uintptr_t retaddr) +{ +    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); +} + +tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, +                                    MemOpIdx oi, uintptr_t retaddr) +{ +    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); +} + +tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, +                                    MemOpIdx oi, uintptr_t retaddr) +{ +    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); +} + +tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, +                                    MemOpIdx oi, uintptr_t retaddr) +{ +    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); +} + +tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, +                                    MemOpIdx oi, uintptr_t retaddr) +{ +    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); +} + +/* + * Load helpers for cpu_ldst.h. + */ + +static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, +                                       MemOpIdx oi, uintptr_t retaddr, +                                       FullLoadHelper *full_load) +{ +    uint64_t ret; + +    trace_guest_ld_before_exec(env_cpu(env), addr, oi); +    ret = full_load(env, addr, oi, retaddr); +    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); +    return ret; +} + +uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) +{ +    return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu); +} + +uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, +                        MemOpIdx oi, uintptr_t ra) +{ +    return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu); +} + +uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, +                        MemOpIdx oi, uintptr_t ra) +{ +    return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu); +} + +uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, +                        MemOpIdx oi, uintptr_t ra) +{ +    return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu); +} + +uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, +                        MemOpIdx oi, uintptr_t ra) +{ +    return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu); +} + +uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, +                        MemOpIdx oi, uintptr_t ra) +{ +    return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu); +} + +uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, +                        MemOpIdx oi, uintptr_t ra) +{ +    return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu); +} + +/* + * Store Helpers + */ + +static inline void QEMU_ALWAYS_INLINE +store_memop(void *haddr, uint64_t val, MemOp op) +{ +    switch (op) { +    case MO_UB: +        stb_p(haddr, val); +        break; +    case MO_BEUW: +        stw_be_p(haddr, val); +        break; +    case MO_LEUW: +        stw_le_p(haddr, val); +        break; +    case MO_BEUL: +        stl_be_p(haddr, val); +        break; +    case MO_LEUL: +        stl_le_p(haddr, val); +        break; +    case MO_BEQ: +        stq_be_p(haddr, val); +        break; +    case MO_LEQ: +        stq_le_p(haddr, val); +        break; +    default: +        qemu_build_not_reached(); +    } +} + +static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, +                         MemOpIdx oi, uintptr_t retaddr); + +static void __attribute__((noinline)) +store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, +                       uintptr_t retaddr, size_t size, uintptr_t mmu_idx, +                       bool big_endian) +{ +    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); +    uintptr_t index, index2; +    CPUTLBEntry *entry, *entry2; +    target_ulong page2, tlb_addr, tlb_addr2; +    MemOpIdx oi; +    size_t size2; +    int i; + +    /* + 
* Ensure the second page is in the TLB. Note that the first page + * is already guaranteed to be filled, and that the second page + * cannot evict the first. + */ + page2 = (addr + size) & TARGET_PAGE_MASK; + size2 = (addr + size) & ~TARGET_PAGE_MASK; + index2 = tlb_index(env, mmu_idx, page2); + entry2 = tlb_entry(env, mmu_idx, page2); + + tlb_addr2 = tlb_addr_write(entry2); + if (!tlb_hit_page(tlb_addr2, page2)) { + if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { + tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, + mmu_idx, retaddr); + index2 = tlb_index(env, mmu_idx, page2); + entry2 = tlb_entry(env, mmu_idx, page2); + } + tlb_addr2 = tlb_addr_write(entry2); + } + + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + tlb_addr = tlb_addr_write(entry); + + /* + * Handle watchpoints. Since this may trap, all checks + * must happen before any store. + */ + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + cpu_check_watchpoint(env_cpu(env), addr, size - size2, + env_tlb(env)->d[mmu_idx].iotlb[index].attrs, + BP_MEM_WRITE, retaddr); + } + if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { + cpu_check_watchpoint(env_cpu(env), page2, size2, + env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, + BP_MEM_WRITE, retaddr); + } + + /* + * XXX: not efficient, but simple. + * This loop must go in the forward direction to avoid issues + * with self-modifying code in Windows 64-bit. + */ + oi = make_memop_idx(MO_UB, mmu_idx); + if (big_endian) { + for (i = 0; i < size; ++i) { + /* Big-endian extract. */ + uint8_t val8 = val >> (((size - 1) * 8) - (i * 8)); + full_stb_mmu(env, addr + i, val8, oi, retaddr); + } + } else { + for (i = 0; i < size; ++i) { + /* Little-endian extract. */ + uint8_t val8 = val >> (i * 8); + full_stb_mmu(env, addr + i, val8, oi, retaddr); + } + } +} + +static inline void QEMU_ALWAYS_INLINE +store_helper(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr, MemOp op) +{ + uintptr_t mmu_idx = get_mmuidx(oi); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = tlb_addr_write(entry); + const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); + unsigned a_bits = get_alignment_bits(get_memop(oi)); + void *haddr; + size_t size = memop_size(op); + + /* Handle CPU specific unaligned behaviour */ + if (addr & ((1 << a_bits) - 1)) { + cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + } + + /* If the TLB entry is for a different page, reload and try again. */ + if (!tlb_hit(tlb_addr, addr)) { + if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, + addr & TARGET_PAGE_MASK)) { + tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, + mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; + } + + /* Handle anything that isn't just a straight memory access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + CPUIOTLBEntry *iotlbentry; + bool need_swap; + + /* For anything that is unaligned, recurse through byte stores. */ + if ((addr & (size - 1)) != 0) { + goto do_unaligned_access; + } + + iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + /* Handle watchpoints. */ + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + /* On watchpoint hit, this will longjmp out. 
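The byte-by-byte extraction in store_helper_unaligned() above (and the mirror-image combine in load_helper()) is easiest to follow with a concrete value. A self-contained sketch, not QEMU code, using the same shift arithmetic for a 4-byte store of 0x11223344:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    int main(void)
    {
        uint64_t val = 0x11223344;
        size_t size = 4;

        /* Big-endian extract: most significant byte goes to the lowest address. */
        for (size_t i = 0; i < size; i++) {
            printf("BE byte %zu: %02x\n", i,
                   (uint8_t)(val >> (((size - 1) * 8) - (i * 8))));
        }
        /* Little-endian extract: least significant byte goes to the lowest address. */
        for (size_t i = 0; i < size; i++) {
            printf("LE byte %zu: %02x\n", i, (uint8_t)(val >> (i * 8)));
        }
        return 0;
    }

In address order the big-endian loop emits 11 22 33 44 and the little-endian loop emits 44 33 22 11, which is exactly what the full_stb_mmu() calls above store one guest byte at a time.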
*/ + cpu_check_watchpoint(env_cpu(env), addr, size, + iotlbentry->attrs, BP_MEM_WRITE, retaddr); + } + + need_swap = size > 1 && (tlb_addr & TLB_BSWAP); + + /* Handle I/O access. */ + if (tlb_addr & TLB_MMIO) { + io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, + op ^ (need_swap * MO_BSWAP)); + return; + } + + /* Ignore writes to ROM. */ + if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { + return; + } + + /* Handle clean RAM pages. */ + if (tlb_addr & TLB_NOTDIRTY) { + notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); + } + + haddr = (void *)((uintptr_t)addr + entry->addend); + + /* + * Keep these two store_memop separate to ensure that the compiler + * is able to fold the entire function to a single instruction. + * There is a build-time assert inside to remind you of this. ;-) + */ + if (unlikely(need_swap)) { + store_memop(haddr, val, op ^ MO_BSWAP); + } else { + store_memop(haddr, val, op); + } + return; + } + + /* Handle slow unaligned access (it spans two pages or IO). */ + if (size > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 + >= TARGET_PAGE_SIZE)) { + do_unaligned_access: + store_helper_unaligned(env, addr, val, retaddr, size, + mmu_idx, memop_big_endian(op)); + return; + } + + haddr = (void *)((uintptr_t)addr + entry->addend); + store_memop(haddr, val, op); +} + +static void __attribute__((noinline)) +full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_UB); + store_helper(env, addr, val, oi, retaddr, MO_UB); +} + +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + full_stb_mmu(env, addr, val, oi, retaddr); +} + +static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUW); + store_helper(env, addr, val, oi, retaddr, MO_LEUW); +} + +void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + full_le_stw_mmu(env, addr, val, oi, retaddr); +} + +static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUW); + store_helper(env, addr, val, oi, retaddr, MO_BEUW); +} + +void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + full_be_stw_mmu(env, addr, val, oi, retaddr); +} + +static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUL); + store_helper(env, addr, val, oi, retaddr, MO_LEUL); +} + +void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + full_le_stl_mmu(env, addr, val, oi, retaddr); +} + +static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUL); + store_helper(env, addr, val, oi, retaddr, MO_BEUL); +} + +void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + full_be_stl_mmu(env, addr, val, oi, retaddr); +} + +void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEQ); + store_helper(env, addr, val, oi, retaddr, MO_LEQ); +} + +void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEQ); + 
store_helper(env, addr, val, oi, retaddr, MO_BEQ); +} + +/* + * Store Helpers for cpu_ldst.h + */ + +typedef void FullStoreHelper(CPUArchState *env, target_ulong addr, + uint64_t val, MemOpIdx oi, uintptr_t retaddr); + +static inline void cpu_store_helper(CPUArchState *env, target_ulong addr, + uint64_t val, MemOpIdx oi, uintptr_t ra, + FullStoreHelper *full_store) +{ + trace_guest_st_before_exec(env_cpu(env), addr, oi); + full_store(env, addr, val, oi, ra); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu); +} + +void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu); +} + +void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu); +} + +void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu); +} + +void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu); +} + +void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu); +} + +void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu); +} + +#include "ldst_common.c.inc" + +/* + * First set of functions passes in OI and RETADDR. + * This makes them callable from other helpers. + */ + +#define ATOMIC_NAME(X) \ + glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) + +#define ATOMIC_MMU_CLEANUP +#define ATOMIC_MMU_IDX get_mmuidx(oi) + +#include "atomic_common.c.inc" + +#define DATA_SIZE 1 +#include "atomic_template.h" + +#define DATA_SIZE 2 +#include "atomic_template.h" + +#define DATA_SIZE 4 +#include "atomic_template.h" + +#ifdef CONFIG_ATOMIC64 +#define DATA_SIZE 8 +#include "atomic_template.h" +#endif + +#if HAVE_CMPXCHG128 || HAVE_ATOMIC128 +#define DATA_SIZE 16 +#include "atomic_template.h" +#endif + +/* Code access functions. 
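For reference, the cpu_*_mmu entry points above expect the caller to supply an explicit MemOpIdx and return address. A hypothetical target helper (the helper itself is invented for illustration; make_memop_idx(), MO_ALIGN, cpu_mmu_index() and GETPC() are existing QEMU interfaces) might use them roughly like this:

    /* Hypothetical helper: aligned little-endian 32-bit store, with tracing,
     * plugin callbacks and alignment faults handled inside cpu_stl_le_mmu(). */
    void helper_demo_store32(CPUArchState *env, target_ulong addr, uint32_t val)
    {
        uintptr_t ra = GETPC();
        MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN,
                                     cpu_mmu_index(env, false));

        cpu_stl_le_mmu(env, addr, val, oi, ra);
    }

Capturing GETPC() once at the top of the helper is the usual pattern, since the return address is what lets an MMU fault unwind back to the guest instruction that caused it.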
*/ + +static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); +} + +uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) +{ + MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); + return full_ldub_code(env, addr, oi, 0); +} + +static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); +} + +uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) +{ + MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); + return full_lduw_code(env, addr, oi, 0); +} + +static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); +} + +uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) +{ + MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); + return full_ldl_code(env, addr, oi, 0); +} + +static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code); +} + +uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) +{ + MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true)); + return full_ldq_code(env, addr, oi, 0); +} diff --git a/accel/tcg/hmp.c b/accel/tcg/hmp.c new file mode 100644 index 000000000..d2ea35265 --- /dev/null +++ b/accel/tcg/hmp.c @@ -0,0 +1,15 @@ +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "exec/exec-all.h" +#include "monitor/monitor.h" +#include "sysemu/tcg.h" + +static void hmp_tcg_register(void) +{ + monitor_register_hmp_info_hrt("jit", qmp_x_query_jit); + monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount); +} + +type_init(hmp_tcg_register); diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h new file mode 100644 index 000000000..881bc1ede --- /dev/null +++ b/accel/tcg/internal.h @@ -0,0 +1,22 @@ +/* + * Internal execution defines for qemu + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef ACCEL_TCG_INTERNAL_H +#define ACCEL_TCG_INTERNAL_H + +#include "exec/exec-all.h" + +TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, + target_ulong cs_base, uint32_t flags, + int cflags); + +void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); +void page_init(void); +void tb_htable_init(void); + +#endif /* ACCEL_TCG_INTERNAL_H */ diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc new file mode 100644 index 000000000..bfefb275e --- /dev/null +++ b/accel/tcg/ldst_common.c.inc @@ -0,0 +1,307 @@ +/* + * Routines common to user and system emulation of load/store. + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + return cpu_ldb_mmu(env, addr, oi, ra); +} + +int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); + return cpu_ldw_be_mmu(env, addr, oi, ra); +} + +int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); + return cpu_ldl_be_mmu(env, addr, oi, ra); +} + +uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx); + return cpu_ldq_be_mmu(env, addr, oi, ra); +} + +uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); + return cpu_ldw_le_mmu(env, addr, oi, ra); +} + +int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); + return cpu_ldl_le_mmu(env, addr, oi, ra); +} + +uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx); + return cpu_ldq_le_mmu(env, addr, oi, ra); +} + +void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + cpu_stb_mmu(env, addr, val, oi, ra); +} + +void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); + cpu_stw_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); + cpu_stl_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx); + cpu_stq_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); + cpu_stw_le_mmu(env, addr, val, oi, ra); +} + +void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); + cpu_stl_le_mmu(env, addr, val, oi, ra); +} + +void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx); + cpu_stq_le_mmu(env, addr, val, oi, ra); +} + +/*--------------------------*/ + +uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldub_mmuidx_ra(env, addr, 
cpu_mmu_index(env, false), ra); +} + +int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int8_t)cpu_ldub_data_ra(env, addr, ra); +} + +uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int16_t)cpu_lduw_be_data_ra(env, addr, ra); +} + +uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int16_t)cpu_lduw_le_data_ra(env, addr, ra); +} + +uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr, + uint64_t val, uintptr_t ra) +{ + cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr, + uint64_t val, uintptr_t ra) +{ + cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +/*--------------------------*/ + +uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldub_data_ra(env, addr, 0); +} + +int cpu_ldsb_data(CPUArchState *env, abi_ptr addr) +{ + return (int8_t)cpu_ldub_data(env, addr); +} + +uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_lduw_be_data_ra(env, addr, 0); +} + +int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr) +{ + return (int16_t)cpu_lduw_be_data(env, addr); +} + +uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldl_be_data_ra(env, addr, 0); +} + +uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldq_be_data_ra(env, addr, 0); +} + +uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_lduw_le_data_ra(env, addr, 0); +} + +int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr) +{ + return (int16_t)cpu_lduw_le_data(env, addr); +} + +uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldl_le_data_ra(env, addr, 0); 
+} + +uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldq_le_data_ra(env, addr, 0); +} + +void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stb_data_ra(env, addr, val, 0); +} + +void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stw_be_data_ra(env, addr, val, 0); +} + +void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stl_be_data_ra(env, addr, val, 0); +} + +void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val) +{ + cpu_stq_be_data_ra(env, addr, val, 0); +} + +void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stw_le_data_ra(env, addr, val, 0); +} + +void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stl_le_data_ra(env, addr, val, 0); +} + +void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val) +{ + cpu_stq_le_data_ra(env, addr, val, 0); +} diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build new file mode 100644 index 000000000..7a0a79d73 --- /dev/null +++ b/accel/tcg/meson.build @@ -0,0 +1,26 @@ +tcg_ss = ss.source_set() +tcg_ss.add(files( + 'tcg-all.c', + 'cpu-exec-common.c', + 'cpu-exec.c', + 'tcg-runtime-gvec.c', + 'tcg-runtime.c', + 'translate-all.c', + 'translator.c', +)) +tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c')) +tcg_ss.add(when: 'CONFIG_SOFTMMU', if_false: files('user-exec-stub.c')) +tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c')]) +specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss) + +specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files( + 'cputlb.c', + 'hmp.c', +)) + +tcg_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files( + 'tcg-accel-ops.c', + 'tcg-accel-ops-mttcg.c', + 'tcg-accel-ops-icount.c', + 'tcg-accel-ops-rr.c', +)) diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c new file mode 100644 index 000000000..22d95fe1c --- /dev/null +++ b/accel/tcg/plugin-gen.c @@ -0,0 +1,926 @@ +/* + * plugin-gen.c - TCG-related bits of plugin infrastructure + * + * Copyright (C) 2018, Emilio G. Cota <cota@braap.org> + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * We support instrumentation at an instruction granularity. That is, + * if a plugin wants to instrument the memory accesses performed by a + * particular instruction, it can just do that instead of instrumenting + * all memory accesses. Thus, in order to do this we first have to + * translate a TB, so that plugins can decide what/where to instrument. + * + * Injecting the desired instrumentation could be done with a second + * translation pass that combined the instrumentation requests, but that + * would be ugly and inefficient since we would decode the guest code twice. + * Instead, during TB translation we add "empty" instrumentation calls for all + * possible instrumentation events, and then once we collect the instrumentation + * requests from plugins, we either "fill in" those empty events or remove them + * if they have no requests. + * + * When "filling in" an event we first copy the empty callback's TCG ops. This + * might seem unnecessary, but it is done to support an arbitrary number + * of callbacks per event. Take for example a regular instruction callback. + * We first generate a callback to an empty helper function. 
Then, if two + * plugins register one callback each for this instruction, we make two copies + * of the TCG ops generated for the empty callback, substituting the function + * pointer that points to the empty helper function with the plugins' desired + * callback functions. After that we remove the empty callback's ops. + * + * Note that the location in TCGOp.args[] of the pointer to a helper function + * varies across different guest and host architectures. Instead of duplicating + * the logic that figures this out, we rely on the fact that the empty + * callbacks point to empty functions that are unique pointers in the program. + * Thus, to find the right location we just have to look for a match in + * TCGOp.args[]. This is the main reason why we first copy an empty callback's + * TCG ops and then fill them in; regardless of whether we have one or many + * callbacks for that event, the logic to add all of them is the same. + * + * When generating more than one callback per event, we make a small + * optimization to avoid generating redundant operations. For instance, for the + * second and all subsequent callbacks of an event, we do not need to reload the + * CPU's index into a TCG temp, since the first callback did it already. + */ +#include "qemu/osdep.h" +#include "tcg/tcg.h" +#include "tcg/tcg-op.h" +#include "exec/exec-all.h" +#include "exec/plugin-gen.h" +#include "exec/translator.h" + +#ifdef CONFIG_SOFTMMU +# define CONFIG_SOFTMMU_GATE 1 +#else +# define CONFIG_SOFTMMU_GATE 0 +#endif + +/* + * plugin_cb_start TCG op args[]: + * 0: enum plugin_gen_from + * 1: enum plugin_gen_cb + * 2: set to 1 for mem callback that is a write, 0 otherwise. + */ + +enum plugin_gen_from { + PLUGIN_GEN_FROM_TB, + PLUGIN_GEN_FROM_INSN, + PLUGIN_GEN_FROM_MEM, + PLUGIN_GEN_AFTER_INSN, + PLUGIN_GEN_N_FROMS, +}; + +enum plugin_gen_cb { + PLUGIN_GEN_CB_UDATA, + PLUGIN_GEN_CB_INLINE, + PLUGIN_GEN_CB_MEM, + PLUGIN_GEN_ENABLE_MEM_HELPER, + PLUGIN_GEN_DISABLE_MEM_HELPER, + PLUGIN_GEN_N_CBS, +}; + +/* + * These helpers are stubs that get dynamically switched out for calls + * direct to the plugin if they are subscribed to. + */ +void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata) +{ } + +void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index, + qemu_plugin_meminfo_t info, uint64_t vaddr, + void *userdata) +{ } + +static void do_gen_mem_cb(TCGv vaddr, uint32_t info) +{ + TCGv_i32 cpu_index = tcg_temp_new_i32(); + TCGv_i32 meminfo = tcg_const_i32(info); + TCGv_i64 vaddr64 = tcg_temp_new_i64(); + TCGv_ptr udata = tcg_const_ptr(NULL); + + tcg_gen_ld_i32(cpu_index, cpu_env, + -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); + tcg_gen_extu_tl_i64(vaddr64, vaddr); + + gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata); + + tcg_temp_free_ptr(udata); + tcg_temp_free_i64(vaddr64); + tcg_temp_free_i32(meminfo); + tcg_temp_free_i32(cpu_index); +} + +static void gen_empty_udata_cb(void) +{ + TCGv_i32 cpu_index = tcg_temp_new_i32(); + TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */ + + tcg_gen_ld_i32(cpu_index, cpu_env, + -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); + gen_helper_plugin_vcpu_udata_cb(cpu_index, udata); + + tcg_temp_free_ptr(udata); + tcg_temp_free_i32(cpu_index); +} + +/* + * For now we only support addi_i64. + * When we support more ops, we can generate one empty inline cb for each. 
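All of this op-copying machinery exists to service callbacks that plugins register through the public API. For orientation, a minimal plugin (a sketch modelled on the examples shipped in QEMU's contrib/plugins; the demo_* names are invented here) that asks for a per-instruction execution callback looks roughly like:

    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    /* Runs at execution time, once per instrumented instruction. */
    static void demo_insn_exec(unsigned int vcpu_index, void *udata)
    {
        (void)vcpu_index;
        (void)udata;
    }

    /* Runs at translation time; registrations made here are what
     * plugin_gen_inject() later splices into the empty callbacks. */
    static void demo_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        size_t n = qemu_plugin_tb_n_insns(tb);

        for (size_t i = 0; i < n; i++) {
            struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
            qemu_plugin_register_vcpu_insn_exec_cb(insn, demo_insn_exec,
                                                   QEMU_PLUGIN_CB_NO_REGS, NULL);
        }
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        qemu_plugin_register_vcpu_tb_trans_cb(id, demo_tb_trans);
        return 0;
    }

If two such plugins subscribe to the same instruction, the copy-and-substitute pass described above duplicates the empty callback's ops once per registration.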
+ */ +static void gen_empty_inline_cb(void) +{ + TCGv_i64 val = tcg_temp_new_i64(); + TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */ + + tcg_gen_ld_i64(val, ptr, 0); + /* pass an immediate != 0 so that it doesn't get optimized away */ + tcg_gen_addi_i64(val, val, 0xdeadface); + tcg_gen_st_i64(val, ptr, 0); + tcg_temp_free_ptr(ptr); + tcg_temp_free_i64(val); +} + +static void gen_empty_mem_cb(TCGv addr, uint32_t info) +{ + do_gen_mem_cb(addr, info); +} + +/* + * Share the same function for enable/disable. When enabling, the NULL + * pointer will be overwritten later. + */ +static void gen_empty_mem_helper(void) +{ + TCGv_ptr ptr; + + ptr = tcg_const_ptr(NULL); + tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) - + offsetof(ArchCPU, env)); + tcg_temp_free_ptr(ptr); +} + +static void gen_plugin_cb_start(enum plugin_gen_from from, + enum plugin_gen_cb type, unsigned wr) +{ + tcg_gen_plugin_cb_start(from, type, wr); +} + +static void gen_wrapped(enum plugin_gen_from from, + enum plugin_gen_cb type, void (*func)(void)) +{ + gen_plugin_cb_start(from, type, 0); + func(); + tcg_gen_plugin_cb_end(); +} + +static void plugin_gen_empty_callback(enum plugin_gen_from from) +{ + switch (from) { + case PLUGIN_GEN_AFTER_INSN: + gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER, + gen_empty_mem_helper); + break; + case PLUGIN_GEN_FROM_INSN: + /* + * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being + * the first callback of an instruction + */ + gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER, + gen_empty_mem_helper); + /* fall through */ + case PLUGIN_GEN_FROM_TB: + gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb); + gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb); + break; + default: + g_assert_not_reached(); + } +} + +union mem_gen_fn { + void (*mem_fn)(TCGv, uint32_t); + void (*inline_fn)(void); +}; + +static void gen_mem_wrapped(enum plugin_gen_cb type, + const union mem_gen_fn *f, TCGv addr, + uint32_t info, bool is_mem) +{ + enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); + + gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw); + if (is_mem) { + f->mem_fn(addr, info); + } else { + f->inline_fn(); + } + tcg_gen_plugin_cb_end(); +} + +void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) +{ + union mem_gen_fn fn; + + fn.mem_fn = gen_empty_mem_cb; + gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true); + + fn.inline_fn = gen_empty_inline_cb; + gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false); +} + +static TCGOp *find_op(TCGOp *op, TCGOpcode opc) +{ + while (op) { + if (op->opc == opc) { + return op; + } + op = QTAILQ_NEXT(op, link); + } + return NULL; +} + +static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end) +{ + TCGOp *ret = QTAILQ_NEXT(end, link); + + QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link); + return ret; +} + +/* remove all ops until (and including) plugin_cb_end */ +static TCGOp *rm_ops(TCGOp *op) +{ + TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end); + + tcg_debug_assert(end_op); + return rm_ops_range(op, end_op); +} + +static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op) +{ + *begin_op = QTAILQ_NEXT(*begin_op, link); + tcg_debug_assert(*begin_op); + op = tcg_op_insert_after(tcg_ctx, op, (*begin_op)->opc); + memcpy(op->args, (*begin_op)->args, sizeof(op->args)); + return op; +} + +static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc) +{ + op = copy_op_nocheck(begin_op, op); + tcg_debug_assert((*begin_op)->opc == opc); + return op; +} + +static TCGOp *copy_extu_i32_i64(TCGOp 
**begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* mov_i32 */ + op = copy_op(begin_op, op, INDEX_op_mov_i32); + /* mov_i32 w/ $0 */ + op = copy_op(begin_op, op, INDEX_op_mov_i32); + } else { + /* extu_i32_i64 */ + op = copy_op(begin_op, op, INDEX_op_extu_i32_i64); + } + return op; +} + +static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* 2x mov_i32 */ + op = copy_op(begin_op, op, INDEX_op_mov_i32); + op = copy_op(begin_op, op, INDEX_op_mov_i32); + } else { + /* mov_i64 */ + op = copy_op(begin_op, op, INDEX_op_mov_i64); + } + return op; +} + +static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr) +{ + if (UINTPTR_MAX == UINT32_MAX) { + /* mov_i32 */ + op = copy_op(begin_op, op, INDEX_op_mov_i32); + op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr)); + } else { + /* mov_i64 */ + op = copy_op(begin_op, op, INDEX_op_mov_i64); + op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr)); + } + return op; +} + +static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TARGET_LONG_BITS == 32) { + /* extu_i32_i64 */ + op = copy_extu_i32_i64(begin_op, op); + } else { + /* mov_i64 */ + op = copy_mov_i64(begin_op, op); + } + return op; +} + +static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* 2x ld_i32 */ + op = copy_op(begin_op, op, INDEX_op_ld_i32); + op = copy_op(begin_op, op, INDEX_op_ld_i32); + } else { + /* ld_i64 */ + op = copy_op(begin_op, op, INDEX_op_ld_i64); + } + return op; +} + +static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* 2x st_i32 */ + op = copy_op(begin_op, op, INDEX_op_st_i32); + op = copy_op(begin_op, op, INDEX_op_st_i32); + } else { + /* st_i64 */ + op = copy_op(begin_op, op, INDEX_op_st_i64); + } + return op; +} + +static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* all 32-bit backends must implement add2_i32 */ + g_assert(TCG_TARGET_HAS_add2_i32); + op = copy_op(begin_op, op, INDEX_op_add2_i32); + op->args[4] = tcgv_i32_arg(tcg_constant_i32(v)); + op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32)); + } else { + op = copy_op(begin_op, op, INDEX_op_add_i64); + op->args[2] = tcgv_i64_arg(tcg_constant_i64(v)); + } + return op; +} + +static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op) +{ + if (UINTPTR_MAX == UINT32_MAX) { + /* st_i32 */ + op = copy_op(begin_op, op, INDEX_op_st_i32); + } else { + /* st_i64 */ + op = copy_st_i64(begin_op, op); + } + return op; +} + +static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func, + void *func, int *cb_idx) +{ + /* copy all ops until the call */ + do { + op = copy_op_nocheck(begin_op, op); + } while (op->opc != INDEX_op_call); + + /* fill in the op call */ + op->param1 = (*begin_op)->param1; + op->param2 = (*begin_op)->param2; + tcg_debug_assert(op->life == 0); + if (*cb_idx == -1) { + int i; + + /* + * Instead of working out the position of the callback in args[], just + * look for @empty_func, since it should be a unique pointer. 
+ */ + for (i = 0; i < MAX_OPC_PARAM_ARGS; i++) { + if ((uintptr_t)(*begin_op)->args[i] == (uintptr_t)empty_func) { + *cb_idx = i; + break; + } + } + tcg_debug_assert(i < MAX_OPC_PARAM_ARGS); + } + op->args[*cb_idx] = (uintptr_t)func; + op->args[*cb_idx + 1] = (*begin_op)->args[*cb_idx + 1]; + + return op; +} + +/* + * When we append/replace ops here we are sensitive to changing patterns of + * TCGOps generated by the tcg_gen_FOO calls when we generated the + * empty callbacks. This will assert very quickly in a debug build as + * we assert the ops we are replacing are the correct ones. + */ +static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, int *cb_idx) +{ + /* const_ptr */ + op = copy_const_ptr(&begin_op, op, cb->userp); + + /* copy the ld_i32, but note that we only have to copy it once */ + begin_op = QTAILQ_NEXT(begin_op, link); + tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32); + if (*cb_idx == -1) { + op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32); + memcpy(op->args, begin_op->args, sizeof(op->args)); + } + + /* call */ + op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb), + cb->f.vcpu_udata, cb_idx); + + return op; +} + +static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, + int *unused) +{ + /* const_ptr */ + op = copy_const_ptr(&begin_op, op, cb->userp); + + /* ld_i64 */ + op = copy_ld_i64(&begin_op, op); + + /* add_i64 */ + op = copy_add_i64(&begin_op, op, cb->inline_insn.imm); + + /* st_i64 */ + op = copy_st_i64(&begin_op, op); + + return op; +} + +static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, int *cb_idx) +{ + enum plugin_gen_cb type = begin_op->args[1]; + + tcg_debug_assert(type == PLUGIN_GEN_CB_MEM); + + /* const_i32 == mov_i32 ("info", so it remains as is) */ + op = copy_op(&begin_op, op, INDEX_op_mov_i32); + + /* const_ptr */ + op = copy_const_ptr(&begin_op, op, cb->userp); + + /* copy the ld_i32, but note that we only have to copy it once */ + begin_op = QTAILQ_NEXT(begin_op, link); + tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32); + if (*cb_idx == -1) { + op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32); + memcpy(op->args, begin_op->args, sizeof(op->args)); + } + + /* extu_tl_i64 */ + op = copy_extu_tl_i64(&begin_op, op); + + if (type == PLUGIN_GEN_CB_MEM) { + /* call */ + op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb), + cb->f.vcpu_udata, cb_idx); + } + + return op; +} + +typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, int *intp); +typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb); + +static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb) +{ + return true; +} + +static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb) +{ + int w; + + w = op->args[2]; + return !!(cb->rw & (w + 1)); +} + +static void inject_cb_type(const GArray *cbs, TCGOp *begin_op, + inject_fn inject, op_ok_fn ok) +{ + TCGOp *end_op; + TCGOp *op; + int cb_idx = -1; + int i; + + if (!cbs || cbs->len == 0) { + rm_ops(begin_op); + return; + } + + end_op = find_op(begin_op, INDEX_op_plugin_cb_end); + tcg_debug_assert(end_op); + + op = end_op; + for (i = 0; i < cbs->len; i++) { + struct qemu_plugin_dyn_cb *cb = + &g_array_index(cbs, struct qemu_plugin_dyn_cb, i); + + if (!ok(begin_op, cb)) { + continue; + } + op = inject(cb, begin_op, op, &cb_idx); + } + rm_ops_range(begin_op, end_op); +} + +static void 
+inject_udata_cb(const GArray *cbs, TCGOp *begin_op) +{ + inject_cb_type(cbs, begin_op, append_udata_cb, op_ok); +} + +static void +inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok) +{ + inject_cb_type(cbs, begin_op, append_inline_cb, ok); +} + +static void +inject_mem_cb(const GArray *cbs, TCGOp *begin_op) +{ + inject_cb_type(cbs, begin_op, append_mem_cb, op_rw); +} + +/* we could change the ops in place, but we can reuse more code by copying */ +static void inject_mem_helper(TCGOp *begin_op, GArray *arr) +{ + TCGOp *orig_op = begin_op; + TCGOp *end_op; + TCGOp *op; + + end_op = find_op(begin_op, INDEX_op_plugin_cb_end); + tcg_debug_assert(end_op); + + /* const ptr */ + op = copy_const_ptr(&begin_op, end_op, arr); + + /* st_ptr */ + op = copy_st_ptr(&begin_op, op); + + rm_ops_range(orig_op, end_op); +} + +/* + * Tracking memory accesses performed from helpers requires extra work. + * If an instruction is emulated with helpers, we do two things: + * (1) copy the CB descriptors, and keep track of it so that they can be + * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so + * that we can read them at run-time (i.e. when the helper executes). + * This run-time access is performed from qemu_plugin_vcpu_mem_cb. + * + * Note that plugin_gen_disable_mem_helpers undoes (2). Since it + * is possible that the code we generate after the instruction is + * dead, we also add checks before generating tb_exit etc. + */ +static void inject_mem_enable_helper(struct qemu_plugin_insn *plugin_insn, + TCGOp *begin_op) +{ + GArray *cbs[2]; + GArray *arr; + size_t n_cbs, i; + + cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR]; + cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE]; + + n_cbs = 0; + for (i = 0; i < ARRAY_SIZE(cbs); i++) { + n_cbs += cbs[i]->len; + } + + plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs; + if (likely(!plugin_insn->mem_helper)) { + rm_ops(begin_op); + return; + } + + arr = g_array_sized_new(false, false, + sizeof(struct qemu_plugin_dyn_cb), n_cbs); + + for (i = 0; i < ARRAY_SIZE(cbs); i++) { + g_array_append_vals(arr, cbs[i]->data, cbs[i]->len); + } + + qemu_plugin_add_dyn_cb_arr(arr); + inject_mem_helper(begin_op, arr); +} + +static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn, + TCGOp *begin_op) +{ + if (likely(!plugin_insn->mem_helper)) { + rm_ops(begin_op); + return; + } + inject_mem_helper(begin_op, NULL); +} + +/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */ +void plugin_gen_disable_mem_helpers(void) +{ + TCGv_ptr ptr; + + if (likely(tcg_ctx->plugin_insn == NULL || + !tcg_ctx->plugin_insn->mem_helper)) { + return; + } + ptr = tcg_const_ptr(NULL); + tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) - + offsetof(ArchCPU, env)); + tcg_temp_free_ptr(ptr); + tcg_ctx->plugin_insn->mem_helper = false; +} + +static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op) +{ + inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op); +} + +static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op) +{ + inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok); +} + +static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + + inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op); +} + +static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb, + TCGOp 
*begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE], + begin_op, op_ok); +} + +static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op); +} + +static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + const GArray *cbs; + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + + cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE]; + inject_inline_cb(cbs, begin_op, op_rw); +} + +static void plugin_gen_enable_mem_helper(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_mem_enable_helper(insn, begin_op); +} + +static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_mem_disable_helper(insn, begin_op); +} + +/* #define DEBUG_PLUGIN_GEN_OPS */ +static void pr_ops(void) +{ +#ifdef DEBUG_PLUGIN_GEN_OPS + TCGOp *op; + int i = 0; + + QTAILQ_FOREACH(op, &tcg_ctx->ops, link) { + const char *name = ""; + const char *type = ""; + + if (op->opc == INDEX_op_plugin_cb_start) { + switch (op->args[0]) { + case PLUGIN_GEN_FROM_TB: + name = "tb"; + break; + case PLUGIN_GEN_FROM_INSN: + name = "insn"; + break; + case PLUGIN_GEN_FROM_MEM: + name = "mem"; + break; + case PLUGIN_GEN_AFTER_INSN: + name = "after insn"; + break; + default: + break; + } + switch (op->args[1]) { + case PLUGIN_GEN_CB_UDATA: + type = "udata"; + break; + case PLUGIN_GEN_CB_INLINE: + type = "inline"; + break; + case PLUGIN_GEN_CB_MEM: + type = "mem"; + break; + case PLUGIN_GEN_ENABLE_MEM_HELPER: + type = "enable mem helper"; + break; + case PLUGIN_GEN_DISABLE_MEM_HELPER: + type = "disable mem helper"; + break; + default: + break; + } + } + printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type); + i++; + } +#endif +} + +static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb) +{ + TCGOp *op; + int insn_idx = -1; + + pr_ops(); + + QTAILQ_FOREACH(op, &tcg_ctx->ops, link) { + switch (op->opc) { + case INDEX_op_insn_start: + insn_idx++; + break; + case INDEX_op_plugin_cb_start: + { + enum plugin_gen_from from = op->args[0]; + enum plugin_gen_cb type = op->args[1]; + + switch (from) { + case PLUGIN_GEN_FROM_TB: + { + g_assert(insn_idx == -1); + + switch (type) { + case PLUGIN_GEN_CB_UDATA: + plugin_gen_tb_udata(plugin_tb, op); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_tb_inline(plugin_tb, op); + break; + default: + g_assert_not_reached(); + } + break; + } + case PLUGIN_GEN_FROM_INSN: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_CB_UDATA: + plugin_gen_insn_udata(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_insn_inline(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_ENABLE_MEM_HELPER: + plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx); + break; + default: + g_assert_not_reached(); + } + break; + } + case PLUGIN_GEN_FROM_MEM: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_CB_MEM: + plugin_gen_mem_regular(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_mem_inline(plugin_tb, op, insn_idx); + 
break; + default: + g_assert_not_reached(); + } + + break; + } + case PLUGIN_GEN_AFTER_INSN: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_DISABLE_MEM_HELPER: + plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx); + break; + default: + g_assert_not_reached(); + } + break; + } + default: + g_assert_not_reached(); + } + break; + } + default: + /* plugins don't care about any other ops */ + break; + } + } + pr_ops(); +} + +bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only) +{ + struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; + bool ret = false; + + if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) { + ret = true; + + ptb->vaddr = tb->pc; + ptb->vaddr2 = -1; + get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1); + ptb->haddr2 = NULL; + ptb->mem_only = mem_only; + + plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB); + } + return ret; +} + +void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db) +{ + struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; + struct qemu_plugin_insn *pinsn; + + pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next); + tcg_ctx->plugin_insn = pinsn; + plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN); + + /* + * Detect page crossing to get the new host address. + * Note that we skip this when haddr1 == NULL, e.g. when we're + * fetching instructions from a region not backed by RAM. + */ + if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) && + unlikely((db->pc_next & TARGET_PAGE_MASK) != + (db->pc_first & TARGET_PAGE_MASK))) { + get_page_addr_code_hostp(cpu->env_ptr, db->pc_next, + &ptb->haddr2); + ptb->vaddr2 = db->pc_next; + } + if (likely(ptb->vaddr2 == -1)) { + pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr; + } else { + pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2; + } +} + +void plugin_gen_insn_end(void) +{ + plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN); +} + +void plugin_gen_tb_end(CPUState *cpu) +{ + struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; + int i; + + /* collect instrumentation requests */ + qemu_plugin_tb_trans_cb(cpu, ptb); + + /* inject the instrumentation at the appropriate places */ + plugin_gen_inject(ptb); + + /* clean up */ + for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) { + if (ptb->cbs[i]) { + g_array_set_size(ptb->cbs[i], 0); + } + } + ptb->n = 0; + tcg_ctx->plugin_insn = NULL; +} diff --git a/accel/tcg/plugin-helpers.h b/accel/tcg/plugin-helpers.h new file mode 100644 index 000000000..9829abe4a --- /dev/null +++ b/accel/tcg/plugin-helpers.h @@ -0,0 +1,4 @@ +#ifdef CONFIG_PLUGIN +DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb, TCG_CALL_NO_RWG, void, i32, ptr) +DEF_HELPER_FLAGS_4(plugin_vcpu_mem_cb, TCG_CALL_NO_RWG, void, i32, i32, i64, ptr) +#endif diff --git a/accel/tcg/tb-context.h b/accel/tcg/tb-context.h new file mode 100644 index 000000000..cac62d974 --- /dev/null +++ b/accel/tcg/tb-context.h @@ -0,0 +1,42 @@ +/* + * Internal structs that QEMU exports to TCG + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef QEMU_TB_CONTEXT_H +#define QEMU_TB_CONTEXT_H + +#include "qemu/thread.h" +#include "qemu/qht.h" + +#define CODE_GEN_HTABLE_BITS 15 +#define CODE_GEN_HTABLE_SIZE (1 << CODE_GEN_HTABLE_BITS) + +typedef struct TBContext TBContext; + +struct TBContext { + + struct qht htable; + + /* statistics */ + unsigned tb_flush_count; + unsigned tb_phys_invalidate_count; +}; + +extern TBContext tb_ctx; + +#endif diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h new file mode 100644 index 000000000..0a273d960 --- /dev/null +++ b/accel/tcg/tb-hash.h @@ -0,0 +1,69 @@ +/* + * internal execution defines for qemu + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef EXEC_TB_HASH_H +#define EXEC_TB_HASH_H + +#include "exec/cpu-defs.h" +#include "exec/exec-all.h" +#include "qemu/xxhash.h" + +#ifdef CONFIG_SOFTMMU + +/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for + addresses on the same page. The top bits are the same. This allows + TLB invalidation to quickly clear a subset of the hash table. */ +#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2) +#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS) +#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1) +#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE) + +static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc) +{ + target_ulong tmp; + tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); + return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK; +} + +static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) +{ + target_ulong tmp; + tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); + return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK) + | (tmp & TB_JMP_ADDR_MASK)); +} + +#else + +/* In user-mode we can get better hashing because we do not have a TLB */ +static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) +{ + return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1); +} + +#endif /* CONFIG_SOFTMMU */ + +static inline +uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags, + uint32_t cf_mask, uint32_t trace_vcpu_dstate) +{ + return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate); +} + +#endif diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c new file mode 100644 index 000000000..ea42d1d51 --- /dev/null +++ b/accel/tcg/tcg-accel-ops-icount.c @@ -0,0 +1,143 @@ +/* + * QEMU TCG Single Threaded vCPUs implementation using instruction counting + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. 
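Returning to the jump-cache hash in tb-hash.h above: the point of TB_JMP_PAGE_MASK is that all PCs on one guest page land in the same 2^TB_JMP_PAGE_BITS-entry stripe of the cache, so invalidating a page only has to clear that stripe. A standalone sketch (TARGET_PAGE_BITS = 12 and TB_JMP_CACHE_BITS = 12 are assumed values for illustration; both are defined elsewhere in the tree):

    #include <stdio.h>
    #include <stdint.h>

    #define TARGET_PAGE_BITS  12                      /* assumed: 4 KiB pages */
    #define TB_JMP_CACHE_BITS 12                      /* assumed cache size   */
    #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
    #define TB_JMP_PAGE_BITS  (TB_JMP_CACHE_BITS / 2)
    #define TB_JMP_PAGE_SIZE  (1 << TB_JMP_PAGE_BITS)
    #define TB_JMP_ADDR_MASK  (TB_JMP_PAGE_SIZE - 1)
    #define TB_JMP_PAGE_MASK  (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

    /* Same formula as tb_jmp_cache_hash_func() above. */
    static unsigned hash_func(uint64_t pc)
    {
        uint64_t tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
        return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
                | (tmp & TB_JMP_ADDR_MASK));
    }

    int main(void)
    {
        uint64_t a = 0x40001000, b = 0x40001abc;   /* same 4 KiB guest page */

        /* The two hashes differ only in the low TB_JMP_PAGE_BITS bits,
         * so both fall in the same 64-entry stripe. */
        printf("hash(a)=%#x hash(b)=%#x stripe(a)=%u stripe(b)=%u\n",
               hash_func(a), hash_func(b),
               hash_func(a) >> TB_JMP_PAGE_BITS, hash_func(b) >> TB_JMP_PAGE_BITS);
        return 0;
    }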
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "sysemu/tcg.h" +#include "sysemu/replay.h" +#include "qemu/main-loop.h" +#include "qemu/guest-random.h" +#include "exec/exec-all.h" + +#include "tcg-accel-ops.h" +#include "tcg-accel-ops-icount.h" +#include "tcg-accel-ops-rr.h" + +static int64_t icount_get_limit(void) +{ + int64_t deadline; + + if (replay_mode != REPLAY_MODE_PLAY) { + /* + * Include all the timers, because they may need an attention. + * Too long CPU execution may create unnecessary delay in UI. + */ + deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, + QEMU_TIMER_ATTR_ALL); + /* Check realtime timers, because they help with input processing */ + deadline = qemu_soonest_timeout(deadline, + qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME, + QEMU_TIMER_ATTR_ALL)); + + /* + * Maintain prior (possibly buggy) behaviour where if no deadline + * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than + * INT32_MAX nanoseconds ahead, we still use INT32_MAX + * nanoseconds. + */ + if ((deadline < 0) || (deadline > INT32_MAX)) { + deadline = INT32_MAX; + } + + return icount_round(deadline); + } else { + return replay_get_instructions(); + } +} + +static void icount_notify_aio_contexts(void) +{ + /* Wake up other AioContexts. */ + qemu_clock_notify(QEMU_CLOCK_VIRTUAL); + qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL); +} + +void icount_handle_deadline(void) +{ + assert(qemu_in_vcpu_thread()); + int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, + QEMU_TIMER_ATTR_ALL); + + /* + * Instructions, interrupts, and exceptions are processed in cpu-exec. + * Don't interrupt cpu thread, when these events are waiting + * (i.e., there is no checkpoint) + */ + if (deadline == 0 + && (replay_mode != REPLAY_MODE_PLAY || replay_has_checkpoint())) { + icount_notify_aio_contexts(); + } +} + +void icount_prepare_for_run(CPUState *cpu) +{ + int insns_left; + + /* + * These should always be cleared by icount_process_data after + * each vCPU execution. 
However u16.high can be raised + * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt + */ + g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0); + g_assert(cpu->icount_extra == 0); + + cpu->icount_budget = icount_get_limit(); + insns_left = MIN(0xffff, cpu->icount_budget); + cpu_neg(cpu)->icount_decr.u16.low = insns_left; + cpu->icount_extra = cpu->icount_budget - insns_left; + + replay_mutex_lock(); + + if (cpu->icount_budget == 0 && replay_has_checkpoint()) { + icount_notify_aio_contexts(); + } +} + +void icount_process_data(CPUState *cpu) +{ + /* Account for executed instructions */ + icount_update(cpu); + + /* Reset the counters */ + cpu_neg(cpu)->icount_decr.u16.low = 0; + cpu->icount_extra = 0; + cpu->icount_budget = 0; + + replay_account_executed_instructions(); + + replay_mutex_unlock(); +} + +void icount_handle_interrupt(CPUState *cpu, int mask) +{ + int old_mask = cpu->interrupt_request; + + tcg_handle_interrupt(cpu, mask); + if (qemu_cpu_is_self(cpu) && + !cpu->can_do_io + && (mask & ~old_mask) != 0) { + cpu_abort(cpu, "Raised interrupt while not in I/O function"); + } +} diff --git a/accel/tcg/tcg-accel-ops-icount.h b/accel/tcg/tcg-accel-ops-icount.h new file mode 100644 index 000000000..d884aa2aa --- /dev/null +++ b/accel/tcg/tcg-accel-ops-icount.h @@ -0,0 +1,19 @@ +/* + * QEMU TCG Single Threaded vCPUs implementation using instruction counting + * + * Copyright 2020 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef TCG_CPUS_ICOUNT_H +#define TCG_CPUS_ICOUNT_H + +void icount_handle_deadline(void); +void icount_prepare_for_run(CPUState *cpu); +void icount_process_data(CPUState *cpu); + +void icount_handle_interrupt(CPUState *cpu, int mask); + +#endif /* TCG_CPUS_ICOUNT_H */ diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c new file mode 100644 index 000000000..29632bd4c --- /dev/null +++ b/accel/tcg/tcg-accel-ops-mttcg.c @@ -0,0 +1,159 @@ +/* + * QEMU TCG Multi Threaded vCPUs implementation + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "sysemu/tcg.h" +#include "sysemu/replay.h" +#include "qemu/main-loop.h" +#include "qemu/notify.h" +#include "qemu/guest-random.h" +#include "exec/exec-all.h" +#include "hw/boards.h" + +#include "tcg-accel-ops.h" +#include "tcg-accel-ops-mttcg.h" + +typedef struct MttcgForceRcuNotifier { + Notifier notifier; + CPUState *cpu; +} MttcgForceRcuNotifier; + +static void do_nothing(CPUState *cpu, run_on_cpu_data d) +{ +} + +static void mttcg_force_rcu(Notifier *notify, void *data) +{ + CPUState *cpu = container_of(notify, MttcgForceRcuNotifier, notifier)->cpu; + + /* + * Called with rcu_registry_lock held, using async_run_on_cpu() ensures + * that there are no deadlocks. + */ + async_run_on_cpu(cpu, do_nothing, RUN_ON_CPU_NULL); +} + +/* + * In the multi-threaded case each vCPU has its own thread. The TLS + * variable current_cpu can be used deep in the code to find the + * current CPUState for a given thread. + */ + +static void *mttcg_cpu_thread_fn(void *arg) +{ + MttcgForceRcuNotifier force_rcu; + CPUState *cpu = arg; + + assert(tcg_enabled()); + g_assert(!icount_enabled()); + + rcu_register_thread(); + force_rcu.notifier.notify = mttcg_force_rcu; + force_rcu.cpu = cpu; + rcu_add_force_rcu_notifier(&force_rcu.notifier); + tcg_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + + cpu->thread_id = qemu_get_thread_id(); + cpu->can_do_io = 1; + current_cpu = cpu; + cpu_thread_signal_created(cpu); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + /* process any pending work */ + cpu->exit_request = 1; + + do { + if (cpu_can_run(cpu)) { + int r; + qemu_mutex_unlock_iothread(); + r = tcg_cpus_exec(cpu); + qemu_mutex_lock_iothread(); + switch (r) { + case EXCP_DEBUG: + cpu_handle_guest_debug(cpu); + break; + case EXCP_HALTED: + /* + * during start-up the vCPU is reset and the thread is + * kicked several times. If we don't ensure we go back + * to sleep in the halted state we won't cleanly + * start-up when the vCPU is enabled. + * + * cpu->halted should ensure we sleep in wait_io_event + */ + g_assert(cpu->halted); + break; + case EXCP_ATOMIC: + qemu_mutex_unlock_iothread(); + cpu_exec_step_atomic(cpu); + qemu_mutex_lock_iothread(); + default: + /* Ignore everything else? 
*/ + break; + } + } + + qatomic_mb_set(&cpu->exit_request, 0); + qemu_wait_io_event(cpu); + } while (!cpu->unplug || cpu_can_run(cpu)); + + tcg_cpus_destroy(cpu); + qemu_mutex_unlock_iothread(); + rcu_remove_force_rcu_notifier(&force_rcu.notifier); + rcu_unregister_thread(); + return NULL; +} + +void mttcg_kick_vcpu_thread(CPUState *cpu) +{ + cpu_exit(cpu); +} + +void mttcg_start_vcpu_thread(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + g_assert(tcg_enabled()); + tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1); + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + + /* create a thread per vCPU with TCG (MTTCG) */ + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG", + cpu->cpu_index); + + qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); + +#ifdef _WIN32 + cpu->hThread = qemu_thread_get_handle(cpu->thread); +#endif +} diff --git a/accel/tcg/tcg-accel-ops-mttcg.h b/accel/tcg/tcg-accel-ops-mttcg.h new file mode 100644 index 000000000..9fdc5a2ab --- /dev/null +++ b/accel/tcg/tcg-accel-ops-mttcg.h @@ -0,0 +1,19 @@ +/* + * QEMU TCG Multi Threaded vCPUs implementation + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef TCG_CPUS_MTTCG_H +#define TCG_CPUS_MTTCG_H + +/* kick MTTCG vCPU thread */ +void mttcg_kick_vcpu_thread(CPUState *cpu); + +/* start an mttcg vCPU thread */ +void mttcg_start_vcpu_thread(CPUState *cpu); + +#endif /* TCG_CPUS_MTTCG_H */ diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c new file mode 100644 index 000000000..bf59f53db --- /dev/null +++ b/accel/tcg/tcg-accel-ops-rr.c @@ -0,0 +1,305 @@ +/* + * QEMU TCG Single Threaded vCPUs implementation + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "sysemu/tcg.h" +#include "sysemu/replay.h" +#include "qemu/main-loop.h" +#include "qemu/notify.h" +#include "qemu/guest-random.h" +#include "exec/exec-all.h" + +#include "tcg-accel-ops.h" +#include "tcg-accel-ops-rr.h" +#include "tcg-accel-ops-icount.h" + +/* Kick all RR vCPUs */ +void rr_kick_vcpu_thread(CPUState *unused) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + cpu_exit(cpu); + }; +} + +/* + * TCG vCPU kick timer + * + * The kick timer is responsible for moving single threaded vCPU + * emulation on to the next vCPU. If more than one vCPU is running a + * timer event with force a cpu->exit so the next vCPU can get + * scheduled. + * + * The timer is removed if all vCPUs are idle and restarted again once + * idleness is complete. + */ + +static QEMUTimer *rr_kick_vcpu_timer; +static CPUState *rr_current_cpu; + +static inline int64_t rr_next_kick_time(void) +{ + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD; +} + +/* Kick the currently round-robin scheduled vCPU to next */ +static void rr_kick_next_cpu(void) +{ + CPUState *cpu; + do { + cpu = qatomic_mb_read(&rr_current_cpu); + if (cpu) { + cpu_exit(cpu); + } + } while (cpu != qatomic_mb_read(&rr_current_cpu)); +} + +static void rr_kick_thread(void *opaque) +{ + timer_mod(rr_kick_vcpu_timer, rr_next_kick_time()); + rr_kick_next_cpu(); +} + +static void rr_start_kick_timer(void) +{ + if (!rr_kick_vcpu_timer && CPU_NEXT(first_cpu)) { + rr_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + rr_kick_thread, NULL); + } + if (rr_kick_vcpu_timer && !timer_pending(rr_kick_vcpu_timer)) { + timer_mod(rr_kick_vcpu_timer, rr_next_kick_time()); + } +} + +static void rr_stop_kick_timer(void) +{ + if (rr_kick_vcpu_timer && timer_pending(rr_kick_vcpu_timer)) { + timer_del(rr_kick_vcpu_timer); + } +} + +static void rr_wait_io_event(void) +{ + CPUState *cpu; + + while (all_cpu_threads_idle()) { + rr_stop_kick_timer(); + qemu_cond_wait_iothread(first_cpu->halt_cond); + } + + rr_start_kick_timer(); + + CPU_FOREACH(cpu) { + qemu_wait_io_event_common(cpu); + } +} + +/* + * Destroy any remaining vCPUs which have been unplugged and have + * finished running + */ +static void rr_deal_with_unplugged_cpus(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (cpu->unplug && !cpu_can_run(cpu)) { + tcg_cpus_destroy(cpu); + break; + } + } +} + +static void rr_force_rcu(Notifier *notify, void *data) +{ + rr_kick_next_cpu(); +} + +/* + * In the single-threaded case each vCPU is simulated in turn. If + * there is more than a single vCPU we create a simple timer to kick + * the vCPU and ensure we don't get stuck in a tight loop in one vCPU. + * This is done explicitly rather than relying on side-effects + * elsewhere. 
+ */ + +static void *rr_cpu_thread_fn(void *arg) +{ + Notifier force_rcu; + CPUState *cpu = arg; + + assert(tcg_enabled()); + rcu_register_thread(); + force_rcu.notify = rr_force_rcu; + rcu_add_force_rcu_notifier(&force_rcu); + tcg_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + + cpu->thread_id = qemu_get_thread_id(); + cpu->can_do_io = 1; + cpu_thread_signal_created(cpu); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + /* wait for initial kick-off after machine start */ + while (first_cpu->stopped) { + qemu_cond_wait_iothread(first_cpu->halt_cond); + + /* process any pending work */ + CPU_FOREACH(cpu) { + current_cpu = cpu; + qemu_wait_io_event_common(cpu); + } + } + + rr_start_kick_timer(); + + cpu = first_cpu; + + /* process any pending work */ + cpu->exit_request = 1; + + while (1) { + qemu_mutex_unlock_iothread(); + replay_mutex_lock(); + qemu_mutex_lock_iothread(); + + if (icount_enabled()) { + /* Account partial waits to QEMU_CLOCK_VIRTUAL. */ + icount_account_warp_timer(); + /* + * Run the timers here. This is much more efficient than + * waking up the I/O thread and waiting for completion. + */ + icount_handle_deadline(); + } + + replay_mutex_unlock(); + + if (!cpu) { + cpu = first_cpu; + } + + while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) { + + qatomic_mb_set(&rr_current_cpu, cpu); + current_cpu = cpu; + + qemu_clock_enable(QEMU_CLOCK_VIRTUAL, + (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0); + + if (cpu_can_run(cpu)) { + int r; + + qemu_mutex_unlock_iothread(); + if (icount_enabled()) { + icount_prepare_for_run(cpu); + } + r = tcg_cpus_exec(cpu); + if (icount_enabled()) { + icount_process_data(cpu); + } + qemu_mutex_lock_iothread(); + + if (r == EXCP_DEBUG) { + cpu_handle_guest_debug(cpu); + break; + } else if (r == EXCP_ATOMIC) { + qemu_mutex_unlock_iothread(); + cpu_exec_step_atomic(cpu); + qemu_mutex_lock_iothread(); + break; + } + } else if (cpu->stop) { + if (cpu->unplug) { + cpu = CPU_NEXT(cpu); + } + break; + } + + cpu = CPU_NEXT(cpu); + } /* while (cpu && !cpu->exit_request).. */ + + /* Does not need qatomic_mb_set because a spurious wakeup is okay. */ + qatomic_set(&rr_current_cpu, NULL); + + if (cpu && cpu->exit_request) { + qatomic_mb_set(&cpu->exit_request, 0); + } + + if (icount_enabled() && all_cpu_threads_idle()) { + /* + * When all cpus are sleeping (e.g in WFI), to avoid a deadlock + * in the main_loop, wake it up in order to start the warp timer. 
+ */ + qemu_notify_event(); + } + + rr_wait_io_event(); + rr_deal_with_unplugged_cpus(); + } + + rcu_remove_force_rcu_notifier(&force_rcu); + rcu_unregister_thread(); + return NULL; +} + +void rr_start_vcpu_thread(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + static QemuCond *single_tcg_halt_cond; + static QemuThread *single_tcg_cpu_thread; + + g_assert(tcg_enabled()); + tcg_cpu_init_cflags(cpu, false); + + if (!single_tcg_cpu_thread) { + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + + /* share a single thread for all cpus with TCG */ + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG"); + qemu_thread_create(cpu->thread, thread_name, + rr_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); + + single_tcg_halt_cond = cpu->halt_cond; + single_tcg_cpu_thread = cpu->thread; +#ifdef _WIN32 + cpu->hThread = qemu_thread_get_handle(cpu->thread); +#endif + } else { + /* we share the thread */ + cpu->thread = single_tcg_cpu_thread; + cpu->halt_cond = single_tcg_halt_cond; + cpu->thread_id = first_cpu->thread_id; + cpu->can_do_io = 1; + cpu->created = true; + } +} diff --git a/accel/tcg/tcg-accel-ops-rr.h b/accel/tcg/tcg-accel-ops-rr.h new file mode 100644 index 000000000..54f6ae6e8 --- /dev/null +++ b/accel/tcg/tcg-accel-ops-rr.h @@ -0,0 +1,21 @@ +/* + * QEMU TCG Single Threaded vCPUs implementation + * + * Copyright 2020 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef TCG_CPUS_RR_H +#define TCG_CPUS_RR_H + +#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10) + +/* Kick all RR vCPUs. */ +void rr_kick_vcpu_thread(CPUState *unused); + +/* start the round robin vcpu thread */ +void rr_start_vcpu_thread(CPUState *cpu); + +#endif /* TCG_CPUS_RR_H */ diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c new file mode 100644 index 000000000..1a8e8390b --- /dev/null +++ b/accel/tcg/tcg-accel-ops.c @@ -0,0 +1,133 @@ +/* + * QEMU TCG vCPU common functionality + * + * Functionality common to all TCG vCPU variants: mttcg, rr and icount. + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "sysemu/tcg.h" +#include "sysemu/replay.h" +#include "qemu/main-loop.h" +#include "qemu/guest-random.h" +#include "exec/exec-all.h" + +#include "tcg-accel-ops.h" +#include "tcg-accel-ops-mttcg.h" +#include "tcg-accel-ops-rr.h" +#include "tcg-accel-ops-icount.h" + +/* common functionality among all TCG variants */ + +void tcg_cpu_init_cflags(CPUState *cpu, bool parallel) +{ + uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT; + cflags |= parallel ? CF_PARALLEL : 0; + cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; + cpu->tcg_cflags = cflags; +} + +void tcg_cpus_destroy(CPUState *cpu) +{ + cpu_thread_signal_destroyed(cpu); +} + +int tcg_cpus_exec(CPUState *cpu) +{ + int ret; +#ifdef CONFIG_PROFILER + int64_t ti; +#endif + assert(tcg_enabled()); +#ifdef CONFIG_PROFILER + ti = profile_getclock(); +#endif + cpu_exec_start(cpu); + ret = cpu_exec(cpu); + cpu_exec_end(cpu); +#ifdef CONFIG_PROFILER + qatomic_set(&tcg_ctx->prof.cpu_exec_time, + tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti); +#endif + return ret; +} + +/* mask must never be zero, except for A20 change call */ +void tcg_handle_interrupt(CPUState *cpu, int mask) +{ + g_assert(qemu_mutex_iothread_locked()); + + cpu->interrupt_request |= mask; + + /* + * If called from iothread context, wake the target cpu in + * case its halted. + */ + if (!qemu_cpu_is_self(cpu)) { + qemu_cpu_kick(cpu); + } else { + qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); + } +} + +static void tcg_accel_ops_init(AccelOpsClass *ops) +{ + if (qemu_tcg_mttcg_enabled()) { + ops->create_vcpu_thread = mttcg_start_vcpu_thread; + ops->kick_vcpu_thread = mttcg_kick_vcpu_thread; + ops->handle_interrupt = tcg_handle_interrupt; + } else if (icount_enabled()) { + ops->create_vcpu_thread = rr_start_vcpu_thread; + ops->kick_vcpu_thread = rr_kick_vcpu_thread; + ops->handle_interrupt = icount_handle_interrupt; + ops->get_virtual_clock = icount_get; + ops->get_elapsed_ticks = icount_get; + } else { + ops->create_vcpu_thread = rr_start_vcpu_thread; + ops->kick_vcpu_thread = rr_kick_vcpu_thread; + ops->handle_interrupt = tcg_handle_interrupt; + } +} + +static void tcg_accel_ops_class_init(ObjectClass *oc, void *data) +{ + AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); + + ops->ops_init = tcg_accel_ops_init; +} + +static const TypeInfo tcg_accel_ops_type = { + .name = ACCEL_OPS_NAME("tcg"), + + .parent = TYPE_ACCEL_OPS, + .class_init = tcg_accel_ops_class_init, + .abstract = true, +}; +module_obj(ACCEL_OPS_NAME("tcg")); + +static void tcg_accel_ops_register_types(void) +{ + type_register_static(&tcg_accel_ops_type); +} +type_init(tcg_accel_ops_register_types); diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h new file mode 100644 index 000000000..6a5fcef88 --- /dev/null +++ b/accel/tcg/tcg-accel-ops.h @@ -0,0 +1,22 @@ +/* + * QEMU TCG vCPU common functionality + * + * Functionality common to all TCG vcpu variants: mttcg, rr and icount. + * + * Copyright 2020 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef TCG_CPUS_H +#define TCG_CPUS_H + +#include "sysemu/cpus.h" + +void tcg_cpus_destroy(CPUState *cpu); +int tcg_cpus_exec(CPUState *cpu); +void tcg_handle_interrupt(CPUState *cpu, int mask); +void tcg_cpu_init_cflags(CPUState *cpu, bool parallel); + +#endif /* TCG_CPUS_H */ diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c new file mode 100644 index 000000000..d6336a9c9 --- /dev/null +++ b/accel/tcg/tcg-all.c @@ -0,0 +1,248 @@ +/* + * QEMU System Emulator, accelerator interfaces + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "sysemu/tcg.h" +#include "sysemu/cpu-timers.h" +#include "tcg/tcg.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/accel.h" +#include "qapi/qapi-builtin-visit.h" +#include "qemu/units.h" +#if !defined(CONFIG_USER_ONLY) +#include "hw/boards.h" +#endif +#include "internal.h" + +struct TCGState { + AccelState parent_obj; + + bool mttcg_enabled; + int splitwx_enabled; + unsigned long tb_size; +}; +typedef struct TCGState TCGState; + +#define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg") + +DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE, + TYPE_TCG_ACCEL) + +/* + * We default to false if we know other options have been enabled + * which are currently incompatible with MTTCG. Otherwise when each + * guest (target) has been updated to support: + * - atomic instructions + * - memory ordering primitives (barriers) + * they can set the appropriate CONFIG flags in ${target}-softmmu.mak + * + * Once a guest architecture has been converted to the new primitives + * there are two remaining limitations to check. + * + * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host) + * - The host must have a stronger memory order than the guest + * + * It may be possible in future to support strong guests on weak hosts + * but that will require tagging all load/stores in a guest with their + * implicit memory order requirements which would likely slow things + * down a lot. 
+ */ + +static bool check_tcg_memory_orders_compatible(void) +{ +#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO) + return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0; +#else + return false; +#endif +} + +static bool default_mttcg_enabled(void) +{ + if (icount_enabled() || TCG_OVERSIZED_GUEST) { + return false; + } else { +#ifdef TARGET_SUPPORTS_MTTCG + return check_tcg_memory_orders_compatible(); +#else + return false; +#endif + } +} + +static void tcg_accel_instance_init(Object *obj) +{ + TCGState *s = TCG_STATE(obj); + + s->mttcg_enabled = default_mttcg_enabled(); + + /* If debugging enabled, default "auto on", otherwise off. */ +#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY) + s->splitwx_enabled = -1; +#else + s->splitwx_enabled = 0; +#endif +} + +bool mttcg_enabled; + +static int tcg_init_machine(MachineState *ms) +{ + TCGState *s = TCG_STATE(current_accel()); +#ifdef CONFIG_USER_ONLY + unsigned max_cpus = 1; +#else + unsigned max_cpus = ms->smp.max_cpus; +#endif + + tcg_allowed = true; + mttcg_enabled = s->mttcg_enabled; + + page_init(); + tb_htable_init(); + tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus); + +#if defined(CONFIG_SOFTMMU) + /* + * There's no guest base to take into account, so go ahead and + * initialize the prologue now. + */ + tcg_prologue_init(tcg_ctx); +#endif + + return 0; +} + +static char *tcg_get_thread(Object *obj, Error **errp) +{ + TCGState *s = TCG_STATE(obj); + + return g_strdup(s->mttcg_enabled ? "multi" : "single"); +} + +static void tcg_set_thread(Object *obj, const char *value, Error **errp) +{ + TCGState *s = TCG_STATE(obj); + + if (strcmp(value, "multi") == 0) { + if (TCG_OVERSIZED_GUEST) { + error_setg(errp, "No MTTCG when guest word size > hosts"); + } else if (icount_enabled()) { + error_setg(errp, "No MTTCG when icount is enabled"); + } else { +#ifndef TARGET_SUPPORTS_MTTCG + warn_report("Guest not yet converted to MTTCG - " + "you may get unexpected results"); +#endif + if (!check_tcg_memory_orders_compatible()) { + warn_report("Guest expects a stronger memory ordering " + "than the host provides"); + error_printf("This may cause strange/hard to debug errors\n"); + } + s->mttcg_enabled = true; + } + } else if (strcmp(value, "single") == 0) { + s->mttcg_enabled = false; + } else { + error_setg(errp, "Invalid 'thread' setting %s", value); + } +} + +static void tcg_get_tb_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + TCGState *s = TCG_STATE(obj); + uint32_t value = s->tb_size; + + visit_type_uint32(v, name, &value, errp); +} + +static void tcg_set_tb_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + TCGState *s = TCG_STATE(obj); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + s->tb_size = value; +} + +static bool tcg_get_splitwx(Object *obj, Error **errp) +{ + TCGState *s = TCG_STATE(obj); + return s->splitwx_enabled; +} + +static void tcg_set_splitwx(Object *obj, bool value, Error **errp) +{ + TCGState *s = TCG_STATE(obj); + s->splitwx_enabled = value; +} + +static void tcg_accel_class_init(ObjectClass *oc, void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "tcg"; + ac->init_machine = tcg_init_machine; + ac->allowed = &tcg_allowed; + + object_class_property_add_str(oc, "thread", + tcg_get_thread, + tcg_set_thread); + + object_class_property_add(oc, "tb-size", "int", + tcg_get_tb_size, tcg_set_tb_size, + NULL, NULL); + object_class_property_set_description(oc, "tb-size", + 
"TCG translation block cache size"); + + object_class_property_add_bool(oc, "split-wx", + tcg_get_splitwx, tcg_set_splitwx); + object_class_property_set_description(oc, "split-wx", + "Map jit pages into separate RW and RX regions"); +} + +static const TypeInfo tcg_accel_type = { + .name = TYPE_TCG_ACCEL, + .parent = TYPE_ACCEL, + .instance_init = tcg_accel_instance_init, + .class_init = tcg_accel_class_init, + .instance_size = sizeof(TCGState), +}; +module_obj(TYPE_TCG_ACCEL); + +static void register_accel_types(void) +{ + type_register_static(&tcg_accel_type); +} + +type_init(register_accel_types); diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c new file mode 100644 index 000000000..ac7d28c25 --- /dev/null +++ b/accel/tcg/tcg-runtime-gvec.c @@ -0,0 +1,1534 @@ +/* + * Generic vectorized operation runtime + * + * Copyright (c) 2018 Linaro + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "tcg/tcg-gvec-desc.h" + + +static inline void clear_high(void *d, intptr_t oprsz, uint32_t desc) +{ + intptr_t maxsz = simd_maxsz(desc); + intptr_t i; + + if (unlikely(maxsz > oprsz)) { + for (i = oprsz; i < maxsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = 0; + } + } +} + +void HELPER(gvec_add8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) + *(uint8_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_add16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) + *(uint16_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_add32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) + *(uint32_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_add64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) + *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_adds8)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) + (uint8_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_adds16)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + 
i) = *(uint16_t *)(a + i) + (uint16_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_adds32)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) + (uint32_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_adds64)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) + b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) - *(uint8_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) - *(uint16_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) - *(uint32_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) - *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs8)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) - (uint8_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs16)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) - (uint16_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs32)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) - (uint32_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs64)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) - b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) * *(uint8_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) * *(uint16_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += 
sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) * *(uint32_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) * *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls8)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) * (uint8_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls16)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) * (uint16_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls32)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) * (uint32_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls64)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) * b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg8)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = -*(uint8_t *)(a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg16)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = -*(uint16_t *)(a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg32)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = -*(uint32_t *)(a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg64)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = -*(uint64_t *)(a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs8)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int8_t aa = *(int8_t *)(a + i); + *(int8_t *)(d + i) = aa < 0 ? -aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs16)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int16_t aa = *(int16_t *)(a + i); + *(int16_t *)(d + i) = aa < 0 ? -aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs32)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t aa = *(int32_t *)(a + i); + *(int32_t *)(d + i) = aa < 0 ? 
-aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs64)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t aa = *(int64_t *)(a + i); + *(int64_t *)(d + i) = aa < 0 ? -aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mov)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + + memcpy(d, a, oprsz); + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_dup64)(void *d, uint32_t desc, uint64_t c) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + if (c == 0) { + oprsz = 0; + } else { + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = c; + } + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_dup32)(void *d, uint32_t desc, uint32_t c) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + if (c == 0) { + oprsz = 0; + } else { + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = c; + } + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_dup16)(void *d, uint32_t desc, uint32_t c) +{ + HELPER(gvec_dup32)(d, desc, 0x00010001 * (c & 0xffff)); +} + +void HELPER(gvec_dup8)(void *d, uint32_t desc, uint32_t c) +{ + HELPER(gvec_dup32)(d, desc, 0x01010101 * (c & 0xff)); +} + +void HELPER(gvec_not)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = ~*(uint64_t *)(a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_and)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_or)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) | *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_xor)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) ^ *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_andc)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) &~ *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_orc)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) |~ *(uint64_t *)(b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_nand)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) & *(uint64_t *)(b + i)); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_nor)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) | *(uint64_t *)(b + i)); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_eqv)(void *d, void *a, 
void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) ^ *(uint64_t *)(b + i)); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) ^ b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ors)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) | b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl8i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl16i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl32i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl64i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr8i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr16i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr32i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr64i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar8i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + 
*(int8_t *)(d + i) = *(int8_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar16i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(int16_t *)(d + i) = *(int16_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar32i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(int32_t *)(d + i) = *(int32_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar64i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(int64_t *)(d + i) = *(int64_t *)(a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl8i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = rol8(*(uint8_t *)(a + i), shift); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl16i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = rol16(*(uint16_t *)(a + i), shift); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl32i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = rol32(*(uint32_t *)(a + i), shift); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl64i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = rol64(*(uint64_t *)(a + i), shift); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl8v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t sh = *(uint8_t *)(b + i) & 7; + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint8_t sh = *(uint16_t *)(b + i) & 15; + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint8_t sh = *(uint32_t *)(b + i) & 31; + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint8_t sh = *(uint64_t *)(b + i) & 63; + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr8v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t sh = 
*(uint8_t *)(b + i) & 7; + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint8_t sh = *(uint16_t *)(b + i) & 15; + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint8_t sh = *(uint32_t *)(b + i) & 31; + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint8_t sh = *(uint64_t *)(b + i) & 63; + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar8v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + uint8_t sh = *(uint8_t *)(b + i) & 7; + *(int8_t *)(d + i) = *(int8_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + uint8_t sh = *(uint16_t *)(b + i) & 15; + *(int16_t *)(d + i) = *(int16_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + uint8_t sh = *(uint32_t *)(b + i) & 31; + *(int32_t *)(d + i) = *(int32_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + uint8_t sh = *(uint64_t *)(b + i) & 63; + *(int64_t *)(d + i) = *(int64_t *)(a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl8v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t sh = *(uint8_t *)(b + i) & 7; + *(uint8_t *)(d + i) = rol8(*(uint8_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint8_t sh = *(uint16_t *)(b + i) & 15; + *(uint16_t *)(d + i) = rol16(*(uint16_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint8_t sh = *(uint32_t *)(b + i) & 31; + *(uint32_t *)(d + i) = rol32(*(uint32_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotl64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint8_t sh = *(uint64_t *)(b + i) & 63; + *(uint64_t *)(d + i) = rol64(*(uint64_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotr8v)(void *d, void *a, void *b, 
uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t sh = *(uint8_t *)(b + i) & 7; + *(uint8_t *)(d + i) = ror8(*(uint8_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotr16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint8_t sh = *(uint16_t *)(b + i) & 15; + *(uint16_t *)(d + i) = ror16(*(uint16_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotr32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint8_t sh = *(uint32_t *)(b + i) & 31; + *(uint32_t *)(d + i) = ror32(*(uint32_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_rotr64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint8_t sh = *(uint64_t *)(b + i) & 63; + *(uint64_t *)(d + i) = ror64(*(uint64_t *)(a + i), sh); + } + clear_high(d, oprsz, desc); +} + +#define DO_CMP1(NAME, TYPE, OP) \ +void HELPER(NAME)(void *d, void *a, void *b, uint32_t desc) \ +{ \ + intptr_t oprsz = simd_oprsz(desc); \ + intptr_t i; \ + for (i = 0; i < oprsz; i += sizeof(TYPE)) { \ + *(TYPE *)(d + i) = -(*(TYPE *)(a + i) OP *(TYPE *)(b + i)); \ + } \ + clear_high(d, oprsz, desc); \ +} + +#define DO_CMP2(SZ) \ + DO_CMP1(gvec_eq##SZ, uint##SZ##_t, ==) \ + DO_CMP1(gvec_ne##SZ, uint##SZ##_t, !=) \ + DO_CMP1(gvec_lt##SZ, int##SZ##_t, <) \ + DO_CMP1(gvec_le##SZ, int##SZ##_t, <=) \ + DO_CMP1(gvec_ltu##SZ, uint##SZ##_t, <) \ + DO_CMP1(gvec_leu##SZ, uint##SZ##_t, <=) + +DO_CMP2(8) +DO_CMP2(16) +DO_CMP2(32) +DO_CMP2(64) + +#undef DO_CMP1 +#undef DO_CMP2 + +void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int r = *(int8_t *)(a + i) + *(int8_t *)(b + i); + if (r > INT8_MAX) { + r = INT8_MAX; + } else if (r < INT8_MIN) { + r = INT8_MIN; + } + *(int8_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ssadd16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int r = *(int16_t *)(a + i) + *(int16_t *)(b + i); + if (r > INT16_MAX) { + r = INT16_MAX; + } else if (r < INT16_MIN) { + r = INT16_MIN; + } + *(int16_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ssadd32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t ai = *(int32_t *)(a + i); + int32_t bi = *(int32_t *)(b + i); + int32_t di; + if (sadd32_overflow(ai, bi, &di)) { + di = (di < 0 ? INT32_MAX : INT32_MIN); + } + *(int32_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ssadd64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t ai = *(int64_t *)(a + i); + int64_t bi = *(int64_t *)(b + i); + int64_t di; + if (sadd64_overflow(ai, bi, &di)) { + di = (di < 0 ? 
INT64_MAX : INT64_MIN); + } + *(int64_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + int r = *(int8_t *)(a + i) - *(int8_t *)(b + i); + if (r > INT8_MAX) { + r = INT8_MAX; + } else if (r < INT8_MIN) { + r = INT8_MIN; + } + *(uint8_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int r = *(int16_t *)(a + i) - *(int16_t *)(b + i); + if (r > INT16_MAX) { + r = INT16_MAX; + } else if (r < INT16_MIN) { + r = INT16_MIN; + } + *(int16_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t ai = *(int32_t *)(a + i); + int32_t bi = *(int32_t *)(b + i); + int32_t di; + if (ssub32_overflow(ai, bi, &di)) { + di = (di < 0 ? INT32_MAX : INT32_MIN); + } + *(int32_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t ai = *(int64_t *)(a + i); + int64_t bi = *(int64_t *)(b + i); + int64_t di; + if (ssub64_overflow(ai, bi, &di)) { + di = (di < 0 ? INT64_MAX : INT64_MIN); + } + *(int64_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + unsigned r = *(uint8_t *)(a + i) + *(uint8_t *)(b + i); + if (r > UINT8_MAX) { + r = UINT8_MAX; + } + *(uint8_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + unsigned r = *(uint16_t *)(a + i) + *(uint16_t *)(b + i); + if (r > UINT16_MAX) { + r = UINT16_MAX; + } + *(uint16_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t ai = *(uint32_t *)(a + i); + uint32_t bi = *(uint32_t *)(b + i); + uint32_t di; + if (uadd32_overflow(ai, bi, &di)) { + di = UINT32_MAX; + } + *(uint32_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t ai = *(uint64_t *)(a + i); + uint64_t bi = *(uint64_t *)(b + i); + uint64_t di; + if (uadd64_overflow(ai, bi, &di)) { + di = UINT64_MAX; + } + *(uint64_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + int r = *(uint8_t *)(a + i) - *(uint8_t *)(b + i); + if (r < 0) { + r = 0; + } + *(uint8_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub16)(void *d, void *a, void *b, uint32_t desc) +{ + 
intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + int r = *(uint16_t *)(a + i) - *(uint16_t *)(b + i); + if (r < 0) { + r = 0; + } + *(uint16_t *)(d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t ai = *(uint32_t *)(a + i); + uint32_t bi = *(uint32_t *)(b + i); + uint32_t di; + if (usub32_overflow(ai, bi, &di)) { + di = 0; + } + *(uint32_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t ai = *(uint64_t *)(a + i); + uint64_t bi = *(uint64_t *)(b + i); + uint64_t di; + if (usub64_overflow(ai, bi, &di)) { + di = 0; + } + *(uint64_t *)(d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int8_t aa = *(int8_t *)(a + i); + int8_t bb = *(int8_t *)(b + i); + int8_t dd = aa < bb ? aa : bb; + *(int8_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int16_t aa = *(int16_t *)(a + i); + int16_t bb = *(int16_t *)(b + i); + int16_t dd = aa < bb ? aa : bb; + *(int16_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t aa = *(int32_t *)(a + i); + int32_t bb = *(int32_t *)(b + i); + int32_t dd = aa < bb ? aa : bb; + *(int32_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t aa = *(int64_t *)(a + i); + int64_t bb = *(int64_t *)(b + i); + int64_t dd = aa < bb ? aa : bb; + *(int64_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int8_t aa = *(int8_t *)(a + i); + int8_t bb = *(int8_t *)(b + i); + int8_t dd = aa > bb ? aa : bb; + *(int8_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int16_t aa = *(int16_t *)(a + i); + int16_t bb = *(int16_t *)(b + i); + int16_t dd = aa > bb ? aa : bb; + *(int16_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t aa = *(int32_t *)(a + i); + int32_t bb = *(int32_t *)(b + i); + int32_t dd = aa > bb ? 
aa : bb; + *(int32_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t aa = *(int64_t *)(a + i); + int64_t bb = *(int64_t *)(b + i); + int64_t dd = aa > bb ? aa : bb; + *(int64_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t aa = *(uint8_t *)(a + i); + uint8_t bb = *(uint8_t *)(b + i); + uint8_t dd = aa < bb ? aa : bb; + *(uint8_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint16_t aa = *(uint16_t *)(a + i); + uint16_t bb = *(uint16_t *)(b + i); + uint16_t dd = aa < bb ? aa : bb; + *(uint16_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t aa = *(uint32_t *)(a + i); + uint32_t bb = *(uint32_t *)(b + i); + uint32_t dd = aa < bb ? aa : bb; + *(uint32_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t aa = *(uint64_t *)(a + i); + uint64_t bb = *(uint64_t *)(b + i); + uint64_t dd = aa < bb ? aa : bb; + *(uint64_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t aa = *(uint8_t *)(a + i); + uint8_t bb = *(uint8_t *)(b + i); + uint8_t dd = aa > bb ? aa : bb; + *(uint8_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint16_t aa = *(uint16_t *)(a + i); + uint16_t bb = *(uint16_t *)(b + i); + uint16_t dd = aa > bb ? aa : bb; + *(uint16_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t aa = *(uint32_t *)(a + i); + uint32_t bb = *(uint32_t *)(b + i); + uint32_t dd = aa > bb ? aa : bb; + *(uint32_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t aa = *(uint64_t *)(a + i); + uint64_t bb = *(uint64_t *)(b + i); + uint64_t dd = aa > bb ? 
aa : bb; + *(uint64_t *)(d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_bitsel)(void *d, void *a, void *b, void *c, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t aa = *(uint64_t *)(a + i); + uint64_t bb = *(uint64_t *)(b + i); + uint64_t cc = *(uint64_t *)(c + i); + *(uint64_t *)(d + i) = (bb & aa) | (cc & ~aa); + } + clear_high(d, oprsz, desc); +} diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c new file mode 100644 index 000000000..e4e030043 --- /dev/null +++ b/accel/tcg/tcg-runtime.c @@ -0,0 +1,150 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include "exec/exec-all.h" +#include "disas/disas.h" +#include "exec/log.h" +#include "tcg/tcg.h" + +/* 32-bit helpers */ + +int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2) +{ + return arg1 / arg2; +} + +int32_t HELPER(rem_i32)(int32_t arg1, int32_t arg2) +{ + return arg1 % arg2; +} + +uint32_t HELPER(divu_i32)(uint32_t arg1, uint32_t arg2) +{ + return arg1 / arg2; +} + +uint32_t HELPER(remu_i32)(uint32_t arg1, uint32_t arg2) +{ + return arg1 % arg2; +} + +/* 64-bit helpers */ + +uint64_t HELPER(shl_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 << arg2; +} + +uint64_t HELPER(shr_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 >> arg2; +} + +int64_t HELPER(sar_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 >> arg2; +} + +int64_t HELPER(div_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 / arg2; +} + +int64_t HELPER(rem_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 % arg2; +} + +uint64_t HELPER(divu_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 / arg2; +} + +uint64_t HELPER(remu_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 % arg2; +} + +uint64_t HELPER(muluh_i64)(uint64_t arg1, uint64_t arg2) +{ + uint64_t l, h; + mulu64(&l, &h, arg1, arg2); + return h; +} + +int64_t HELPER(mulsh_i64)(int64_t arg1, int64_t arg2) +{ + uint64_t l, h; + muls64(&l, &h, arg1, arg2); + return h; +} + +uint32_t HELPER(clz_i32)(uint32_t arg, uint32_t zero_val) +{ + return arg ? clz32(arg) : zero_val; +} + +uint32_t HELPER(ctz_i32)(uint32_t arg, uint32_t zero_val) +{ + return arg ? 
ctz32(arg) : zero_val; +} + +uint64_t HELPER(clz_i64)(uint64_t arg, uint64_t zero_val) +{ + return arg ? clz64(arg) : zero_val; +} + +uint64_t HELPER(ctz_i64)(uint64_t arg, uint64_t zero_val) +{ + return arg ? ctz64(arg) : zero_val; +} + +uint32_t HELPER(clrsb_i32)(uint32_t arg) +{ + return clrsb32(arg); +} + +uint64_t HELPER(clrsb_i64)(uint64_t arg) +{ + return clrsb64(arg); +} + +uint32_t HELPER(ctpop_i32)(uint32_t arg) +{ + return ctpop32(arg); +} + +uint64_t HELPER(ctpop_i64)(uint64_t arg) +{ + return ctpop64(arg); +} + +void HELPER(exit_atomic)(CPUArchState *env) +{ + cpu_loop_exit_atomic(env_cpu(env), GETPC()); +} diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h new file mode 100644 index 000000000..37cbd722b --- /dev/null +++ b/accel/tcg/tcg-runtime.h @@ -0,0 +1,287 @@ +DEF_HELPER_FLAGS_2(div_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) +DEF_HELPER_FLAGS_2(rem_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) +DEF_HELPER_FLAGS_2(divu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(remu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) + +DEF_HELPER_FLAGS_2(div_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(rem_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(divu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(remu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(shl_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(shr_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) + +DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(clz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(ctz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(clz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(ctz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_1(clrsb_i32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(clrsb_i64, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(ctpop_i32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(ctpop_i64, TCG_CALL_NO_RWG_SE, i64, i64) + +DEF_HELPER_FLAGS_1(lookup_tb_ptr, TCG_CALL_NO_WG_SE, cptr, env) + +DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env) + +#ifndef IN_HELPER_PROTO +/* + * Pass calls to memset directly to libc, without a thunk in qemu. + * Do not re-declare memset, especially since we fudge the type here; + * we assume sizeof(void *) == sizeof(size_t), which is true for + * all supported hosts. 
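As an aside, the width assumption stated just above can be checked at compile time; a minimal standalone sketch in plain C11 (no QEMU macros involved):

    #include <stddef.h>

    /* Fails to compile on any host where aliasing helper_memset to memset
     * would mis-size its length argument. */
    _Static_assert(sizeof(void *) == sizeof(size_t),
                   "void * and size_t must have the same width");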
+ */ +#define helper_memset memset +DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr) +#endif /* IN_HELPER_PROTO */ + +DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +#ifdef CONFIG_ATOMIC64 +DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG, + i64, env, tl, i64, i64, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG, + i64, env, tl, i64, i64, i32) +#endif + +#ifdef CONFIG_ATOMIC64 +#define GEN_ATOMIC_HELPERS(NAME) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \ + TCG_CALL_NO_WG, i64, env, tl, i64, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \ + TCG_CALL_NO_WG, i64, env, tl, i64, i32) +#else +#define GEN_ATOMIC_HELPERS(NAME) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) +#endif /* CONFIG_ATOMIC64 */ + +GEN_ATOMIC_HELPERS(fetch_add) +GEN_ATOMIC_HELPERS(fetch_and) +GEN_ATOMIC_HELPERS(fetch_or) +GEN_ATOMIC_HELPERS(fetch_xor) +GEN_ATOMIC_HELPERS(fetch_smin) +GEN_ATOMIC_HELPERS(fetch_umin) +GEN_ATOMIC_HELPERS(fetch_smax) +GEN_ATOMIC_HELPERS(fetch_umax) + +GEN_ATOMIC_HELPERS(add_fetch) +GEN_ATOMIC_HELPERS(and_fetch) +GEN_ATOMIC_HELPERS(or_fetch) +GEN_ATOMIC_HELPERS(xor_fetch) +GEN_ATOMIC_HELPERS(smin_fetch) +GEN_ATOMIC_HELPERS(umin_fetch) +GEN_ATOMIC_HELPERS(smax_fetch) +GEN_ATOMIC_HELPERS(umax_fetch) + +GEN_ATOMIC_HELPERS(xchg) + +#undef GEN_ATOMIC_HELPERS + +DEF_HELPER_FLAGS_3(gvec_mov, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_dup8, TCG_CALL_NO_RWG, void, ptr, i32, i32) +DEF_HELPER_FLAGS_3(gvec_dup16, TCG_CALL_NO_RWG, void, ptr, i32, i32) +DEF_HELPER_FLAGS_3(gvec_dup32, TCG_CALL_NO_RWG, void, ptr, i32, i32) +DEF_HELPER_FLAGS_3(gvec_dup64, TCG_CALL_NO_RWG, void, ptr, i32, i64) + +DEF_HELPER_FLAGS_4(gvec_add8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_add16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_add32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_add64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_adds8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_adds16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_adds32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) 
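For orientation, the difference between the three-vector forms (gvec_add*) and the "s" forms (gvec_adds*) declared here, sketched over ordinary C arrays; this is illustrative only, not the QEMU implementation:

    #include <stdint.h>

    /* gvec_add32-style: element-wise vector + vector */
    static void add32_vv(uint32_t *d, const uint32_t *a, const uint32_t *b, int n)
    {
        for (int i = 0; i < n; i++) {
            d[i] = a[i] + b[i];
        }
    }

    /* gvec_adds32-style: the scalar operand is broadcast against every element */
    static void add32_vs(uint32_t *d, const uint32_t *a, uint32_t scalar, int n)
    {
        for (int i = 0; i < n; i++) {
            d[i] = a[i] + scalar;
        }
    }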
+DEF_HELPER_FLAGS_4(gvec_adds64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(gvec_sub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_subs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_subs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_subs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_subs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(gvec_mul8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_mul16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_mul32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_mul64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_muls8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_muls16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_muls32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_muls64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(gvec_ssadd8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ssadd16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ssadd32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ssadd64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_sssub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sssub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sssub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sssub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_usadd8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_usadd16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_usadd32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_usadd64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ussub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ussub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ussub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ussub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_smin8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smin16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smin32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smin64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_smax8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smax16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smax32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smax64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_umin8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umin16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umin32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umin64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_umax8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umax16, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umax32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umax64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_neg8, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_neg16, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_neg32, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_neg64, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_abs8, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_abs16, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_abs32, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_abs64, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_not, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_and, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_or, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_xor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_andc, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_orc, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_nand, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_3(gvec_shl8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shl16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shl32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shl64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_shr8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shr16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shr32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shr64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_sar8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sar16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sar32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sar64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_rotl8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_rotl16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_rotl32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_rotl64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_shl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shl64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_shr8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shr16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shr32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shr64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_sar8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sar16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
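The "v" suffix on these entries means the shift count comes from the matching element of the second vector, masked to the element width; a scalar sketch of the 32-bit case, mirroring the helpers implemented earlier in this diff:

    #include <stdint.h>

    static void shr32_by_vector(uint32_t *d, const uint32_t *a,
                                const uint32_t *b, int n)
    {
        for (int i = 0; i < n; i++) {
            d[i] = a[i] >> (b[i] & 31);   /* logical shift, count masked to 0..31 */
        }
    }

    static void sar32_by_vector(int32_t *d, const int32_t *a,
                                const uint32_t *b, int n)
    {
        for (int i = 0; i < n; i++) {
            /* arithmetic shift; like the helpers, this relies on the compiler
             * implementing >> on signed values as an arithmetic shift */
            d[i] = a[i] >> (b[i] & 31);
        }
    }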
+DEF_HELPER_FLAGS_4(gvec_sar32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sar64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_rotl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotl64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_rotr8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotr16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotr32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_rotr64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_eq8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eq16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eq32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eq64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ne8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ne16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ne32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ne64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_lt8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_lt16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_lt32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_lt64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_le8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_le16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_le32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_le64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ltu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ltu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ltu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ltu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_leu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) diff --git a/accel/tcg/trace-events b/accel/tcg/trace-events new file mode 100644 index 000000000..59eab96f2 --- /dev/null +++ b/accel/tcg/trace-events @@ -0,0 +1,10 @@ +# See docs/devel/tracing.rst for syntax documentation. 
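The events in this file are turned into trace_<name>() calls by tracetool at build time; purely for illustration, a hand-written stand-in for the exec_tb event defined just below might look like the following (hypothetical stub, not generated code):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Prints the same information the exec_tb format string describes. */
    static void trace_exec_tb_stub(void *tb, uintptr_t pc)
    {
        printf("exec_tb tb:%p pc=0x%" PRIxPTR "\n", tb, pc);
    }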
+ +# TCG related tracing +# cpu-exec.c +exec_tb(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR +exec_tb_nocache(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR +exec_tb_exit(void *last_tb, unsigned int flags) "tb:%p flags=0x%x" + +# translate-all.c +translate_block(void *tb, uintptr_t pc, const void *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p" diff --git a/accel/tcg/trace.h b/accel/tcg/trace.h new file mode 100644 index 000000000..db61fad3c --- /dev/null +++ b/accel/tcg/trace.h @@ -0,0 +1 @@ +#include "trace/trace-accel_tcg.h" diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c new file mode 100644 index 000000000..bd71db59a --- /dev/null +++ b/accel/tcg/translate-all.c @@ -0,0 +1,2490 @@ +/* + * Host code generation + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#define NO_CPU_IO_DEFS +#include "trace.h" +#include "disas/disas.h" +#include "exec/exec-all.h" +#include "tcg/tcg.h" +#if defined(CONFIG_USER_ONLY) +#include "qemu.h" +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#include <sys/param.h> +#if __FreeBSD_version >= 700104 +#define HAVE_KINFO_GETVMMAP +#define sigqueue sigqueue_freebsd /* avoid redefinition */ +#include <sys/proc.h> +#include <machine/profile.h> +#define _KERNEL +#include <sys/user.h> +#undef _KERNEL +#undef sigqueue +#include <libutil.h> +#endif +#endif +#else +#include "exec/ram_addr.h" +#endif + +#include "exec/cputlb.h" +#include "exec/translate-all.h" +#include "qemu/bitmap.h" +#include "qemu/qemu-print.h" +#include "qemu/timer.h" +#include "qemu/main-loop.h" +#include "exec/log.h" +#include "sysemu/cpus.h" +#include "sysemu/cpu-timers.h" +#include "sysemu/tcg.h" +#include "qapi/error.h" +#include "hw/core/tcg-cpu-ops.h" +#include "tb-hash.h" +#include "tb-context.h" +#include "internal.h" + +/* #define DEBUG_TB_INVALIDATE */ +/* #define DEBUG_TB_FLUSH */ +/* make various TB consistency checks */ +/* #define DEBUG_TB_CHECK */ + +#ifdef DEBUG_TB_INVALIDATE +#define DEBUG_TB_INVALIDATE_GATE 1 +#else +#define DEBUG_TB_INVALIDATE_GATE 0 +#endif + +#ifdef DEBUG_TB_FLUSH +#define DEBUG_TB_FLUSH_GATE 1 +#else +#define DEBUG_TB_FLUSH_GATE 0 +#endif + +#if !defined(CONFIG_USER_ONLY) +/* TB consistency checks only implemented for usermode emulation. */ +#undef DEBUG_TB_CHECK +#endif + +#ifdef DEBUG_TB_CHECK +#define DEBUG_TB_CHECK_GATE 1 +#else +#define DEBUG_TB_CHECK_GATE 0 +#endif + +/* Access to the various translations structures need to be serialised via locks + * for consistency. + * In user-mode emulation access to the memory related structures are protected + * with mmap_lock. + * In !user-mode we use per-page locks. 
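The assert_memory_lock()/have_mmap_lock() machinery used just below boils down to "remember, per thread, whether we took the big lock"; a minimal standalone sketch of that pattern, with names invented for the example:

    #include <assert.h>
    #include <pthread.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
    static __thread int big_lock_count;     /* per-thread recursion count */

    static void big_lock_acquire(void)
    {
        if (big_lock_count++ == 0) {
            pthread_mutex_lock(&big_lock);
        }
    }

    static void big_lock_release(void)
    {
        if (--big_lock_count == 0) {
            pthread_mutex_unlock(&big_lock);
        }
    }

    #define assert_big_lock_held()  assert(big_lock_count > 0)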
+ */ +#ifdef CONFIG_SOFTMMU +#define assert_memory_lock() +#else +#define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) +#endif + +#define SMC_BITMAP_USE_THRESHOLD 10 + +typedef struct PageDesc { + /* list of TBs intersecting this ram page */ + uintptr_t first_tb; +#ifdef CONFIG_SOFTMMU + /* in order to optimize self modifying code, we count the number + of lookups we do to a given page to use a bitmap */ + unsigned long *code_bitmap; + unsigned int code_write_count; +#else + unsigned long flags; + void *target_data; +#endif +#ifndef CONFIG_USER_ONLY + QemuSpin lock; +#endif +} PageDesc; + +/** + * struct page_entry - page descriptor entry + * @pd: pointer to the &struct PageDesc of the page this entry represents + * @index: page index of the page + * @locked: whether the page is locked + * + * This struct helps us keep track of the locked state of a page, without + * bloating &struct PageDesc. + * + * A page lock protects accesses to all fields of &struct PageDesc. + * + * See also: &struct page_collection. + */ +struct page_entry { + PageDesc *pd; + tb_page_addr_t index; + bool locked; +}; + +/** + * struct page_collection - tracks a set of pages (i.e. &struct page_entry's) + * @tree: Binary search tree (BST) of the pages, with key == page index + * @max: Pointer to the page in @tree with the highest page index + * + * To avoid deadlock we lock pages in ascending order of page index. + * When operating on a set of pages, we need to keep track of them so that + * we can lock them in order and also unlock them later. For this we collect + * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the + * @tree implementation we use does not provide an O(1) operation to obtain the + * highest-ranked element, we use @max to keep track of the inserted page + * with the highest index. This is valuable because if a page is not in + * the tree and its index is higher than @max's, then we can lock it + * without breaking the locking order rule. + * + * Note on naming: 'struct page_set' would be shorter, but we already have a few + * page_set_*() helpers, so page_collection is used instead to avoid confusion. + * + * See also: page_collection_lock(). + */ +struct page_collection { + GTree *tree; + struct page_entry *max; +}; + +/* list iterators for lists of tagged pointers in TranslationBlock */ +#define TB_FOR_EACH_TAGGED(head, tb, n, field) \ + for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \ + tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \ + tb = (TranslationBlock *)((uintptr_t)tb & ~1)) + +#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \ + TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) + +#define TB_FOR_EACH_JMP(head_tb, tb, n) \ + TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next) + +/* + * In system mode we want L1_MAP to be based on ram offsets, + * while in user mode we want it to be based on virtual addresses. + * + * TODO: For user mode, see the caveat re host vs guest virtual + * address spaces near GUEST_ADDR_MAX. + */ +#if !defined(CONFIG_USER_ONLY) +#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS +# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS +#else +# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS +#endif +#else +# define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS) +#endif + +/* Size of the L2 (and L3, etc) page tables. 
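To make these sizes concrete: with V_L2_BITS = 10, a 12-bit target page and an assumed 48-bit address space, the split computed by page_table_config_init() further down works out as follows (toy arithmetic only, nothing here touches QEMU state):

    #include <assert.h>

    enum { ADDR_BITS = 48, PAGE_BITS = 12, L2_BITS = 10, L1_MIN_BITS = 4 };

    static void split_example(void)
    {
        int l1_bits = (ADDR_BITS - PAGE_BITS) % L2_BITS;     /* 36 % 10 = 6 */
        if (l1_bits < L1_MIN_BITS) {
            l1_bits += L2_BITS;                              /* keep L1 usefully sized */
        }
        int l1_size   = 1 << l1_bits;                        /* 64 L1 entries */
        int l1_shift  = ADDR_BITS - PAGE_BITS - l1_bits;     /* 30 */
        int l2_levels = l1_shift / L2_BITS - 1;              /* 2 intermediate levels */

        /* 6 (L1) + 2 * 10 (intermediate) + 10 (leaf) = 36 page-index bits */
        assert(l1_size == 64 && l1_shift == 30 && l2_levels == 2);
    }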
*/ +#define V_L2_BITS 10 +#define V_L2_SIZE (1 << V_L2_BITS) + +/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */ +QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > + sizeof_field(TranslationBlock, trace_vcpu_dstate) + * BITS_PER_BYTE); + +/* + * L1 Mapping properties + */ +static int v_l1_size; +static int v_l1_shift; +static int v_l2_levels; + +/* The bottom level has pointers to PageDesc, and is indexed by + * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. + */ +#define V_L1_MIN_BITS 4 +#define V_L1_MAX_BITS (V_L2_BITS + 3) +#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) + +static void *l1_map[V_L1_MAX_SIZE]; + +TBContext tb_ctx; + +static void page_table_config_init(void) +{ + uint32_t v_l1_bits; + + assert(TARGET_PAGE_BITS); + /* The bits remaining after N lower levels of page tables. */ + v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS; + if (v_l1_bits < V_L1_MIN_BITS) { + v_l1_bits += V_L2_BITS; + } + + v_l1_size = 1 << v_l1_bits; + v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits; + v_l2_levels = v_l1_shift / V_L2_BITS - 1; + + assert(v_l1_bits <= V_L1_MAX_BITS); + assert(v_l1_shift % V_L2_BITS == 0); + assert(v_l2_levels >= 0); +} + +/* Encode VAL as a signed leb128 sequence at P. + Return P incremented past the encoded value. */ +static uint8_t *encode_sleb128(uint8_t *p, target_long val) +{ + int more, byte; + + do { + byte = val & 0x7f; + val >>= 7; + more = !((val == 0 && (byte & 0x40) == 0) + || (val == -1 && (byte & 0x40) != 0)); + if (more) { + byte |= 0x80; + } + *p++ = byte; + } while (more); + + return p; +} + +/* Decode a signed leb128 sequence at *PP; increment *PP past the + decoded value. Return the decoded value. */ +static target_long decode_sleb128(const uint8_t **pp) +{ + const uint8_t *p = *pp; + target_long val = 0; + int byte, shift = 0; + + do { + byte = *p++; + val |= (target_ulong)(byte & 0x7f) << shift; + shift += 7; + } while (byte & 0x80); + if (shift < TARGET_LONG_BITS && (byte & 0x40)) { + val |= -(target_ulong)1 << shift; + } + + *pp = p; + return val; +} + +/* Encode the data collected about the instructions while compiling TB. + Place the data at BLOCK, and return the number of bytes consumed. + + The logical table consists of TARGET_INSN_START_WORDS target_ulong's, + which come from the target's insn_start data, followed by a uintptr_t + which comes from the host pc of the end of the code implementing the insn. + + Each line of the table is encoded as sleb128 deltas from the previous + line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }. + That is, the first column is seeded with the guest pc, the last column + with the host pc, and the middle columns with zeros. */ + +static int encode_search(TranslationBlock *tb, uint8_t *block) +{ + uint8_t *highwater = tcg_ctx->code_gen_highwater; + uint8_t *p = block; + int i, j, n; + + for (i = 0, n = tb->icount; i < n; ++i) { + target_ulong prev; + + for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { + if (i == 0) { + prev = (j == 0 ? tb->pc : 0); + } else { + prev = tcg_ctx->gen_insn_data[i - 1][j]; + } + p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev); + } + prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]); + p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev); + + /* Test for (pending) buffer overflow. The assumption is that any + one row beginning below the high water mark cannot overrun + the buffer completely. 
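(As a standalone illustration of the sleb128 primitive the row encoding builds on, here is the same encode/decode pair reduced to plain int64_t with a round-trip check; the arithmetic right shift on negative values is the same compiler assumption the code above makes.)

    #include <assert.h>
    #include <stdint.h>

    static uint8_t *enc_sleb128(uint8_t *p, int64_t val)
    {
        int more;
        do {
            uint8_t byte = val & 0x7f;
            val >>= 7;                     /* assumed arithmetic shift */
            more = !((val == 0 && !(byte & 0x40)) ||
                     (val == -1 && (byte & 0x40)));
            if (more) {
                byte |= 0x80;
            }
            *p++ = byte;
        } while (more);
        return p;
    }

    static int64_t dec_sleb128(const uint8_t **pp)
    {
        const uint8_t *p = *pp;
        uint64_t val = 0;
        int shift = 0;
        uint8_t byte;

        do {
            byte = *p++;
            val |= (uint64_t)(byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        if (shift < 64 && (byte & 0x40)) {
            val |= -(uint64_t)1 << shift;  /* sign-extend the last group */
        }
        *pp = p;
        return (int64_t)val;
    }

    static void round_trip(void)
    {
        uint8_t buf[16];
        const uint8_t *q = buf;
        uint8_t *end = enc_sleb128(buf, -12345);
        assert(dec_sleb128(&q) == -12345 && q == end);
    }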
Thus we can test for overflow after + encoding a row without having to check during encoding. */ + if (unlikely(p > highwater)) { + return -1; + } + } + + return p - block; +} + +/* The cpu state corresponding to 'searched_pc' is restored. + * When reset_icount is true, current TB will be interrupted and + * icount should be recalculated. + */ +static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, + uintptr_t searched_pc, bool reset_icount) +{ + target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; + uintptr_t host_pc = (uintptr_t)tb->tc.ptr; + CPUArchState *env = cpu->env_ptr; + const uint8_t *p = tb->tc.ptr + tb->tc.size; + int i, j, num_insns = tb->icount; +#ifdef CONFIG_PROFILER + TCGProfile *prof = &tcg_ctx->prof; + int64_t ti = profile_getclock(); +#endif + + searched_pc -= GETPC_ADJ; + + if (searched_pc < host_pc) { + return -1; + } + + /* Reconstruct the stored insn data while looking for the point at + which the end of the insn exceeds the searched_pc. */ + for (i = 0; i < num_insns; ++i) { + for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { + data[j] += decode_sleb128(&p); + } + host_pc += decode_sleb128(&p); + if (host_pc > searched_pc) { + goto found; + } + } + return -1; + + found: + if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) { + assert(icount_enabled()); + /* Reset the cycle counter to the start of the block + and shift if to the number of actually executed instructions */ + cpu_neg(cpu)->icount_decr.u16.low += num_insns - i; + } + restore_state_to_opc(env, tb, data); + +#ifdef CONFIG_PROFILER + qatomic_set(&prof->restore_time, + prof->restore_time + profile_getclock() - ti); + qatomic_set(&prof->restore_count, prof->restore_count + 1); +#endif + return 0; +} + +bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) +{ + /* + * The host_pc has to be in the rx region of the code buffer. + * If it is not we will not be able to resolve it here. + * The two cases where host_pc will not be correct are: + * + * - fault during translation (instruction fetch) + * - fault from helper (not using GETPC() macro) + * + * Either way we need return early as we can't resolve it here. 
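(The restore path above is, at its core, a linear scan over per-instruction deltas; a compact sketch with an invented flat record layout stands in for the sleb128 stream described earlier:)

    #include <stdint.h>

    /* Hypothetical record: one entry per guest instruction in the block. */
    struct insn_delta {
        int64_t  guest_pc_delta;   /* change in guest pc for this insn */
        uint64_t host_code_len;    /* bytes of host code it generated */
    };

    /* Returns the index of the instruction whose host code covers
     * searched_pc, or -1 if it lies past the end of the block. */
    static int find_covering_insn(const struct insn_delta *rec, int n,
                                  uint64_t guest_pc, uint64_t host_pc,
                                  uint64_t searched_pc, uint64_t *found_guest_pc)
    {
        for (int i = 0; i < n; i++) {
            guest_pc += rec[i].guest_pc_delta;
            host_pc  += rec[i].host_code_len;
            if (host_pc > searched_pc) {
                *found_guest_pc = guest_pc;
                return i;
            }
        }
        return -1;
    }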
+ */ + if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) { + TranslationBlock *tb = tcg_tb_lookup(host_pc); + if (tb) { + cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit); + return true; + } + } + return false; +} + +void page_init(void) +{ + page_size_init(); + page_table_config_init(); + +#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) + { +#ifdef HAVE_KINFO_GETVMMAP + struct kinfo_vmentry *freep; + int i, cnt; + + freep = kinfo_getvmmap(getpid(), &cnt); + if (freep) { + mmap_lock(); + for (i = 0; i < cnt; i++) { + unsigned long startaddr, endaddr; + + startaddr = freep[i].kve_start; + endaddr = freep[i].kve_end; + if (h2g_valid(startaddr)) { + startaddr = h2g(startaddr) & TARGET_PAGE_MASK; + + if (h2g_valid(endaddr)) { + endaddr = h2g(endaddr); + page_set_flags(startaddr, endaddr, PAGE_RESERVED); + } else { +#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS + endaddr = ~0ul; + page_set_flags(startaddr, endaddr, PAGE_RESERVED); +#endif + } + } + } + free(freep); + mmap_unlock(); + } +#else + FILE *f; + + last_brk = (unsigned long)sbrk(0); + + f = fopen("/compat/linux/proc/self/maps", "r"); + if (f) { + mmap_lock(); + + do { + unsigned long startaddr, endaddr; + int n; + + n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); + + if (n == 2 && h2g_valid(startaddr)) { + startaddr = h2g(startaddr) & TARGET_PAGE_MASK; + + if (h2g_valid(endaddr)) { + endaddr = h2g(endaddr); + } else { + endaddr = ~0ul; + } + page_set_flags(startaddr, endaddr, PAGE_RESERVED); + } + } while (!feof(f)); + + fclose(f); + mmap_unlock(); + } +#endif + } +#endif +} + +static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) +{ + PageDesc *pd; + void **lp; + int i; + + /* Level 1. Always allocated. */ + lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1)); + + /* Level 2..N-1. 
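(The table walk that follows uses a common lock-free idiom: allocate a level, try to publish it with a compare-and-swap, and discard the allocation if another thread won the race. The same idea with plain C11 atomics, as a sketch with error handling elided:)

    #include <stdatomic.h>
    #include <stdlib.h>

    static void *get_or_publish(_Atomic(void *) *slot, size_t size)
    {
        void *p = atomic_load(slot);
        if (p == NULL) {
            void *fresh = calloc(1, size);
            void *expected = NULL;
            if (atomic_compare_exchange_strong(slot, &expected, fresh)) {
                p = fresh;        /* we installed the new level */
            } else {
                free(fresh);      /* lost the race: use the winner's node */
                p = expected;
            }
        }
        return p;
    }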
*/ + for (i = v_l2_levels; i > 0; i--) { + void **p = qatomic_rcu_read(lp); + + if (p == NULL) { + void *existing; + + if (!alloc) { + return NULL; + } + p = g_new0(void *, V_L2_SIZE); + existing = qatomic_cmpxchg(lp, NULL, p); + if (unlikely(existing)) { + g_free(p); + p = existing; + } + } + + lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); + } + + pd = qatomic_rcu_read(lp); + if (pd == NULL) { + void *existing; + + if (!alloc) { + return NULL; + } + pd = g_new0(PageDesc, V_L2_SIZE); +#ifndef CONFIG_USER_ONLY + { + int i; + + for (i = 0; i < V_L2_SIZE; i++) { + qemu_spin_init(&pd[i].lock); + } + } +#endif + existing = qatomic_cmpxchg(lp, NULL, pd); + if (unlikely(existing)) { +#ifndef CONFIG_USER_ONLY + { + int i; + + for (i = 0; i < V_L2_SIZE; i++) { + qemu_spin_destroy(&pd[i].lock); + } + } +#endif + g_free(pd); + pd = existing; + } + } + + return pd + (index & (V_L2_SIZE - 1)); +} + +static inline PageDesc *page_find(tb_page_addr_t index) +{ + return page_find_alloc(index, 0); +} + +static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, + PageDesc **ret_p2, tb_page_addr_t phys2, int alloc); + +/* In user-mode page locks aren't used; mmap_lock is enough */ +#ifdef CONFIG_USER_ONLY + +#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock()) + +static inline void page_lock(PageDesc *pd) +{ } + +static inline void page_unlock(PageDesc *pd) +{ } + +static inline void page_lock_tb(const TranslationBlock *tb) +{ } + +static inline void page_unlock_tb(const TranslationBlock *tb) +{ } + +struct page_collection * +page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) +{ + return NULL; +} + +void page_collection_unlock(struct page_collection *set) +{ } +#else /* !CONFIG_USER_ONLY */ + +#ifdef CONFIG_DEBUG_TCG + +static __thread GHashTable *ht_pages_locked_debug; + +static void ht_pages_locked_debug_init(void) +{ + if (ht_pages_locked_debug) { + return; + } + ht_pages_locked_debug = g_hash_table_new(NULL, NULL); +} + +static bool page_is_locked(const PageDesc *pd) +{ + PageDesc *found; + + ht_pages_locked_debug_init(); + found = g_hash_table_lookup(ht_pages_locked_debug, pd); + return !!found; +} + +static void page_lock__debug(PageDesc *pd) +{ + ht_pages_locked_debug_init(); + g_assert(!page_is_locked(pd)); + g_hash_table_insert(ht_pages_locked_debug, pd, pd); +} + +static void page_unlock__debug(const PageDesc *pd) +{ + bool removed; + + ht_pages_locked_debug_init(); + g_assert(page_is_locked(pd)); + removed = g_hash_table_remove(ht_pages_locked_debug, pd); + g_assert(removed); +} + +static void +do_assert_page_locked(const PageDesc *pd, const char *file, int line) +{ + if (unlikely(!page_is_locked(pd))) { + error_report("assert_page_lock: PageDesc %p not locked @ %s:%d", + pd, file, line); + abort(); + } +} + +#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__) + +void assert_no_pages_locked(void) +{ + ht_pages_locked_debug_init(); + g_assert(g_hash_table_size(ht_pages_locked_debug) == 0); +} + +#else /* !CONFIG_DEBUG_TCG */ + +#define assert_page_locked(pd) + +static inline void page_lock__debug(const PageDesc *pd) +{ +} + +static inline void page_unlock__debug(const PageDesc *pd) +{ +} + +#endif /* CONFIG_DEBUG_TCG */ + +static inline void page_lock(PageDesc *pd) +{ + page_lock__debug(pd); + qemu_spin_lock(&pd->lock); +} + +static inline void page_unlock(PageDesc *pd) +{ + qemu_spin_unlock(&pd->lock); + page_unlock__debug(pd); +} + +/* lock the page(s) of a TB in the correct acquisition order */ +static inline void 
page_lock_tb(const TranslationBlock *tb) +{ + page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0); +} + +static inline void page_unlock_tb(const TranslationBlock *tb) +{ + PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); + + page_unlock(p1); + if (unlikely(tb->page_addr[1] != -1)) { + PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); + + if (p2 != p1) { + page_unlock(p2); + } + } +} + +static inline struct page_entry * +page_entry_new(PageDesc *pd, tb_page_addr_t index) +{ + struct page_entry *pe = g_malloc(sizeof(*pe)); + + pe->index = index; + pe->pd = pd; + pe->locked = false; + return pe; +} + +static void page_entry_destroy(gpointer p) +{ + struct page_entry *pe = p; + + g_assert(pe->locked); + page_unlock(pe->pd); + g_free(pe); +} + +/* returns false on success */ +static bool page_entry_trylock(struct page_entry *pe) +{ + bool busy; + + busy = qemu_spin_trylock(&pe->pd->lock); + if (!busy) { + g_assert(!pe->locked); + pe->locked = true; + page_lock__debug(pe->pd); + } + return busy; +} + +static void do_page_entry_lock(struct page_entry *pe) +{ + page_lock(pe->pd); + g_assert(!pe->locked); + pe->locked = true; +} + +static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data) +{ + struct page_entry *pe = value; + + do_page_entry_lock(pe); + return FALSE; +} + +static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data) +{ + struct page_entry *pe = value; + + if (pe->locked) { + pe->locked = false; + page_unlock(pe->pd); + } + return FALSE; +} + +/* + * Trylock a page, and if successful, add the page to a collection. + * Returns true ("busy") if the page could not be locked; false otherwise. + */ +static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr) +{ + tb_page_addr_t index = addr >> TARGET_PAGE_BITS; + struct page_entry *pe; + PageDesc *pd; + + pe = g_tree_lookup(set->tree, &index); + if (pe) { + return false; + } + + pd = page_find(index); + if (pd == NULL) { + return false; + } + + pe = page_entry_new(pd, index); + g_tree_insert(set->tree, &pe->index, pe); + + /* + * If this is either (1) the first insertion or (2) a page whose index + * is higher than any other so far, just lock the page and move on. + */ + if (set->max == NULL || pe->index > set->max->index) { + set->max = pe; + do_page_entry_lock(pe); + return false; + } + /* + * Try to acquire out-of-order lock; if busy, return busy so that we acquire + * locks in order. + */ + return page_entry_trylock(pe); +} + +static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata) +{ + tb_page_addr_t a = *(const tb_page_addr_t *)ap; + tb_page_addr_t b = *(const tb_page_addr_t *)bp; + + if (a == b) { + return 0; + } else if (a < b) { + return -1; + } + return 1; +} + +/* + * Lock a range of pages ([@start,@end[) as well as the pages of all + * intersecting TBs. + * Locking order: acquire locks in ascending order of page index. 
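The ascending-index rule above is the classic deadlock-avoidance discipline; reduced to two plain mutexes it looks like this (sketch only):

    #include <pthread.h>

    /* Always take the lower-indexed lock first, so two threads needing the
     * same pair can never each hold one lock and wait on the other. */
    static void lock_pair_in_order(pthread_mutex_t *locks,
                                   unsigned idx_a, unsigned idx_b)
    {
        if (idx_a == idx_b) {
            pthread_mutex_lock(&locks[idx_a]);
        } else if (idx_a < idx_b) {
            pthread_mutex_lock(&locks[idx_a]);
            pthread_mutex_lock(&locks[idx_b]);
        } else {
            pthread_mutex_lock(&locks[idx_b]);
            pthread_mutex_lock(&locks[idx_a]);
        }
    }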
+ */ +struct page_collection * +page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) +{ + struct page_collection *set = g_malloc(sizeof(*set)); + tb_page_addr_t index; + PageDesc *pd; + + start >>= TARGET_PAGE_BITS; + end >>= TARGET_PAGE_BITS; + g_assert(start <= end); + + set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL, + page_entry_destroy); + set->max = NULL; + assert_no_pages_locked(); + + retry: + g_tree_foreach(set->tree, page_entry_lock, NULL); + + for (index = start; index <= end; index++) { + TranslationBlock *tb; + int n; + + pd = page_find(index); + if (pd == NULL) { + continue; + } + if (page_trylock_add(set, index << TARGET_PAGE_BITS)) { + g_tree_foreach(set->tree, page_entry_unlock, NULL); + goto retry; + } + assert_page_locked(pd); + PAGE_FOR_EACH_TB(pd, tb, n) { + if (page_trylock_add(set, tb->page_addr[0]) || + (tb->page_addr[1] != -1 && + page_trylock_add(set, tb->page_addr[1]))) { + /* drop all locks, and reacquire in order */ + g_tree_foreach(set->tree, page_entry_unlock, NULL); + goto retry; + } + } + } + return set; +} + +void page_collection_unlock(struct page_collection *set) +{ + /* entries are unlocked and freed via page_entry_destroy */ + g_tree_destroy(set->tree); + g_free(set); +} + +#endif /* !CONFIG_USER_ONLY */ + +static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, + PageDesc **ret_p2, tb_page_addr_t phys2, int alloc) +{ + PageDesc *p1, *p2; + tb_page_addr_t page1; + tb_page_addr_t page2; + + assert_memory_lock(); + g_assert(phys1 != -1); + + page1 = phys1 >> TARGET_PAGE_BITS; + page2 = phys2 >> TARGET_PAGE_BITS; + + p1 = page_find_alloc(page1, alloc); + if (ret_p1) { + *ret_p1 = p1; + } + if (likely(phys2 == -1)) { + page_lock(p1); + return; + } else if (page1 == page2) { + page_lock(p1); + if (ret_p2) { + *ret_p2 = p1; + } + return; + } + p2 = page_find_alloc(page2, alloc); + if (ret_p2) { + *ret_p2 = p2; + } + if (page1 < page2) { + page_lock(p1); + page_lock(p2); + } else { + page_lock(p2); + page_lock(p1); + } +} + +static bool tb_cmp(const void *ap, const void *bp) +{ + const TranslationBlock *a = ap; + const TranslationBlock *b = bp; + + return a->pc == b->pc && + a->cs_base == b->cs_base && + a->flags == b->flags && + (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && + a->trace_vcpu_dstate == b->trace_vcpu_dstate && + a->page_addr[0] == b->page_addr[0] && + a->page_addr[1] == b->page_addr[1]; +} + +void tb_htable_init(void) +{ + unsigned int mode = QHT_MODE_AUTO_RESIZE; + + qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); +} + +/* call with @p->lock held */ +static inline void invalidate_page_bitmap(PageDesc *p) +{ + assert_page_locked(p); +#ifdef CONFIG_SOFTMMU + g_free(p->code_bitmap); + p->code_bitmap = NULL; + p->code_write_count = 0; +#endif +} + +/* Set to NULL all the 'first_tb' fields in all PageDescs. 
*/ +static void page_flush_tb_1(int level, void **lp) +{ + int i; + + if (*lp == NULL) { + return; + } + if (level == 0) { + PageDesc *pd = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_lock(&pd[i]); + pd[i].first_tb = (uintptr_t)NULL; + invalidate_page_bitmap(pd + i); + page_unlock(&pd[i]); + } + } else { + void **pp = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_flush_tb_1(level - 1, pp + i); + } + } +} + +static void page_flush_tb(void) +{ + int i, l1_sz = v_l1_size; + + for (i = 0; i < l1_sz; i++) { + page_flush_tb_1(v_l2_levels, l1_map + i); + } +} + +static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data) +{ + const TranslationBlock *tb = value; + size_t *size = data; + + *size += tb->tc.size; + return false; +} + +/* flush all the translation blocks */ +static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) +{ + bool did_flush = false; + + mmap_lock(); + /* If it is already been done on request of another CPU, + * just retry. + */ + if (tb_ctx.tb_flush_count != tb_flush_count.host_int) { + goto done; + } + did_flush = true; + + if (DEBUG_TB_FLUSH_GATE) { + size_t nb_tbs = tcg_nb_tbs(); + size_t host_size = 0; + + tcg_tb_foreach(tb_host_size_iter, &host_size); + printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n", + tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0); + } + + CPU_FOREACH(cpu) { + cpu_tb_jmp_cache_clear(cpu); + } + + qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); + page_flush_tb(); + + tcg_region_reset_all(); + /* XXX: flush processor icache at this point if cache flush is + expensive */ + qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); + +done: + mmap_unlock(); + if (did_flush) { + qemu_plugin_flush_cb(); + } +} + +void tb_flush(CPUState *cpu) +{ + if (tcg_enabled()) { + unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count); + + if (cpu_in_exclusive_context(cpu)) { + do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); + } else { + async_safe_run_on_cpu(cpu, do_tb_flush, + RUN_ON_CPU_HOST_INT(tb_flush_count)); + } + } +} + +/* + * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only, + * so in order to prevent bit rot we compile them unconditionally in user-mode, + * and let the optimizer get rid of them by wrapping their user-only callers + * with if (DEBUG_TB_CHECK_GATE). + */ +#ifdef CONFIG_USER_ONLY + +static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp) +{ + TranslationBlock *tb = p; + target_ulong addr = *(target_ulong *)userp; + + if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) { + printf("ERROR invalidate: address=" TARGET_FMT_lx + " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size); + } +} + +/* verify that all the pages have correct rights for code + * + * Called with mmap_lock held. 
+ */ +static void tb_invalidate_check(target_ulong address) +{ + address &= TARGET_PAGE_MASK; + qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address); +} + +static void do_tb_page_check(void *p, uint32_t hash, void *userp) +{ + TranslationBlock *tb = p; + int flags1, flags2; + + flags1 = page_get_flags(tb->pc); + flags2 = page_get_flags(tb->pc + tb->size - 1); + if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { + printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", + (long)tb->pc, tb->size, flags1, flags2); + } +} + +/* verify that all the pages have correct rights for code */ +static void tb_page_check(void) +{ + qht_iter(&tb_ctx.htable, do_tb_page_check, NULL); +} + +#endif /* CONFIG_USER_ONLY */ + +/* + * user-mode: call with mmap_lock held + * !user-mode: call with @pd->lock held + */ +static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) +{ + TranslationBlock *tb1; + uintptr_t *pprev; + unsigned int n1; + + assert_page_locked(pd); + pprev = &pd->first_tb; + PAGE_FOR_EACH_TB(pd, tb1, n1) { + if (tb1 == tb) { + *pprev = tb1->page_next[n1]; + return; + } + pprev = &tb1->page_next[n1]; + } + g_assert_not_reached(); +} + +/* remove @orig from its @n_orig-th jump list */ +static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) +{ + uintptr_t ptr, ptr_locked; + TranslationBlock *dest; + TranslationBlock *tb; + uintptr_t *pprev; + int n; + + /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ + ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1); + dest = (TranslationBlock *)(ptr & ~1); + if (dest == NULL) { + return; + } + + qemu_spin_lock(&dest->jmp_lock); + /* + * While acquiring the lock, the jump might have been removed if the + * destination TB was invalidated; check again. + */ + ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]); + if (ptr_locked != ptr) { + qemu_spin_unlock(&dest->jmp_lock); + /* + * The only possibility is that the jump was unlinked via + * tb_jump_unlink(dest). Seeing here another destination would be a bug, + * because we set the LSB above. + */ + g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); + return; + } + /* + * We first acquired the lock, and since the destination pointer matches, + * we know for sure that @orig is in the jmp list. + */ + pprev = &dest->jmp_list_head; + TB_FOR_EACH_JMP(dest, tb, n) { + if (tb == orig && n == n_orig) { + *pprev = tb->jmp_list_next[n]; + /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ + qemu_spin_unlock(&dest->jmp_lock); + return; + } + pprev = &tb->jmp_list_next[n]; + } + g_assert_not_reached(); +} + +/* reset the jump entry 'n' of a TB so that it is not chained to + another TB */ +static inline void tb_reset_jump(TranslationBlock *tb, int n) +{ + uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]); + tb_set_jmp_target(tb, n, addr); +} + +/* remove any jumps to the TB */ +static inline void tb_jmp_unlink(TranslationBlock *dest) +{ + TranslationBlock *tb; + int n; + + qemu_spin_lock(&dest->jmp_lock); + + TB_FOR_EACH_JMP(dest, tb, n) { + tb_reset_jump(tb, n); + qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); + /* No need to clear the list entry; setting the dest ptr is enough */ + } + dest->jmp_list_head = (uintptr_t)NULL; + + qemu_spin_unlock(&dest->jmp_lock); +} + +/* + * In user-mode, call with mmap_lock held. + * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' + * locks held. 
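Several of the lists threaded through the functions above (page_next, jmp_list_next, jmp_dest) pack a small tag into bit 0 of an aligned pointer; the trick in isolation, with hypothetical names:

    #include <assert.h>
    #include <stdint.h>

    /* Works because the structures pointed at are more than 2-byte aligned,
     * so bit 0 of any valid pointer is always zero and can carry a flag. */
    static inline uintptr_t tag_ptr(void *p, unsigned bit)
    {
        assert(((uintptr_t)p & 1) == 0 && bit <= 1);
        return (uintptr_t)p | bit;
    }

    static inline void *untag_ptr(uintptr_t v)
    {
        return (void *)(v & ~(uintptr_t)1);
    }

    static inline unsigned ptr_tag(uintptr_t v)
    {
        return v & 1;
    }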
+ */ +static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) +{ + CPUState *cpu; + PageDesc *p; + uint32_t h; + tb_page_addr_t phys_pc; + uint32_t orig_cflags = tb_cflags(tb); + + assert_memory_lock(); + + /* make sure no further incoming jumps will be chained to this TB */ + qemu_spin_lock(&tb->jmp_lock); + qatomic_set(&tb->cflags, tb->cflags | CF_INVALID); + qemu_spin_unlock(&tb->jmp_lock); + + /* remove the TB from the hash list */ + phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); + h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags, + tb->trace_vcpu_dstate); + if (!qht_remove(&tb_ctx.htable, tb, h)) { + return; + } + + /* remove the TB from the page list */ + if (rm_from_page_list) { + p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); + tb_page_remove(p, tb); + invalidate_page_bitmap(p); + if (tb->page_addr[1] != -1) { + p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); + tb_page_remove(p, tb); + invalidate_page_bitmap(p); + } + } + + /* remove the TB from the hash list */ + h = tb_jmp_cache_hash_func(tb->pc); + CPU_FOREACH(cpu) { + if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) { + qatomic_set(&cpu->tb_jmp_cache[h], NULL); + } + } + + /* suppress this TB from the two jump lists */ + tb_remove_from_jmp_list(tb, 0); + tb_remove_from_jmp_list(tb, 1); + + /* suppress any remaining jumps to this TB */ + tb_jmp_unlink(tb); + + qatomic_set(&tb_ctx.tb_phys_invalidate_count, + tb_ctx.tb_phys_invalidate_count + 1); +} + +static void tb_phys_invalidate__locked(TranslationBlock *tb) +{ + qemu_thread_jit_write(); + do_tb_phys_invalidate(tb, true); + qemu_thread_jit_execute(); +} + +/* invalidate one TB + * + * Called with mmap_lock held in user-mode. + */ +void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) +{ + if (page_addr == -1 && tb->page_addr[0] != -1) { + page_lock_tb(tb); + do_tb_phys_invalidate(tb, true); + page_unlock_tb(tb); + } else { + do_tb_phys_invalidate(tb, false); + } +} + +#ifdef CONFIG_SOFTMMU +/* call with @p->lock held */ +static void build_page_bitmap(PageDesc *p) +{ + int n, tb_start, tb_end; + TranslationBlock *tb; + + assert_page_locked(p); + p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); + + PAGE_FOR_EACH_TB(p, tb, n) { + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb->pc & ~TARGET_PAGE_MASK; + tb_end = tb_start + tb->size; + if (tb_end > TARGET_PAGE_SIZE) { + tb_end = TARGET_PAGE_SIZE; + } + } else { + tb_start = 0; + tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); + } + bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); + } +} +#endif + +/* add the tb in the target page and protect it if necessary + * + * Called with mmap_lock held for user-mode emulation. + * Called with @p->lock held in !user-mode. + */ +static inline void tb_page_add(PageDesc *p, TranslationBlock *tb, + unsigned int n, tb_page_addr_t page_addr) +{ +#ifndef CONFIG_USER_ONLY + bool page_already_protected; +#endif + + assert_page_locked(p); + + tb->page_addr[n] = page_addr; + tb->page_next[n] = p->first_tb; +#ifndef CONFIG_USER_ONLY + page_already_protected = p->first_tb != (uintptr_t)NULL; +#endif + p->first_tb = (uintptr_t)tb | n; + invalidate_page_bitmap(p); + +#if defined(CONFIG_USER_ONLY) + /* translator_loop() must have made all TB pages non-writable */ + assert(!(p->flags & PAGE_WRITE)); +#else + /* if some code is already present, then the pages are already + protected. 
So we handle the case where only the first TB is + allocated in a physical page */ + if (!page_already_protected) { + tlb_protect_code(page_addr); + } +#endif +} + +/* + * Add a new TB and link it to the physical page tables. phys_page2 is + * (-1) to indicate that only one page contains the TB. + * + * Called with mmap_lock held for user-mode emulation. + * + * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. + * Note that in !user-mode, another thread might have already added a TB + * for the same block of guest code that @tb corresponds to. In that case, + * the caller should discard the original @tb, and use instead the returned TB. + */ +static TranslationBlock * +tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, + tb_page_addr_t phys_page2) +{ + PageDesc *p; + PageDesc *p2 = NULL; + void *existing_tb = NULL; + uint32_t h; + + assert_memory_lock(); + tcg_debug_assert(!(tb->cflags & CF_INVALID)); + + /* + * Add the TB to the page list, acquiring first the pages's locks. + * We keep the locks held until after inserting the TB in the hash table, + * so that if the insertion fails we know for sure that the TBs are still + * in the page descriptors. + * Note that inserting into the hash table first isn't an option, since + * we can only insert TBs that are fully initialized. + */ + page_lock_pair(&p, phys_pc, &p2, phys_page2, 1); + tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK); + if (p2) { + tb_page_add(p2, tb, 1, phys_page2); + } else { + tb->page_addr[1] = -1; + } + + /* add in the hash table */ + h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags, + tb->trace_vcpu_dstate); + qht_insert(&tb_ctx.htable, tb, h, &existing_tb); + + /* remove TB from the page(s) if we couldn't insert it */ + if (unlikely(existing_tb)) { + tb_page_remove(p, tb); + invalidate_page_bitmap(p); + if (p2) { + tb_page_remove(p2, tb); + invalidate_page_bitmap(p2); + } + tb = existing_tb; + } + + if (p2 && p2 != p) { + page_unlock(p2); + } + page_unlock(p); + +#ifdef CONFIG_USER_ONLY + if (DEBUG_TB_CHECK_GATE) { + tb_page_check(); + } +#endif + return tb; +} + +/* Called with mmap_lock held for user mode emulation. */ +TranslationBlock *tb_gen_code(CPUState *cpu, + target_ulong pc, target_ulong cs_base, + uint32_t flags, int cflags) +{ + CPUArchState *env = cpu->env_ptr; + TranslationBlock *tb, *existing_tb; + tb_page_addr_t phys_pc, phys_page2; + target_ulong virt_page2; + tcg_insn_unit *gen_code_buf; + int gen_code_size, search_size, max_insns; +#ifdef CONFIG_PROFILER + TCGProfile *prof = &tcg_ctx->prof; + int64_t ti; +#endif + + assert_memory_lock(); + qemu_thread_jit_write(); + + phys_pc = get_page_addr_code(env, pc); + + if (phys_pc == -1) { + /* Generate a one-shot TB with 1 insn in it */ + cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1; + } + + max_insns = cflags & CF_COUNT_MASK; + if (max_insns == 0) { + max_insns = TCG_MAX_INSNS; + } + QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS); + + buffer_overflow: + tb = tcg_tb_alloc(tcg_ctx); + if (unlikely(!tb)) { + /* flush must be done */ + tb_flush(cpu); + mmap_unlock(); + /* Make the execution loop process the flush as soon as possible. 
*/ + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit(cpu); + } + + gen_code_buf = tcg_ctx->code_gen_ptr; + tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf); + tb->pc = pc; + tb->cs_base = cs_base; + tb->flags = flags; + tb->cflags = cflags; + tb->trace_vcpu_dstate = *cpu->trace_dstate; + tcg_ctx->tb_cflags = cflags; + tb_overflow: + +#ifdef CONFIG_PROFILER + /* includes aborted translations because of exceptions */ + qatomic_set(&prof->tb_count1, prof->tb_count1 + 1); + ti = profile_getclock(); +#endif + + gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0); + if (unlikely(gen_code_size != 0)) { + goto error_return; + } + + tcg_func_start(tcg_ctx); + + tcg_ctx->cpu = env_cpu(env); + gen_intermediate_code(cpu, tb, max_insns); + assert(tb->size != 0); + tcg_ctx->cpu = NULL; + max_insns = tb->icount; + + trace_translate_block(tb, tb->pc, tb->tc.ptr); + + /* generate machine code */ + tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; + tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID; + tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset; + if (TCG_TARGET_HAS_direct_jump) { + tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg; + tcg_ctx->tb_jmp_target_addr = NULL; + } else { + tcg_ctx->tb_jmp_insn_offset = NULL; + tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg; + } + +#ifdef CONFIG_PROFILER + qatomic_set(&prof->tb_count, prof->tb_count + 1); + qatomic_set(&prof->interm_time, + prof->interm_time + profile_getclock() - ti); + ti = profile_getclock(); +#endif + + gen_code_size = tcg_gen_code(tcg_ctx, tb); + if (unlikely(gen_code_size < 0)) { + error_return: + switch (gen_code_size) { + case -1: + /* + * Overflow of code_gen_buffer, or the current slice of it. + * + * TODO: We don't need to re-do gen_intermediate_code, nor + * should we re-do the tcg optimization currently hidden + * inside tcg_gen_code. All that should be required is to + * flush the TBs, allocate a new TB, re-initialize it per + * above, and re-do the actual code generation. + */ + qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT, + "Restarting code generation for " + "code_gen_buffer overflow\n"); + goto buffer_overflow; + + case -2: + /* + * The code generated for the TranslationBlock is too large. + * The maximum size allowed by the unwind info is 64k. + * There may be stricter constraints from relocations + * in the tcg backend. + * + * Try again with half as many insns as we attempted this time. + * If a single insn overflows, there's a bug somewhere... 
+ */ + assert(max_insns > 1); + max_insns /= 2; + qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT, + "Restarting code generation with " + "smaller translation block (max %d insns)\n", + max_insns); + goto tb_overflow; + + default: + g_assert_not_reached(); + } + } + search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size); + if (unlikely(search_size < 0)) { + goto buffer_overflow; + } + tb->tc.size = gen_code_size; + +#ifdef CONFIG_PROFILER + qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti); + qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size); + qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size); + qatomic_set(&prof->search_out_len, prof->search_out_len + search_size); +#endif + +#ifdef DEBUG_DISAS + if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && + qemu_log_in_addr_range(tb->pc)) { + FILE *logfile = qemu_log_lock(); + int code_size, data_size; + const tcg_target_ulong *rx_data_gen_ptr; + size_t chunk_start; + int insn = 0; + + if (tcg_ctx->data_gen_ptr) { + rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr); + code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr; + data_size = gen_code_size - code_size; + } else { + rx_data_gen_ptr = 0; + code_size = gen_code_size; + data_size = 0; + } + + /* Dump header and the first instruction */ + qemu_log("OUT: [size=%d]\n", gen_code_size); + qemu_log(" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n", + tcg_ctx->gen_insn_data[insn][0]); + chunk_start = tcg_ctx->gen_insn_end_off[insn]; + log_disas(tb->tc.ptr, chunk_start); + + /* + * Dump each instruction chunk, wrapping up empty chunks into + * the next instruction. The whole array is offset so the + * first entry is the beginning of the 2nd instruction. + */ + while (insn < tb->icount) { + size_t chunk_end = tcg_ctx->gen_insn_end_off[insn]; + if (chunk_end > chunk_start) { + qemu_log(" -- guest addr 0x" TARGET_FMT_lx "\n", + tcg_ctx->gen_insn_data[insn][0]); + log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start); + chunk_start = chunk_end; + } + insn++; + } + + if (chunk_start < code_size) { + qemu_log(" -- tb slow paths + alignment\n"); + log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start); + } + + /* Finally dump any data we may have after the block */ + if (data_size) { + int i; + qemu_log(" data: [size=%d]\n", data_size); + for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) { + if (sizeof(tcg_target_ulong) == 8) { + qemu_log("0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n", + (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]); + } else if (sizeof(tcg_target_ulong) == 4) { + qemu_log("0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n", + (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]); + } else { + qemu_build_not_reached(); + } + } + } + qemu_log("\n"); + qemu_log_flush(); + qemu_log_unlock(logfile); + } +#endif + + qatomic_set(&tcg_ctx->code_gen_ptr, (void *) + ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, + CODE_GEN_ALIGN)); + + /* init jump list */ + qemu_spin_init(&tb->jmp_lock); + tb->jmp_list_head = (uintptr_t)NULL; + tb->jmp_list_next[0] = (uintptr_t)NULL; + tb->jmp_list_next[1] = (uintptr_t)NULL; + tb->jmp_dest[0] = (uintptr_t)NULL; + tb->jmp_dest[1] = (uintptr_t)NULL; + + /* init original jump addresses which have been set during tcg_gen_code() */ + if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { + tb_reset_jump(tb, 0); + } + if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { + tb_reset_jump(tb, 1); + } + + /* + * If the TB is not associated 
with a physical RAM page then + * it must be a temporary one-insn TB, and we have nothing to do + * except fill in the page_addr[] fields. Return early before + * attempting to link to other TBs or add to the lookup table. + */ + if (phys_pc == -1) { + tb->page_addr[0] = tb->page_addr[1] = -1; + return tb; + } + + /* + * Insert TB into the corresponding region tree before publishing it + * through QHT. Otherwise rewinding happened in the TB might fail to + * lookup itself using host PC. + */ + tcg_tb_insert(tb); + + /* check next page if needed */ + virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; + phys_page2 = -1; + if ((pc & TARGET_PAGE_MASK) != virt_page2) { + phys_page2 = get_page_addr_code(env, virt_page2); + } + /* + * No explicit memory barrier is required -- tb_link_page() makes the + * TB visible in a consistent state. + */ + existing_tb = tb_link_page(tb, phys_pc, phys_page2); + /* if the TB already exists, discard what we just translated */ + if (unlikely(existing_tb != tb)) { + uintptr_t orig_aligned = (uintptr_t)gen_code_buf; + + orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize); + qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned); + tcg_tb_remove(tb); + return existing_tb; + } + return tb; +} + +/* + * @p must be non-NULL. + * user-mode: call with mmap_lock held. + * !user-mode: call with all @pages locked. + */ +static void +tb_invalidate_phys_page_range__locked(struct page_collection *pages, + PageDesc *p, tb_page_addr_t start, + tb_page_addr_t end, + uintptr_t retaddr) +{ + TranslationBlock *tb; + tb_page_addr_t tb_start, tb_end; + int n; +#ifdef TARGET_HAS_PRECISE_SMC + CPUState *cpu = current_cpu; + CPUArchState *env = NULL; + bool current_tb_not_found = retaddr != 0; + bool current_tb_modified = false; + TranslationBlock *current_tb = NULL; + target_ulong current_pc = 0; + target_ulong current_cs_base = 0; + uint32_t current_flags = 0; +#endif /* TARGET_HAS_PRECISE_SMC */ + + assert_page_locked(p); + +#if defined(TARGET_HAS_PRECISE_SMC) + if (cpu != NULL) { + env = cpu->env_ptr; + } +#endif + + /* we remove all the TBs in the range [start, end[ */ + /* XXX: see if in some cases it could be faster to invalidate all + the code */ + PAGE_FOR_EACH_TB(p, tb, n) { + assert_page_locked(p); + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); + tb_end = tb_start + tb->size; + } else { + tb_start = tb->page_addr[1]; + tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); + } + if (!(tb_end <= start || tb_start >= end)) { +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_not_found) { + current_tb_not_found = false; + /* now we have a real cpu fault */ + current_tb = tcg_tb_lookup(retaddr); + } + if (current_tb == tb && + (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { + /* + * If we are modifying the current TB, we must stop + * its execution. We could be more precise by checking + * that the modification is after the current PC, but it + * would require a specialized function to partially + * restore the CPU state. 
+ */ + current_tb_modified = true; + cpu_restore_state_from_tb(cpu, current_tb, retaddr, true); + cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base, + &current_flags); + } +#endif /* TARGET_HAS_PRECISE_SMC */ + tb_phys_invalidate__locked(tb); + } + } +#if !defined(CONFIG_USER_ONLY) + /* if no code remaining, no need to continue to use slow writes */ + if (!p->first_tb) { + invalidate_page_bitmap(p); + tlb_unprotect_code(start); + } +#endif +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_modified) { + page_collection_unlock(pages); + /* Force execution of one insn next time. */ + cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); + mmap_unlock(); + cpu_loop_exit_noexc(cpu); + } +#endif +} + +/* + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. NOTE: start and end must refer to the *same* physical page. + * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. + * + * Called with mmap_lock held for user-mode emulation + */ +void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end) +{ + struct page_collection *pages; + PageDesc *p; + + assert_memory_lock(); + + p = page_find(start >> TARGET_PAGE_BITS); + if (p == NULL) { + return; + } + pages = page_collection_lock(start, end); + tb_invalidate_phys_page_range__locked(pages, p, start, end, 0); + page_collection_unlock(pages); +} + +/* + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. NOTE: start and end may refer to *different* physical pages. + * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. + * + * Called with mmap_lock held for user-mode emulation. + */ +#ifdef CONFIG_SOFTMMU +void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end) +#else +void tb_invalidate_phys_range(target_ulong start, target_ulong end) +#endif +{ + struct page_collection *pages; + tb_page_addr_t next; + + assert_memory_lock(); + + pages = page_collection_lock(start, end); + for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + start < end; + start = next, next += TARGET_PAGE_SIZE) { + PageDesc *pd = page_find(start >> TARGET_PAGE_BITS); + tb_page_addr_t bound = MIN(next, end); + + if (pd == NULL) { + continue; + } + tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0); + } + page_collection_unlock(pages); +} + +#ifdef CONFIG_SOFTMMU +/* len must be <= 8 and start must be a multiple of len. + * Called via softmmu_template.h when code areas are written to with + * iothread mutex not held. + * + * Call with all @pages in the range [@start, @start + len[ locked. + */ +void tb_invalidate_phys_page_fast(struct page_collection *pages, + tb_page_addr_t start, int len, + uintptr_t retaddr) +{ + PageDesc *p; + + assert_memory_lock(); + + p = page_find(start >> TARGET_PAGE_BITS); + if (!p) { + return; + } + + assert_page_locked(p); + if (!p->code_bitmap && + ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { + build_page_bitmap(p); + } + if (p->code_bitmap) { + unsigned int nr; + unsigned long b; + + nr = start & ~TARGET_PAGE_MASK; + b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); + if (b & ((1 << len) - 1)) { + goto do_invalidate; + } + } else { + do_invalidate: + tb_invalidate_phys_page_range__locked(pages, p, start, start + len, + retaddr); + } +} +#else +/* Called with mmap_lock held.
If pc is not 0 then it indicates the + * host PC of the faulting store instruction that caused this invalidate. + * Returns true if the caller needs to abort execution of the current + * TB (because it was modified by this store and the guest CPU has + * precise-SMC semantics). + */ +static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc) +{ + TranslationBlock *tb; + PageDesc *p; + int n; +#ifdef TARGET_HAS_PRECISE_SMC + TranslationBlock *current_tb = NULL; + CPUState *cpu = current_cpu; + CPUArchState *env = NULL; + int current_tb_modified = 0; + target_ulong current_pc = 0; + target_ulong current_cs_base = 0; + uint32_t current_flags = 0; +#endif + + assert_memory_lock(); + + addr &= TARGET_PAGE_MASK; + p = page_find(addr >> TARGET_PAGE_BITS); + if (!p) { + return false; + } + +#ifdef TARGET_HAS_PRECISE_SMC + if (p->first_tb && pc != 0) { + current_tb = tcg_tb_lookup(pc); + } + if (cpu != NULL) { + env = cpu->env_ptr; + } +#endif + assert_page_locked(p); + PAGE_FOR_EACH_TB(p, tb, n) { +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb == tb && + (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { + /* If we are modifying the current TB, we must stop + its execution. We could be more precise by checking + that the modification is after the current PC, but it + would require a specialized function to partially + restore the CPU state */ + + current_tb_modified = 1; + cpu_restore_state_from_tb(cpu, current_tb, pc, true); + cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base, + &current_flags); + } +#endif /* TARGET_HAS_PRECISE_SMC */ + tb_phys_invalidate(tb, addr); + } + p->first_tb = (uintptr_t)NULL; +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_modified) { + /* Force execution of one insn next time. */ + cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); + return true; + } +#endif + + return false; +} +#endif + +/* user-mode: call with mmap_lock held */ +void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr) +{ + TranslationBlock *tb; + + assert_memory_lock(); + + tb = tcg_tb_lookup(retaddr); + if (tb) { + /* We can use retranslation to find the PC. */ + cpu_restore_state_from_tb(cpu, tb, retaddr, true); + tb_phys_invalidate(tb, -1); + } else { + /* The exception probably happened in a helper. The CPU state should + have been saved before calling it. Fetch the PC from there. */ + CPUArchState *env = cpu->env_ptr; + target_ulong pc, cs_base; + tb_page_addr_t addr; + uint32_t flags; + + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + addr = get_page_addr_code(env, pc); + if (addr != -1) { + tb_invalidate_phys_range(addr, addr + 1); + } + } +} + +#ifndef CONFIG_USER_ONLY +/* + * In deterministic execution mode, instructions doing device I/Os + * must be at the end of the TB. + * + * Called by softmmu_template.h, with iothread mutex not held. + */ +void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) +{ + TranslationBlock *tb; + CPUClass *cc; + uint32_t n; + + tb = tcg_tb_lookup(retaddr); + if (!tb) { + cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", + (void *)retaddr); + } + cpu_restore_state_from_tb(cpu, tb, retaddr, true); + + /* + * Some guests must re-execute the branch when re-executing a delay + * slot instruction. When this is the case, adjust icount and N + * to account for the re-execution of the branch.
+ */ + n = 1; + cc = CPU_GET_CLASS(cpu); + if (cc->tcg_ops->io_recompile_replay_branch && + cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) { + cpu_neg(cpu)->icount_decr.u16.low++; + n = 2; + } + + /* + * Exit the loop and potentially generate a new TB executing the + * just the I/O insns. We also limit instrumentation to memory + * operations only (which execute after completion) so we don't + * double instrument the instruction. + */ + cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n; + + qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, + "cpu_io_recompile: rewound execution of TB to " + TARGET_FMT_lx "\n", tb->pc); + + cpu_loop_exit_noexc(cpu); +} + +static void print_qht_statistics(struct qht_stats hst, GString *buf) +{ + uint32_t hgram_opts; + size_t hgram_bins; + char *hgram; + + if (!hst.head_buckets) { + return; + } + g_string_append_printf(buf, "TB hash buckets %zu/%zu " + "(%0.2f%% head buckets used)\n", + hst.used_head_buckets, hst.head_buckets, + (double)hst.used_head_buckets / + hst.head_buckets * 100); + + hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; + hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT; + if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) { + hgram_opts |= QDIST_PR_NODECIMAL; + } + hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); + g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. " + "Histogram: %s\n", + qdist_avg(&hst.occupancy) * 100, hgram); + g_free(hgram); + + hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; + hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain); + if (hgram_bins > 10) { + hgram_bins = 10; + } else { + hgram_bins = 0; + hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; + } + hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); + g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. " + "Histogram: %s\n", + qdist_avg(&hst.chain), hgram); + g_free(hgram); +} + +struct tb_tree_stats { + size_t nb_tbs; + size_t host_size; + size_t target_size; + size_t max_target_size; + size_t direct_jmp_count; + size_t direct_jmp2_count; + size_t cross_page; +}; + +static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) +{ + const TranslationBlock *tb = value; + struct tb_tree_stats *tst = data; + + tst->nb_tbs++; + tst->host_size += tb->tc.size; + tst->target_size += tb->size; + if (tb->size > tst->max_target_size) { + tst->max_target_size = tb->size; + } + if (tb->page_addr[1] != -1) { + tst->cross_page++; + } + if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { + tst->direct_jmp_count++; + if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { + tst->direct_jmp2_count++; + } + } + return false; +} + +void dump_exec_info(GString *buf) +{ + struct tb_tree_stats tst = {}; + struct qht_stats hst; + size_t nb_tbs, flush_full, flush_part, flush_elide; + + tcg_tb_foreach(tb_tree_stats_iter, &tst); + nb_tbs = tst.nb_tbs; + /* XXX: avoid using doubles ? */ + g_string_append_printf(buf, "Translation buffer state:\n"); + /* + * Report total code size including the padding and TB structs; + * otherwise users might think "-accel tcg,tb-size" is not honoured. + * For avg host size we use the precise numbers from tb_tree_stats though. + */ + g_string_append_printf(buf, "gen code size %zu/%zu\n", + tcg_code_size(), tcg_code_capacity()); + g_string_append_printf(buf, "TB count %zu\n", nb_tbs); + g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n", + nb_tbs ? 
tst.target_size / nb_tbs : 0, + tst.max_target_size); + g_string_append_printf(buf, "TB avg host size %zu bytes " + "(expansion ratio: %0.1f)\n", + nb_tbs ? tst.host_size / nb_tbs : 0, + tst.target_size ? + (double)tst.host_size / tst.target_size : 0); + g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n", + tst.cross_page, + nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); + g_string_append_printf(buf, "direct jump count %zu (%zu%%) " + "(2 jumps=%zu %zu%%)\n", + tst.direct_jmp_count, + nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, + tst.direct_jmp2_count, + nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0); + + qht_statistics_init(&tb_ctx.htable, &hst); + print_qht_statistics(hst, buf); + qht_statistics_destroy(&hst); + + g_string_append_printf(buf, "\nStatistics:\n"); + g_string_append_printf(buf, "TB flush count %u\n", + qatomic_read(&tb_ctx.tb_flush_count)); + g_string_append_printf(buf, "TB invalidate count %u\n", + qatomic_read(&tb_ctx.tb_phys_invalidate_count)); + + tlb_flush_counts(&flush_full, &flush_part, &flush_elide); + g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full); + g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part); + g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide); + tcg_dump_info(buf); +} + +void dump_opcount_info(GString *buf) +{ + tcg_dump_op_count(buf); +} + +#else /* CONFIG_USER_ONLY */ + +void cpu_interrupt(CPUState *cpu, int mask) +{ + g_assert(qemu_mutex_iothread_locked()); + cpu->interrupt_request |= mask; + qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); +} + +/* + * Walks guest process memory "regions" one by one + * and calls callback function 'fn' for each region. + */ +struct walk_memory_regions_data { + walk_memory_regions_fn fn; + void *priv; + target_ulong start; + int prot; +}; + +static int walk_memory_regions_end(struct walk_memory_regions_data *data, + target_ulong end, int new_prot) +{ + if (data->start != -1u) { + int rc = data->fn(data->priv, data->start, end, data->prot); + if (rc != 0) { + return rc; + } + } + + data->start = (new_prot ? 
end : -1u); + data->prot = new_prot; + + return 0; +} + +static int walk_memory_regions_1(struct walk_memory_regions_data *data, + target_ulong base, int level, void **lp) +{ + target_ulong pa; + int i, rc; + + if (*lp == NULL) { + return walk_memory_regions_end(data, base, 0); + } + + if (level == 0) { + PageDesc *pd = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + int prot = pd[i].flags; + + pa = base | (i << TARGET_PAGE_BITS); + if (prot != data->prot) { + rc = walk_memory_regions_end(data, pa, prot); + if (rc != 0) { + return rc; + } + } + } + } else { + void **pp = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + pa = base | ((target_ulong)i << + (TARGET_PAGE_BITS + V_L2_BITS * level)); + rc = walk_memory_regions_1(data, pa, level - 1, pp + i); + if (rc != 0) { + return rc; + } + } + } + + return 0; +} + +int walk_memory_regions(void *priv, walk_memory_regions_fn fn) +{ + struct walk_memory_regions_data data; + uintptr_t i, l1_sz = v_l1_size; + + data.fn = fn; + data.priv = priv; + data.start = -1u; + data.prot = 0; + + for (i = 0; i < l1_sz; i++) { + target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS); + int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i); + if (rc != 0) { + return rc; + } + } + + return walk_memory_regions_end(&data, 0, 0); +} + +static int dump_region(void *priv, target_ulong start, + target_ulong end, unsigned long prot) +{ + FILE *f = (FILE *)priv; + + (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx + " "TARGET_FMT_lx" %c%c%c\n", + start, end, end - start, + ((prot & PAGE_READ) ? 'r' : '-'), + ((prot & PAGE_WRITE) ? 'w' : '-'), + ((prot & PAGE_EXEC) ? 'x' : '-')); + + return 0; +} + +/* dump memory mappings */ +void page_dump(FILE *f) +{ + const int length = sizeof(target_ulong) * 2; + (void) fprintf(f, "%-*s %-*s %-*s %s\n", + length, "start", length, "end", length, "size", "prot"); + walk_memory_regions(f, dump_region); +} + +int page_get_flags(target_ulong address) +{ + PageDesc *p; + + p = page_find(address >> TARGET_PAGE_BITS); + if (!p) { + return 0; + } + return p->flags; +} + +/* Modify the flags of a page and invalidate the code if necessary. + The flag PAGE_WRITE_ORG is positioned automatically depending + on PAGE_WRITE. The mmap_lock should already be held. */ +void page_set_flags(target_ulong start, target_ulong end, int flags) +{ + target_ulong addr, len; + bool reset_target_data; + + /* This function should never be called with addresses outside the + guest address space. If this assert fires, it probably indicates + a missing call to h2g_valid. */ + assert(end - 1 <= GUEST_ADDR_MAX); + assert(start < end); + /* Only set PAGE_ANON with new mappings. */ + assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET)); + assert_memory_lock(); + + start = start & TARGET_PAGE_MASK; + end = TARGET_PAGE_ALIGN(end); + + if (flags & PAGE_WRITE) { + flags |= PAGE_WRITE_ORG; + } + reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET); + flags &= ~PAGE_RESET; + + for (addr = start, len = end - start; + len != 0; + len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { + PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); + + /* If the write protection bit is set, then we invalidate + the code inside. */ + if (!(p->flags & PAGE_WRITE) && + (flags & PAGE_WRITE) && + p->first_tb) { + tb_invalidate_phys_page(addr, 0); + } + if (reset_target_data) { + g_free(p->target_data); + p->target_data = NULL; + p->flags = flags; + } else { + /* Using mprotect on a page does not change MAP_ANON. 
*/ + p->flags = (p->flags & PAGE_ANON) | flags; + } + } +} + +void *page_get_target_data(target_ulong address) +{ + PageDesc *p = page_find(address >> TARGET_PAGE_BITS); + return p ? p->target_data : NULL; +} + +void *page_alloc_target_data(target_ulong address, size_t size) +{ + PageDesc *p = page_find(address >> TARGET_PAGE_BITS); + void *ret = NULL; + + if (p->flags & PAGE_VALID) { + ret = p->target_data; + if (!ret) { + p->target_data = ret = g_malloc0(size); + } + } + return ret; +} + +int page_check_range(target_ulong start, target_ulong len, int flags) +{ + PageDesc *p; + target_ulong end; + target_ulong addr; + + /* This function should never be called with addresses outside the + guest address space. If this assert fires, it probably indicates + a missing call to h2g_valid. */ + if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) { + assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); + } + + if (len == 0) { + return 0; + } + if (start + len - 1 < start) { + /* We've wrapped around. */ + return -1; + } + + /* must do before we loose bits in the next step */ + end = TARGET_PAGE_ALIGN(start + len); + start = start & TARGET_PAGE_MASK; + + for (addr = start, len = end - start; + len != 0; + len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { + p = page_find(addr >> TARGET_PAGE_BITS); + if (!p) { + return -1; + } + if (!(p->flags & PAGE_VALID)) { + return -1; + } + + if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) { + return -1; + } + if (flags & PAGE_WRITE) { + if (!(p->flags & PAGE_WRITE_ORG)) { + return -1; + } + /* unprotect the page if it was put read-only because it + contains translated code */ + if (!(p->flags & PAGE_WRITE)) { + if (!page_unprotect(addr, 0)) { + return -1; + } + } + } + } + return 0; +} + +void page_protect(tb_page_addr_t page_addr) +{ + target_ulong addr; + PageDesc *p; + int prot; + + p = page_find(page_addr >> TARGET_PAGE_BITS); + if (p && (p->flags & PAGE_WRITE)) { + /* + * Force the host page as non writable (writes will have a page fault + + * mprotect overhead). + */ + page_addr &= qemu_host_page_mask; + prot = 0; + for (addr = page_addr; addr < page_addr + qemu_host_page_size; + addr += TARGET_PAGE_SIZE) { + + p = page_find(addr >> TARGET_PAGE_BITS); + if (!p) { + continue; + } + prot |= p->flags; + p->flags &= ~PAGE_WRITE; + } + mprotect(g2h_untagged(page_addr), qemu_host_page_size, + (prot & PAGE_BITS) & ~PAGE_WRITE); + if (DEBUG_TB_INVALIDATE_GATE) { + printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr); + } + } +} + +/* called from signal handler: invalidate the code and unprotect the + * page. Return 0 if the fault was not handled, 1 if it was handled, + * and 2 if it was handled but the caller must cause the TB to be + * immediately exited. (We can only return 2 if the 'pc' argument is + * non-zero.) + */ +int page_unprotect(target_ulong address, uintptr_t pc) +{ + unsigned int prot; + bool current_tb_invalidated; + PageDesc *p; + target_ulong host_start, host_end, addr; + + /* Technically this isn't safe inside a signal handler. However we + know this only ever happens in a synchronous SEGV handler, so in + practice it seems to be ok. 
*/ + mmap_lock(); + + p = page_find(address >> TARGET_PAGE_BITS); + if (!p) { + mmap_unlock(); + return 0; + } + + /* if the page was really writable, then we change its + protection back to writable */ + if (p->flags & PAGE_WRITE_ORG) { + current_tb_invalidated = false; + if (p->flags & PAGE_WRITE) { + /* If the page is actually marked WRITE then assume this is because + * this thread raced with another one which got here first and + * set the page to PAGE_WRITE and did the TB invalidate for us. + */ +#ifdef TARGET_HAS_PRECISE_SMC + TranslationBlock *current_tb = tcg_tb_lookup(pc); + if (current_tb) { + current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID; + } +#endif + } else { + host_start = address & qemu_host_page_mask; + host_end = host_start + qemu_host_page_size; + + prot = 0; + for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) { + p = page_find(addr >> TARGET_PAGE_BITS); + p->flags |= PAGE_WRITE; + prot |= p->flags; + + /* and since the content will be modified, we must invalidate + the corresponding translated code. */ + current_tb_invalidated |= tb_invalidate_phys_page(addr, pc); +#ifdef CONFIG_USER_ONLY + if (DEBUG_TB_CHECK_GATE) { + tb_invalidate_check(addr); + } +#endif + } + mprotect((void *)g2h_untagged(host_start), qemu_host_page_size, + prot & PAGE_BITS); + } + mmap_unlock(); + /* If current TB was invalidated return to main loop */ + return current_tb_invalidated ? 2 : 1; + } + mmap_unlock(); + return 0; +} +#endif /* CONFIG_USER_ONLY */ + +/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */ +void tcg_flush_softmmu_tlb(CPUState *cs) +{ +#ifdef CONFIG_SOFTMMU + tlb_flush(cs); +#endif +} diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c new file mode 100644 index 000000000..f06c31426 --- /dev/null +++ b/accel/tcg/translator.c @@ -0,0 +1,178 @@ +/* + * Generic intermediate code generation. + * + * Copyright (C) 2016-2017 LluĂs Vilanova <vilanova@ac.upc.edu> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "tcg/tcg.h" +#include "tcg/tcg-op.h" +#include "exec/exec-all.h" +#include "exec/gen-icount.h" +#include "exec/log.h" +#include "exec/translator.h" +#include "exec/plugin-gen.h" +#include "sysemu/replay.h" + +/* Pairs with tcg_clear_temp_count. + To be called by #TranslatorOps.{translate_insn,tb_stop} if + (1) the target is sufficiently clean to support reporting, + (2) as and when all temporaries are known to be consumed. + For most targets, (2) is at the end of translate_insn. */ +void translator_loop_temp_check(DisasContextBase *db) +{ + if (tcg_check_temp_count()) { + qemu_log("warning: TCG temporary leaks before " + TARGET_FMT_lx "\n", db->pc_next); + } +} + +bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest) +{ + /* Suppress goto_tb if requested. */ + if (tb_cflags(db->tb) & CF_NO_GOTO_TB) { + return false; + } + + /* Check for the dest on the same page as the start of the TB. 
*/ + return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0; +} + +static inline void translator_page_protect(DisasContextBase *dcbase, + target_ulong pc) +{ +#ifdef CONFIG_USER_ONLY + dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK; + page_protect(pc); +#endif +} + +void translator_loop(const TranslatorOps *ops, DisasContextBase *db, + CPUState *cpu, TranslationBlock *tb, int max_insns) +{ + uint32_t cflags = tb_cflags(tb); + bool plugin_enabled; + + /* Initialize DisasContext */ + db->tb = tb; + db->pc_first = tb->pc; + db->pc_next = db->pc_first; + db->is_jmp = DISAS_NEXT; + db->num_insns = 0; + db->max_insns = max_insns; + db->singlestep_enabled = cflags & CF_SINGLE_STEP; + translator_page_protect(db, db->pc_next); + + ops->init_disas_context(db, cpu); + tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ + + /* Reset the temp count so that we can identify leaks */ + tcg_clear_temp_count(); + + /* Start translating. */ + gen_tb_start(db->tb); + ops->tb_start(db, cpu); + tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ + + plugin_enabled = plugin_gen_tb_start(cpu, tb, cflags & CF_MEMI_ONLY); + + while (true) { + db->num_insns++; + ops->insn_start(db, cpu); + tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ + + if (plugin_enabled) { + plugin_gen_insn_start(cpu, db); + } + + /* Disassemble one instruction. The translate_insn hook should + update db->pc_next and db->is_jmp to indicate what should be + done next -- either exiting this loop or locate the start of + the next instruction. */ + if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) { + /* Accept I/O on the last instruction. */ + gen_io_start(); + ops->translate_insn(db, cpu); + } else { + /* we should only see CF_MEMI_ONLY for io_recompile */ + tcg_debug_assert(!(cflags & CF_MEMI_ONLY)); + ops->translate_insn(db, cpu); + } + + /* Stop translation if translate_insn so indicated. */ + if (db->is_jmp != DISAS_NEXT) { + break; + } + + /* + * We can't instrument after instructions that change control + * flow although this only really affects post-load operations. + */ + if (plugin_enabled) { + plugin_gen_insn_end(); + } + + /* Stop translation if the output buffer is full, + or we have executed all of the allowed instructions. */ + if (tcg_op_buf_full() || db->num_insns >= db->max_insns) { + db->is_jmp = DISAS_TOO_MANY; + break; + } + } + + /* Emit code to exit the TB, as indicated by db->is_jmp. */ + ops->tb_stop(db, cpu); + gen_tb_end(db->tb, db->num_insns); + + if (plugin_enabled) { + plugin_gen_tb_end(cpu); + } + + /* The disas_log hook may use these values rather than recompute. 
*/ + tb->size = db->pc_next - db->pc_first; + tb->icount = db->num_insns; + +#ifdef DEBUG_DISAS + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) + && qemu_log_in_addr_range(db->pc_first)) { + FILE *logfile = qemu_log_lock(); + qemu_log("----------------\n"); + ops->disas_log(db, cpu); + qemu_log("\n"); + qemu_log_unlock(logfile); + } +#endif +} + +static inline void translator_maybe_page_protect(DisasContextBase *dcbase, + target_ulong pc, size_t len) +{ +#ifdef CONFIG_USER_ONLY + target_ulong end = pc + len - 1; + + if (end > dcbase->page_protect_end) { + translator_page_protect(dcbase, end); + } +#endif +} + +#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \ + type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \ + abi_ptr pc, bool do_swap) \ + { \ + translator_maybe_page_protect(dcbase, pc, sizeof(type)); \ + type ret = load_fn(env, pc); \ + if (do_swap) { \ + ret = swap_fn(ret); \ + } \ + plugin_insn_append(pc, &ret, sizeof(ret)); \ + return ret; \ + } + +FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD) + +#undef GEN_TRANSLATOR_LD diff --git a/accel/tcg/user-exec-stub.c b/accel/tcg/user-exec-stub.c new file mode 100644 index 000000000..968cd3ca6 --- /dev/null +++ b/accel/tcg/user-exec-stub.c @@ -0,0 +1,39 @@ +#include "qemu/osdep.h" +#include "hw/core/cpu.h" +#include "sysemu/replay.h" + +bool enable_cpu_pm = false; + +void cpu_resume(CPUState *cpu) +{ +} + +void cpu_remove_sync(CPUState *cpu) +{ +} + +void qemu_init_vcpu(CPUState *cpu) +{ +} + +/* User mode emulation does not support record/replay yet. */ + +bool replay_exception(void) +{ + return true; +} + +bool replay_has_exception(void) +{ + return false; +} + +bool replay_interrupt(void) +{ + return true; +} + +bool replay_has_interrupt(void) +{ + return false; +} diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c new file mode 100644 index 000000000..1528a21fa --- /dev/null +++ b/accel/tcg/user-exec.c @@ -0,0 +1,542 @@ +/* + * User emulator execution + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#include "qemu/osdep.h" +#include "hw/core/tcg-cpu-ops.h" +#include "disas/disas.h" +#include "exec/exec-all.h" +#include "tcg/tcg.h" +#include "qemu/bitops.h" +#include "exec/cpu_ldst.h" +#include "exec/translate-all.h" +#include "exec/helper-proto.h" +#include "qemu/atomic128.h" +#include "trace/trace-root.h" +#include "tcg/tcg-ldst.h" +#include "internal.h" + +__thread uintptr_t helper_retaddr; + +//#define DEBUG_SIGNAL + +/* + * Adjust the pc to pass to cpu_restore_state; return the memop type. + */ +MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write) +{ + switch (helper_retaddr) { + default: + /* + * Fault during host memory operation within a helper function. + * The helper's host return address, saved here, gives us a + * pointer into the generated code that will unwind to the + * correct guest pc. 
+ */ + *pc = helper_retaddr; + break; + + case 0: + /* + * Fault during host memory operation within generated code. + * (Or, a unrelated bug within qemu, but we can't tell from here). + * + * We take the host pc from the signal frame. However, we cannot + * use that value directly. Within cpu_restore_state_from_tb, we + * assume PC comes from GETPC(), as used by the helper functions, + * so we adjust the address by -GETPC_ADJ to form an address that + * is within the call insn, so that the address does not accidentally + * match the beginning of the next guest insn. However, when the + * pc comes from the signal frame it points to the actual faulting + * host memory insn and not the return from a call insn. + * + * Therefore, adjust to compensate for what will be done later + * by cpu_restore_state_from_tb. + */ + *pc += GETPC_ADJ; + break; + + case 1: + /* + * Fault during host read for translation, or loosely, "execution". + * + * The guest pc is already pointing to the start of the TB for which + * code is being generated. If the guest translator manages the + * page crossings correctly, this is exactly the correct address + * (and if the translator doesn't handle page boundaries correctly + * there's little we can do about that here). Therefore, do not + * trigger the unwinder. + * + * Like tb_gen_code, release the memory lock before cpu_loop_exit. + */ + mmap_unlock(); + *pc = 0; + return MMU_INST_FETCH; + } + + return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD; +} + +/** + * handle_sigsegv_accerr_write: + * @cpu: the cpu context + * @old_set: the sigset_t from the signal ucontext_t + * @host_pc: the host pc, adjusted for the signal + * @guest_addr: the guest address of the fault + * + * Return true if the write fault has been handled, and should be re-tried. + * + * Note that it is important that we don't call page_unprotect() unless + * this is really a "write to nonwriteable page" fault, because + * page_unprotect() assumes that if it is called for an access to + * a page that's writeable this means we had two threads racing and + * another thread got there first and already made the page writeable; + * so we will retry the access. If we were to call page_unprotect() + * for some other kind of fault that should really be passed to the + * guest, we'd end up in an infinite loop of retrying the faulting access. + */ +bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set, + uintptr_t host_pc, abi_ptr guest_addr) +{ + switch (page_unprotect(guest_addr, host_pc)) { + case 0: + /* + * Fault not caused by a page marked unwritable to protect + * cached translations, must be the guest binary's problem. + */ + return false; + case 1: + /* + * Fault caused by protection of cached translation; TBs + * invalidated, so resume execution. + */ + return true; + case 2: + /* + * Fault caused by protection of cached translation, and the + * currently executing TB was modified and must be exited immediately. 
+ */ + sigprocmask(SIG_SETMASK, old_set, NULL); + cpu_loop_exit_noexc(cpu); + /* NORETURN */ + default: + g_assert_not_reached(); + } +} + +static int probe_access_internal(CPUArchState *env, target_ulong addr, + int fault_size, MMUAccessType access_type, + bool nonfault, uintptr_t ra) +{ + int acc_flag; + bool maperr; + + switch (access_type) { + case MMU_DATA_STORE: + acc_flag = PAGE_WRITE_ORG; + break; + case MMU_DATA_LOAD: + acc_flag = PAGE_READ; + break; + case MMU_INST_FETCH: + acc_flag = PAGE_EXEC; + break; + default: + g_assert_not_reached(); + } + + if (guest_addr_valid_untagged(addr)) { + int page_flags = page_get_flags(addr); + if (page_flags & acc_flag) { + return 0; /* success */ + } + maperr = !(page_flags & PAGE_VALID); + } else { + maperr = true; + } + + if (nonfault) { + return TLB_INVALID_MASK; + } + + cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra); +} + +int probe_access_flags(CPUArchState *env, target_ulong addr, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, uintptr_t ra) +{ + int flags; + + flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra); + *phost = flags ? NULL : g2h(env_cpu(env), addr); + return flags; +} + +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t ra) +{ + int flags; + + g_assert(-(addr | TARGET_PAGE_MASK) >= size); + flags = probe_access_internal(env, addr, size, access_type, false, ra); + g_assert(flags == 0); + + return size ? g2h(env_cpu(env), addr) : NULL; +} + +/* The softmmu versions of these helpers are in cputlb.c. */ + +/* + * Verify that we have passed the correct MemOp to the correct function. + * + * We could present one function to target code, and dispatch based on + * the MemOp, but so far we have worked hard to avoid an indirect function + * call along the memory path. + */ +static void validate_memop(MemOpIdx oi, MemOp expected) +{ +#ifdef CONFIG_DEBUG_TCG + MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); + assert(have == expected); +#endif +} + +void helper_unaligned_ld(CPUArchState *env, target_ulong addr) +{ + cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC()); +} + +void helper_unaligned_st(CPUArchState *env, target_ulong addr) +{ + cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC()); +} + +static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra, MMUAccessType type) +{ + MemOp mop = get_memop(oi); + int a_bits = get_alignment_bits(mop); + void *ret; + + /* Enforce guest required alignment. 
*/ + if (unlikely(addr & ((1 << a_bits) - 1))) { + cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra); + } + + ret = g2h(env_cpu(env), addr); + set_helper_retaddr(ra); + return ret; +} + +uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint8_t ret; + + validate_memop(oi, MO_UB); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldub_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint16_t ret; + + validate_memop(oi, MO_BEUW); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = lduw_be_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint32_t ret; + + validate_memop(oi, MO_BEUL); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldl_be_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint64_t ret; + + validate_memop(oi, MO_BEQ); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldq_be_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint16_t ret; + + validate_memop(oi, MO_LEUW); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = lduw_le_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint32_t ret; + + validate_memop(oi, MO_LEUL); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldl_le_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + uint64_t ret; + + validate_memop(oi, MO_LEQ); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldq_le_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + + validate_memop(oi, MO_UB); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stb_p(haddr, val); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; + + validate_memop(oi, 
MO_BEUW);
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+ stw_be_p(haddr, val);
+ clear_helper_retaddr();
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ void *haddr;
+
+ validate_memop(oi, MO_BEUL);
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+ stl_be_p(haddr, val);
+ clear_helper_retaddr();
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ void *haddr;
+
+ validate_memop(oi, MO_BEQ);
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+ stq_be_p(haddr, val);
+ clear_helper_retaddr();
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ void *haddr;
+
+ validate_memop(oi, MO_LEUW);
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+ stw_le_p(haddr, val);
+ clear_helper_retaddr();
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ void *haddr;
+
+ validate_memop(oi, MO_LEUL);
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+ stl_le_p(haddr, val);
+ clear_helper_retaddr();
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ void *haddr;
+
+ validate_memop(oi, MO_LEQ);
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+ stq_le_p(haddr, val);
+ clear_helper_retaddr();
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
+{
+ uint32_t ret;
+
+ set_helper_retaddr(1);
+ ret = ldub_p(g2h_untagged(ptr));
+ clear_helper_retaddr();
+ return ret;
+}
+
+uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
+{
+ uint32_t ret;
+
+ set_helper_retaddr(1);
+ ret = lduw_p(g2h_untagged(ptr));
+ clear_helper_retaddr();
+ return ret;
+}
+
+uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
+{
+ uint32_t ret;
+
+ set_helper_retaddr(1);
+ ret = ldl_p(g2h_untagged(ptr));
+ clear_helper_retaddr();
+ return ret;
+}
+
+uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
+{
+ uint64_t ret;
+
+ set_helper_retaddr(1);
+ ret = ldq_p(g2h_untagged(ptr));
+ clear_helper_retaddr();
+ return ret;
+}
+
+#include "ldst_common.c.inc"
+
+/*
+ * Do not allow unaligned operations to proceed. Return the host address.
+ *
+ * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
+ */
+static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, int size, int prot,
+ uintptr_t retaddr)
+{
+ MemOp mop = get_memop(oi);
+ int a_bits = get_alignment_bits(mop);
+ void *ret;
+
+ /* Enforce guest required alignment. */
+ if (unlikely(addr & ((1 << a_bits) - 1))) {
+ MMUAccessType t = prot == PAGE_READ ?
+ MMU_DATA_LOAD : MMU_DATA_STORE;
+ cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
+ }
+
+ /* Enforce qemu required alignment. */
+ if (unlikely(addr & (size - 1))) {
+ cpu_loop_exit_atomic(env_cpu(env), retaddr);
+ }
+
+ ret = g2h(env_cpu(env), addr);
+ set_helper_retaddr(retaddr);
+ return ret;
+}
+
+#include "atomic_common.c.inc"
+
+/*
+ * First set of functions passes in OI and RETADDR.
+ * This makes them callable from other helpers.
+ */
+
+#define ATOMIC_NAME(X) \
+ glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
+#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
+#define ATOMIC_MMU_IDX MMU_USER_IDX
+
+#define DATA_SIZE 1
+#include "atomic_template.h"
+
+#define DATA_SIZE 2
+#include "atomic_template.h"
+
+#define DATA_SIZE 4
+#include "atomic_template.h"
+
+#ifdef CONFIG_ATOMIC64
+#define DATA_SIZE 8
+#include "atomic_template.h"
+#endif
+
+#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
+#define DATA_SIZE 16
+#include "atomic_template.h"
+#endif
diff --git a/accel/xen/meson.build b/accel/xen/meson.build
new file mode 100644
index 000000000..002bdb03c
--- /dev/null
+++ b/accel/xen/meson.build
@@ -0,0 +1 @@
+specific_ss.add(when: 'CONFIG_XEN', if_true: files('xen-all.c'))
diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c
new file mode 100644
index 000000000..69aa7d018
--- /dev/null
+++ b/accel/xen/xen-all.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2014 Citrix Systems UK Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "hw/xen/xen-legacy-backend.h"
+#include "hw/xen/xen_pt.h"
+#include "chardev/char.h"
+#include "qemu/accel.h"
+#include "sysemu/cpus.h"
+#include "sysemu/xen.h"
+#include "sysemu/runstate.h"
+#include "migration/misc.h"
+#include "migration/global_state.h"
+#include "hw/boards.h"
+
+//#define DEBUG_XEN
+
+#ifdef DEBUG_XEN
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+bool xen_allowed;
+
+xc_interface *xen_xc;
+xenforeignmemory_handle *xen_fmem;
+xendevicemodel_handle *xen_dmod;
+
+static int store_dev_info(int domid, Chardev *cs, const char *string)
+{
+ struct xs_handle *xs = NULL;
+ char *path = NULL;
+ char *newpath = NULL;
+ char *pts = NULL;
+ int ret = -1;
+
+ /* Only continue if we're talking to a pty. */
+ if (!CHARDEV_IS_PTY(cs)) {
+ return 0;
+ }
+ pts = cs->filename + 4;
+
+ /* We now have everything we need to set the xenstore entry. */
+ xs = xs_open(0);
+ if (xs == NULL) {
+ fprintf(stderr, "Could not contact XenStore\n");
+ goto out;
+ }
+
+ path = xs_get_domain_path(xs, domid);
+ if (path == NULL) {
+ fprintf(stderr, "xs_get_domain_path() error\n");
+ goto out;
+ }
+ newpath = realloc(path, (strlen(path) + strlen(string) +
+ strlen("/tty") + 1));
+ if (newpath == NULL) {
+ fprintf(stderr, "realloc error\n");
+ goto out;
+ }
+ path = newpath;
+
+ strcat(path, string);
+ strcat(path, "/tty");
+ if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
+ fprintf(stderr, "xs_write for '%s' fail", string);
+ goto out;
+ }
+ ret = 0;
+
+out:
+ free(path);
+ xs_close(xs);
+
+ return ret;
+}
+
+void xenstore_store_pv_console_info(int i, Chardev *chr)
+{
+ if (i == 0) {
+ store_dev_info(xen_domid, chr, "/console");
+ } else {
+ char buf[32];
+ snprintf(buf, sizeof(buf), "/device/console/%d", i);
+ store_dev_info(xen_domid, chr, buf);
+ }
+}
+
+
+static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
+{
+ char path[50];
+
+ if (xs == NULL) {
+ error_report("xenstore connection not initialized");
+ exit(1);
+ }
+
+ snprintf(path, sizeof (path), "device-model/%u/state", xen_domid);
+ /*
+ * This call may fail when running restricted so don't make it fatal in
+ * that case. Toolstacks should instead use QMP to listen for state changes.
+ */
+ if (!xs_write(xs, XBT_NULL, path, state, strlen(state)) &&
+ !xen_domid_restrict) {
+ error_report("error recording dm state");
+ exit(1);
+ }
+}
+
+
+static void xen_change_state_handler(void *opaque, bool running,
+ RunState state)
+{
+ if (running) {
+ /* record state running */
+ xenstore_record_dm_state(xenstore, "running");
+ }
+}
+
+static bool xen_get_igd_gfx_passthru(Object *obj, Error **errp)
+{
+ return xen_igd_gfx_pt_enabled();
+}
+
+static void xen_set_igd_gfx_passthru(Object *obj, bool value, Error **errp)
+{
+ xen_igd_gfx_pt_set(value, errp);
+}
+
+static void xen_setup_post(MachineState *ms, AccelState *accel)
+{
+ int rc;
+
+ if (xen_domid_restrict) {
+ rc = xen_restrict(xen_domid);
+ if (rc < 0) {
+ perror("xen: failed to restrict");
+ exit(1);
+ }
+ }
+}
+
+static int xen_init(MachineState *ms)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+
+ xen_xc = xc_interface_open(0, 0, 0);
+ if (xen_xc == NULL) {
+ xen_pv_printf(NULL, 0, "can't open xen interface\n");
+ return -1;
+ }
+ xen_fmem = xenforeignmemory_open(0, 0);
+ if (xen_fmem == NULL) {
+ xen_pv_printf(NULL, 0, "can't open xen fmem interface\n");
+ xc_interface_close(xen_xc);
+ return -1;
+ }
+ xen_dmod = xendevicemodel_open(0, 0);
+ if (xen_dmod == NULL) {
+ xen_pv_printf(NULL, 0, "can't open xen devicemodel interface\n");
+ xenforeignmemory_close(xen_fmem);
+ xc_interface_close(xen_xc);
+ return -1;
+ }
+ qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);
+ /*
+ * opt out of system RAM being allocated by generic code
+ */
+ mc->default_ram_id = NULL;
+ return 0;
+}
+
+static void xen_accel_class_init(ObjectClass *oc, void *data)
+{
+ AccelClass *ac = ACCEL_CLASS(oc);
+ static GlobalProperty compat[] = {
+ { "migration", "store-global-state", "off" },
+ { "migration", "send-configuration", "off" },
+ { "migration", "send-section-footer", "off" },
+ };
+
+ ac->name = "Xen";
+ ac->init_machine = xen_init;
+ ac->setup_post = xen_setup_post;
+ ac->allowed = &xen_allowed;
+ ac->compat_props = g_ptr_array_new();
+
+ compat_props_add(ac->compat_props, compat, G_N_ELEMENTS(compat));
+
+ object_class_property_add_bool(oc, "igd-passthru",
+ xen_get_igd_gfx_passthru,
+ xen_set_igd_gfx_passthru);
+ object_class_property_set_description(oc, "igd-passthru",
+ "Set on/off to enable/disable IGD passthrough");
+}
+
+#define TYPE_XEN_ACCEL ACCEL_CLASS_NAME("xen")
+
+static const TypeInfo xen_accel_type = {
+ .name = TYPE_XEN_ACCEL,
+ .parent = TYPE_ACCEL,
+ .class_init = xen_accel_class_init,
+};
+
+static void xen_accel_ops_class_init(ObjectClass *oc, void *data)
+{
+ AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
+
+ ops->create_vcpu_thread = dummy_start_vcpu_thread;
+}
+
+static const TypeInfo xen_accel_ops_type = {
+ .name = ACCEL_OPS_NAME("xen"),
+
+ .parent = TYPE_ACCEL_OPS,
+ .class_init = xen_accel_ops_class_init,
+ .abstract = true,
+};
+
+static void xen_type_init(void)
+{
+ type_register_static(&xen_accel_type);
+ type_register_static(&xen_accel_ops_type);
+}
+type_init(xen_type_init);
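
The xen-all.c hunk above ends with the registration pattern that ties an accelerator into QOM: an AccelClass subtype named via ACCEL_CLASS_NAME() plus a companion AccelOpsClass named via ACCEL_OPS_NAME(), both registered from a type_init() hook. Below is a minimal sketch of that same pattern for a hypothetical accelerator called "noop". The "noop" name, the noop_* identifiers and the exact include list are assumptions made purely for illustration; ACCEL_CLASS, ACCEL_CLASS_NAME, ACCEL_OPS_NAME, TYPE_ACCEL, TYPE_ACCEL_OPS, dummy_start_vcpu_thread() and type_register_static() are the interfaces actually used by the Xen code in this diff.

/*
 * Hypothetical accelerator registration sketch, mirroring the Xen code
 * above. "noop" and the noop_* names are invented for illustration only.
 * Headers follow those used by xen-all.c and the accel framework.
 */
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "qemu/accel.h"
#include "sysemu/accel-ops.h"
#include "sysemu/cpus.h"
#include "hw/boards.h"

static bool noop_allowed;

static int noop_init(MachineState *ms)
{
    /* Nothing to set up for this illustrative accelerator. */
    return 0;
}

static void noop_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);

    /* Same fields the Xen class fills in: name, init hook, enable flag. */
    ac->name = "noop";
    ac->init_machine = noop_init;
    ac->allowed = &noop_allowed;
}

static const TypeInfo noop_accel_type = {
    .name = ACCEL_CLASS_NAME("noop"),
    .parent = TYPE_ACCEL,
    .class_init = noop_accel_class_init,
};

static void noop_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    /* Reuse the generic waiting vCPU thread, as the Xen accelerator does. */
    ops->create_vcpu_thread = dummy_start_vcpu_thread;
}

static const TypeInfo noop_accel_ops_type = {
    .name = ACCEL_OPS_NAME("noop"),
    .parent = TYPE_ACCEL_OPS,
    .class_init = noop_accel_ops_class_init,
    .abstract = true,
};

static void noop_register_types(void)
{
    type_register_static(&noop_accel_type);
    type_register_static(&noop_accel_ops_type);
}
type_init(noop_register_types);

As with Xen, only create_vcpu_thread is installed on the ops class; everything else is inherited, and the accelerator becomes selectable once its AccelClass is registered under ACCEL_CLASS_NAME().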