author    | Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com> | 2023-10-10 11:40:56 +0000
committer | Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com> | 2023-10-10 11:40:56 +0000
commit    | e02cda008591317b1625707ff8e115a4841aa889 (patch)
tree      | aee302e3cf8b59ec2d32ec481be3d1afddfc8968 /hw/i386/xen
parent    | cc668e6b7e0ffd8c9d130513d12053cf5eda1d3b (diff)
Introduce Virtio-loopback epsilon release:
The Epsilon release introduces a new compatibility layer which makes the
virtio-loopback design work with QEMU and rust-vmm vhost-user backends
without requiring any changes.
Signed-off-by: Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
Change-Id: I52e57563e08a7d0bdc002f8e928ee61ba0c53dd9
Diffstat (limited to 'hw/i386/xen')
-rw-r--r-- | hw/i386/xen/meson.build    |    7
-rw-r--r-- | hw/i386/xen/trace-events   |   28
-rw-r--r-- | hw/i386/xen/trace.h        |    1
-rw-r--r-- | hw/i386/xen/xen-hvm.c      | 1620
-rw-r--r-- | hw/i386/xen/xen-mapcache.c |  593
-rw-r--r-- | hw/i386/xen/xen_apic.c     |  103
-rw-r--r-- | hw/i386/xen/xen_platform.c |  519
-rw-r--r-- | hw/i386/xen/xen_pvdevice.c |  154
8 files changed, 3025 insertions, 0 deletions
diff --git a/hw/i386/xen/meson.build b/hw/i386/xen/meson.build
new file mode 100644
index 000000000..be8413030
--- /dev/null
+++ b/hw/i386/xen/meson.build
@@ -0,0 +1,7 @@
+i386_ss.add(when: 'CONFIG_XEN', if_true: files(
+  'xen-hvm.c',
+  'xen-mapcache.c',
+  'xen_apic.c',
+  'xen_platform.c',
+  'xen_pvdevice.c',
+))
diff --git a/hw/i386/xen/trace-events b/hw/i386/xen/trace-events
new file mode 100644
index 000000000..5d6be6109
--- /dev/null
+++ b/hw/i386/xen/trace-events
@@ -0,0 +1,28 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# xen_platform.c
+xen_platform_log(char *s) "xen platform: %s"
+
+# xen_pvdevice.c
+xen_pv_mmio_read(uint64_t addr) "WARNING: read from Xen PV Device MMIO space (address 0x%"PRIx64")"
+xen_pv_mmio_write(uint64_t addr) "WARNING: write to Xen PV Device MMIO space (address 0x%"PRIx64")"
+
+# xen-hvm.c
+xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: 0x%lx, size 0x%lx"
+xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "0x%"PRIx64" size 0x%lx, log_dirty %i"
+handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
+handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
+handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
+cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
+cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
+cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
+cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
+xen_map_resource_ioreq(uint32_t id, void *addr) "id: %u addr: %p"
+cpu_ioreq_config_read(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
+cpu_ioreq_config_write(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
+
+# xen-mapcache.c
+xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64
+xen_remap_bucket(uint64_t index) "index 0x%"PRIx64
+xen_map_cache_return(void* ptr) "%p"
+
diff --git a/hw/i386/xen/trace.h b/hw/i386/xen/trace.h
new file mode 100644
index 000000000..a02bf755d
--- /dev/null
+++ b/hw/i386/xen/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_i386_xen.h"
diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
new file mode 100644
index 000000000..482be9541
--- /dev/null
+++ b/hw/i386/xen/xen-hvm.c
@@ -0,0 +1,1620 @@
+/*
+ * Copyright (C) 2010 Citrix Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" + +#include "cpu.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/i386/pc.h" +#include "hw/southbridge/piix.h" +#include "hw/irq.h" +#include "hw/hw.h" +#include "hw/i386/apic-msidef.h" +#include "hw/xen/xen_common.h" +#include "hw/xen/xen-legacy-backend.h" +#include "hw/xen/xen-bus.h" +#include "hw/xen/xen-x86.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-migration.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/range.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "sysemu/xen.h" +#include "sysemu/xen-mapcache.h" +#include "trace.h" + +#include <xen/hvm/ioreq.h> +#include <xen/hvm/e820.h> + +//#define DEBUG_XEN_HVM + +#ifdef DEBUG_XEN_HVM +#define DPRINTF(fmt, ...) \ + do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) \ + do { } while (0) +#endif + +static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi; +static MemoryRegion *framebuffer; +static bool xen_in_migration; + +/* Compatibility with older version */ + +/* This allows QEMU to build on a system that has Xen 4.5 or earlier + * installed. This here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h + * needs to be included before this block and hw/xen/xen_common.h needs to + * be included before xen/hvm/ioreq.h + */ +#ifndef IOREQ_TYPE_VMWARE_PORT +#define IOREQ_TYPE_VMWARE_PORT 3 +struct vmware_regs { + uint32_t esi; + uint32_t edi; + uint32_t ebx; + uint32_t ecx; + uint32_t edx; +}; +typedef struct vmware_regs vmware_regs_t; + +struct shared_vmport_iopage { + struct vmware_regs vcpu_vmport_regs[1]; +}; +typedef struct shared_vmport_iopage shared_vmport_iopage_t; +#endif + +static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i) +{ + return shared_page->vcpu_ioreq[i].vp_eport; +} +static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu) +{ + return &shared_page->vcpu_ioreq[vcpu]; +} + +#define BUFFER_IO_MAX_DELAY 100 + +typedef struct XenPhysmap { + hwaddr start_addr; + ram_addr_t size; + const char *name; + hwaddr phys_offset; + + QLIST_ENTRY(XenPhysmap) list; +} XenPhysmap; + +static QLIST_HEAD(, XenPhysmap) xen_physmap; + +typedef struct XenPciDevice { + PCIDevice *pci_dev; + uint32_t sbdf; + QLIST_ENTRY(XenPciDevice) entry; +} XenPciDevice; + +typedef struct XenIOState { + ioservid_t ioservid; + shared_iopage_t *shared_page; + shared_vmport_iopage_t *shared_vmport_page; + buffered_iopage_t *buffered_io_page; + xenforeignmemory_resource_handle *fres; + QEMUTimer *buffered_io_timer; + CPUState **cpu_by_vcpu_id; + /* the evtchn port for polling the notification, */ + evtchn_port_t *ioreq_local_port; + /* evtchn remote and local ports for buffered io */ + evtchn_port_t bufioreq_remote_port; + evtchn_port_t bufioreq_local_port; + /* the evtchn fd for polling */ + xenevtchn_handle *xce_handle; + /* which vcpu we are serving */ + int send_vcpu; + + struct xs_handle *xenstore; + MemoryListener memory_listener; + MemoryListener io_listener; + QLIST_HEAD(, XenPciDevice) dev_list; + DeviceListener device_listener; + hwaddr free_phys_offset; + const XenPhysmap *log_for_dirtybit; + /* Buffer used by xen_sync_dirty_bitmap */ + unsigned long *dirty_bitmap; + + Notifier exit; + Notifier suspend; + Notifier wakeup; +} XenIOState; + +/* Xen specific function for piix pci */ + 
+int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num) +{ + return irq_num + (PCI_SLOT(pci_dev->devfn) << 2); +} + +void xen_piix3_set_irq(void *opaque, int irq_num, int level) +{ + xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2, + irq_num & 3, level); +} + +void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) +{ + int i; + + /* Scan for updates to PCI link routes (0x60-0x63). */ + for (i = 0; i < len; i++) { + uint8_t v = (val >> (8 * i)) & 0xff; + if (v & 0x80) { + v = 0; + } + v &= 0xf; + if (((address + i) >= PIIX_PIRQCA) && ((address + i) <= PIIX_PIRQCD)) { + xen_set_pci_link_route(xen_domid, address + i - PIIX_PIRQCA, v); + } + } +} + +int xen_is_pirq_msi(uint32_t msi_data) +{ + /* If vector is 0, the msi is remapped into a pirq, passed as + * dest_id. + */ + return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0; +} + +void xen_hvm_inject_msi(uint64_t addr, uint32_t data) +{ + xen_inject_msi(xen_domid, addr, data); +} + +static void xen_suspend_notifier(Notifier *notifier, void *data) +{ + xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3); +} + +/* Xen Interrupt Controller */ + +static void xen_set_irq(void *opaque, int irq, int level) +{ + xen_set_isa_irq_level(xen_domid, irq, level); +} + +qemu_irq *xen_interrupt_controller_init(void) +{ + return qemu_allocate_irqs(xen_set_irq, NULL, 16); +} + +/* Memory Ops */ + +static void xen_ram_init(PCMachineState *pcms, + ram_addr_t ram_size, MemoryRegion **ram_memory_p) +{ + X86MachineState *x86ms = X86_MACHINE(pcms); + MemoryRegion *sysmem = get_system_memory(); + ram_addr_t block_len; + uint64_t user_lowmem = + object_property_get_uint(qdev_get_machine(), + PC_MACHINE_MAX_RAM_BELOW_4G, + &error_abort); + + /* Handle the machine opt max-ram-below-4g. It is basically doing + * min(xen limit, user limit). + */ + if (!user_lowmem) { + user_lowmem = HVM_BELOW_4G_RAM_END; /* default */ + } + if (HVM_BELOW_4G_RAM_END <= user_lowmem) { + user_lowmem = HVM_BELOW_4G_RAM_END; + } + + if (ram_size >= user_lowmem) { + x86ms->above_4g_mem_size = ram_size - user_lowmem; + x86ms->below_4g_mem_size = user_lowmem; + } else { + x86ms->above_4g_mem_size = 0; + x86ms->below_4g_mem_size = ram_size; + } + if (!x86ms->above_4g_mem_size) { + block_len = ram_size; + } else { + /* + * Xen does not allocate the memory continuously, it keeps a + * hole of the size computed above or passed in. + */ + block_len = (4 * GiB) + x86ms->above_4g_mem_size; + } + memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len, + &error_fatal); + *ram_memory_p = &ram_memory; + + memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k", + &ram_memory, 0, 0xa0000); + memory_region_add_subregion(sysmem, 0, &ram_640k); + /* Skip of the VGA IO memory space, it will be registered later by the VGA + * emulated device. + * + * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load + * the Options ROM, so it is registered here as RAM. 
+ */ + memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", + &ram_memory, 0xc0000, + x86ms->below_4g_mem_size - 0xc0000); + memory_region_add_subregion(sysmem, 0xc0000, &ram_lo); + if (x86ms->above_4g_mem_size > 0) { + memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", + &ram_memory, 0x100000000ULL, + x86ms->above_4g_mem_size); + memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi); + } +} + +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr, + Error **errp) +{ + unsigned long nr_pfn; + xen_pfn_t *pfn_list; + int i; + + if (runstate_check(RUN_STATE_INMIGRATE)) { + /* RAM already populated in Xen */ + fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT + " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n", + __func__, size, ram_addr); + return; + } + + if (mr == &ram_memory) { + return; + } + + trace_xen_ram_alloc(ram_addr, size); + + nr_pfn = size >> TARGET_PAGE_BITS; + pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn); + + for (i = 0; i < nr_pfn; i++) { + pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i; + } + + if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) { + error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT, + ram_addr); + } + + g_free(pfn_list); +} + +static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size) +{ + XenPhysmap *physmap = NULL; + + start_addr &= TARGET_PAGE_MASK; + + QLIST_FOREACH(physmap, &xen_physmap, list) { + if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) { + return physmap; + } + } + return NULL; +} + +static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size) +{ + hwaddr addr = phys_offset & TARGET_PAGE_MASK; + XenPhysmap *physmap = NULL; + + QLIST_FOREACH(physmap, &xen_physmap, list) { + if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) { + return physmap->start_addr + (phys_offset - physmap->phys_offset); + } + } + + return phys_offset; +} + +#ifdef XEN_COMPAT_PHYSMAP +static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap) +{ + char path[80], value[17]; + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr", + xen_domid, (uint64_t)physmap->phys_offset); + snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr); + if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { + return -1; + } + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size", + xen_domid, (uint64_t)physmap->phys_offset); + snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size); + if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { + return -1; + } + if (physmap->name) { + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name", + xen_domid, (uint64_t)physmap->phys_offset); + if (!xs_write(state->xenstore, 0, path, + physmap->name, strlen(physmap->name))) { + return -1; + } + } + return 0; +} +#else +static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap) +{ + return 0; +} +#endif + +static int xen_add_to_physmap(XenIOState *state, + hwaddr start_addr, + ram_addr_t size, + MemoryRegion *mr, + hwaddr offset_within_region) +{ + unsigned long nr_pages; + int rc = 0; + XenPhysmap *physmap = NULL; + hwaddr pfn, start_gpfn; + hwaddr phys_offset = memory_region_get_ram_addr(mr); + const char *mr_name; + + if (get_physmapping(start_addr, size)) { + return 0; + } + if (size <= 0) { + return -1; + } + + /* Xen can only handle a single dirty log region for now and we 
want + * the linear framebuffer to be that region. + * Avoid tracking any regions that is not videoram and avoid tracking + * the legacy vga region. */ + if (mr == framebuffer && start_addr > 0xbffff) { + goto go_physmap; + } + return -1; + +go_physmap: + DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n", + start_addr, start_addr + size); + + mr_name = memory_region_name(mr); + + physmap = g_malloc(sizeof(XenPhysmap)); + + physmap->start_addr = start_addr; + physmap->size = size; + physmap->name = mr_name; + physmap->phys_offset = phys_offset; + + QLIST_INSERT_HEAD(&xen_physmap, physmap, list); + + if (runstate_check(RUN_STATE_INMIGRATE)) { + /* Now when we have a physmap entry we can replace a dummy mapping with + * a real one of guest foreign memory. */ + uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size); + assert(p && p == memory_region_get_ram_ptr(mr)); + + return 0; + } + + pfn = phys_offset >> TARGET_PAGE_BITS; + start_gpfn = start_addr >> TARGET_PAGE_BITS; + nr_pages = size >> TARGET_PAGE_BITS; + rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn, + start_gpfn); + if (rc) { + int saved_errno = errno; + + error_report("relocate_memory %lu pages from GFN %"HWADDR_PRIx + " to GFN %"HWADDR_PRIx" failed: %s", + nr_pages, pfn, start_gpfn, strerror(saved_errno)); + errno = saved_errno; + return -1; + } + + rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid, + start_addr >> TARGET_PAGE_BITS, + (start_addr + size - 1) >> TARGET_PAGE_BITS, + XEN_DOMCTL_MEM_CACHEATTR_WB); + if (rc) { + error_report("pin_memory_cacheattr failed: %s", strerror(errno)); + } + return xen_save_physmap(state, physmap); +} + +static int xen_remove_from_physmap(XenIOState *state, + hwaddr start_addr, + ram_addr_t size) +{ + int rc = 0; + XenPhysmap *physmap = NULL; + hwaddr phys_offset = 0; + + physmap = get_physmapping(start_addr, size); + if (physmap == NULL) { + return -1; + } + + phys_offset = physmap->phys_offset; + size = physmap->size; + + DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at " + "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset); + + size >>= TARGET_PAGE_BITS; + start_addr >>= TARGET_PAGE_BITS; + phys_offset >>= TARGET_PAGE_BITS; + rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr, + phys_offset); + if (rc) { + int saved_errno = errno; + + error_report("relocate_memory "RAM_ADDR_FMT" pages" + " from GFN %"HWADDR_PRIx + " to GFN %"HWADDR_PRIx" failed: %s", + size, start_addr, phys_offset, strerror(saved_errno)); + errno = saved_errno; + return -1; + } + + QLIST_REMOVE(physmap, list); + if (state->log_for_dirtybit == physmap) { + state->log_for_dirtybit = NULL; + g_free(state->dirty_bitmap); + state->dirty_bitmap = NULL; + } + g_free(physmap); + + return 0; +} + +static void xen_set_memory(struct MemoryListener *listener, + MemoryRegionSection *section, + bool add) +{ + XenIOState *state = container_of(listener, XenIOState, memory_listener); + hwaddr start_addr = section->offset_within_address_space; + ram_addr_t size = int128_get64(section->size); + bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA); + hvmmem_type_t mem_type; + + if (section->mr == &ram_memory) { + return; + } else { + if (add) { + xen_map_memory_section(xen_domid, state->ioservid, + section); + } else { + xen_unmap_memory_section(xen_domid, state->ioservid, + section); + } + } + + if (!memory_region_is_ram(section->mr)) { + return; + } + + if (log_dirty != add) { + return; + } + + 
trace_xen_client_set_memory(start_addr, size, log_dirty); + + start_addr &= TARGET_PAGE_MASK; + size = TARGET_PAGE_ALIGN(size); + + if (add) { + if (!memory_region_is_rom(section->mr)) { + xen_add_to_physmap(state, start_addr, size, + section->mr, section->offset_within_region); + } else { + mem_type = HVMMEM_ram_ro; + if (xen_set_mem_type(xen_domid, mem_type, + start_addr >> TARGET_PAGE_BITS, + size >> TARGET_PAGE_BITS)) { + DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n", + start_addr); + } + } + } else { + if (xen_remove_from_physmap(state, start_addr, size) < 0) { + DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr); + } + } +} + +static void xen_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + memory_region_ref(section->mr); + xen_set_memory(listener, section, true); +} + +static void xen_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + xen_set_memory(listener, section, false); + memory_region_unref(section->mr); +} + +static void xen_io_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + XenIOState *state = container_of(listener, XenIOState, io_listener); + MemoryRegion *mr = section->mr; + + if (mr->ops == &unassigned_io_ops) { + return; + } + + memory_region_ref(mr); + + xen_map_io_section(xen_domid, state->ioservid, section); +} + +static void xen_io_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + XenIOState *state = container_of(listener, XenIOState, io_listener); + MemoryRegion *mr = section->mr; + + if (mr->ops == &unassigned_io_ops) { + return; + } + + xen_unmap_io_section(xen_domid, state->ioservid, section); + + memory_region_unref(mr); +} + +static void xen_device_realize(DeviceListener *listener, + DeviceState *dev) +{ + XenIOState *state = container_of(listener, XenIOState, device_listener); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + PCIDevice *pci_dev = PCI_DEVICE(dev); + XenPciDevice *xendev = g_new(XenPciDevice, 1); + + xendev->pci_dev = pci_dev; + xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev), + pci_dev->devfn); + QLIST_INSERT_HEAD(&state->dev_list, xendev, entry); + + xen_map_pcidev(xen_domid, state->ioservid, pci_dev); + } +} + +static void xen_device_unrealize(DeviceListener *listener, + DeviceState *dev) +{ + XenIOState *state = container_of(listener, XenIOState, device_listener); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + PCIDevice *pci_dev = PCI_DEVICE(dev); + XenPciDevice *xendev, *next; + + xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev); + + QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) { + if (xendev->pci_dev == pci_dev) { + QLIST_REMOVE(xendev, entry); + g_free(xendev); + break; + } + } + } +} + +static void xen_sync_dirty_bitmap(XenIOState *state, + hwaddr start_addr, + ram_addr_t size) +{ + hwaddr npages = size >> TARGET_PAGE_BITS; + const int width = sizeof(unsigned long) * 8; + size_t bitmap_size = DIV_ROUND_UP(npages, width); + int rc, i, j; + const XenPhysmap *physmap = NULL; + + physmap = get_physmapping(start_addr, size); + if (physmap == NULL) { + /* not handled */ + return; + } + + if (state->log_for_dirtybit == NULL) { + state->log_for_dirtybit = physmap; + state->dirty_bitmap = g_new(unsigned long, bitmap_size); + } else if (state->log_for_dirtybit != physmap) { + /* Only one range for dirty bitmap can be tracked. 
*/ + return; + } + + rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS, + npages, state->dirty_bitmap); + if (rc < 0) { +#ifndef ENODATA +#define ENODATA ENOENT +#endif + if (errno == ENODATA) { + memory_region_set_dirty(framebuffer, 0, size); + DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx + ", 0x" TARGET_FMT_plx "): %s\n", + start_addr, start_addr + size, strerror(errno)); + } + return; + } + + for (i = 0; i < bitmap_size; i++) { + unsigned long map = state->dirty_bitmap[i]; + while (map != 0) { + j = ctzl(map); + map &= ~(1ul << j); + memory_region_set_dirty(framebuffer, + (i * width + j) * TARGET_PAGE_SIZE, + TARGET_PAGE_SIZE); + }; + } +} + +static void xen_log_start(MemoryListener *listener, + MemoryRegionSection *section, + int old, int new) +{ + XenIOState *state = container_of(listener, XenIOState, memory_listener); + + if (new & ~old & (1 << DIRTY_MEMORY_VGA)) { + xen_sync_dirty_bitmap(state, section->offset_within_address_space, + int128_get64(section->size)); + } +} + +static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section, + int old, int new) +{ + XenIOState *state = container_of(listener, XenIOState, memory_listener); + + if (old & ~new & (1 << DIRTY_MEMORY_VGA)) { + state->log_for_dirtybit = NULL; + g_free(state->dirty_bitmap); + state->dirty_bitmap = NULL; + /* Disable dirty bit tracking */ + xen_track_dirty_vram(xen_domid, 0, 0, NULL); + } +} + +static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section) +{ + XenIOState *state = container_of(listener, XenIOState, memory_listener); + + xen_sync_dirty_bitmap(state, section->offset_within_address_space, + int128_get64(section->size)); +} + +static void xen_log_global_start(MemoryListener *listener) +{ + if (xen_enabled()) { + xen_in_migration = true; + } +} + +static void xen_log_global_stop(MemoryListener *listener) +{ + xen_in_migration = false; +} + +static MemoryListener xen_memory_listener = { + .name = "xen-memory", + .region_add = xen_region_add, + .region_del = xen_region_del, + .log_start = xen_log_start, + .log_stop = xen_log_stop, + .log_sync = xen_log_sync, + .log_global_start = xen_log_global_start, + .log_global_stop = xen_log_global_stop, + .priority = 10, +}; + +static MemoryListener xen_io_listener = { + .name = "xen-io", + .region_add = xen_io_add, + .region_del = xen_io_del, + .priority = 10, +}; + +static DeviceListener xen_device_listener = { + .realize = xen_device_realize, + .unrealize = xen_device_unrealize, +}; + +/* get the ioreq packets from share mem */ +static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu) +{ + ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu); + + if (req->state != STATE_IOREQ_READY) { + DPRINTF("I/O request not ready: " + "%x, ptr: %x, port: %"PRIx64", " + "data: %"PRIx64", count: %u, size: %u\n", + req->state, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + return NULL; + } + + xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */ + + req->state = STATE_IOREQ_INPROCESS; + return req; +} + +/* use poll to get the port notification */ +/* ioreq_vec--out,the */ +/* retval--the number of ioreq packet */ +static ioreq_t *cpu_get_ioreq(XenIOState *state) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + unsigned int max_cpus = ms->smp.max_cpus; + int i; + evtchn_port_t port; + + port = xenevtchn_pending(state->xce_handle); + if (port == state->bufioreq_local_port) { + timer_mod(state->buffered_io_timer, + BUFFER_IO_MAX_DELAY + 
qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); + return NULL; + } + + if (port != -1) { + for (i = 0; i < max_cpus; i++) { + if (state->ioreq_local_port[i] == port) { + break; + } + } + + if (i == max_cpus) { + hw_error("Fatal error while trying to get io event!\n"); + } + + /* unmask the wanted port again */ + xenevtchn_unmask(state->xce_handle, port); + + /* get the io packet from shared memory */ + state->send_vcpu = i; + return cpu_get_ioreq_from_shared_memory(state, i); + } + + /* read error or read nothing */ + return NULL; +} + +static uint32_t do_inp(uint32_t addr, unsigned long size) +{ + switch (size) { + case 1: + return cpu_inb(addr); + case 2: + return cpu_inw(addr); + case 4: + return cpu_inl(addr); + default: + hw_error("inp: bad size: %04x %lx", addr, size); + } +} + +static void do_outp(uint32_t addr, + unsigned long size, uint32_t val) +{ + switch (size) { + case 1: + return cpu_outb(addr, val); + case 2: + return cpu_outw(addr, val); + case 4: + return cpu_outl(addr, val); + default: + hw_error("outp: bad size: %04x %lx", addr, size); + } +} + +/* + * Helper functions which read/write an object from/to physical guest + * memory, as part of the implementation of an ioreq. + * + * Equivalent to + * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i, + * val, req->size, 0/1) + * except without the integer overflow problems. + */ +static void rw_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val, int rw) +{ + /* Do everything unsigned so overflow just results in a truncated result + * and accesses to undesired parts of guest memory, which is up + * to the guest */ + hwaddr offset = (hwaddr)req->size * i; + if (req->df) { + addr -= offset; + } else { + addr += offset; + } + cpu_physical_memory_rw(addr, val, req->size, rw); +} + +static inline void read_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val) +{ + rw_phys_req_item(addr, req, i, val, 0); +} +static inline void write_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val) +{ + rw_phys_req_item(addr, req, i, val, 1); +} + + +static void cpu_ioreq_pio(ioreq_t *req) +{ + uint32_t i; + + trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + + if (req->size > sizeof(uint32_t)) { + hw_error("PIO: bad size (%u)", req->size); + } + + if (req->dir == IOREQ_READ) { + if (!req->data_is_ptr) { + req->data = do_inp(req->addr, req->size); + trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr, + req->size); + } else { + uint32_t tmp; + + for (i = 0; i < req->count; i++) { + tmp = do_inp(req->addr, req->size); + write_phys_req_item(req->data, req, i, &tmp); + } + } + } else if (req->dir == IOREQ_WRITE) { + if (!req->data_is_ptr) { + trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr, + req->size); + do_outp(req->addr, req->size, req->data); + } else { + for (i = 0; i < req->count; i++) { + uint32_t tmp = 0; + + read_phys_req_item(req->data, req, i, &tmp); + do_outp(req->addr, req->size, tmp); + } + } + } +} + +static void cpu_ioreq_move(ioreq_t *req) +{ + uint32_t i; + + trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + + if (req->size > sizeof(req->data)) { + hw_error("MMIO: bad size (%u)", req->size); + } + + if (!req->data_is_ptr) { + if (req->dir == IOREQ_READ) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->addr, req, i, &req->data); + } + } else if (req->dir == IOREQ_WRITE) { + for (i = 0; i < req->count; i++) { + 
write_phys_req_item(req->addr, req, i, &req->data); + } + } + } else { + uint64_t tmp; + + if (req->dir == IOREQ_READ) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->addr, req, i, &tmp); + write_phys_req_item(req->data, req, i, &tmp); + } + } else if (req->dir == IOREQ_WRITE) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->data, req, i, &tmp); + write_phys_req_item(req->addr, req, i, &tmp); + } + } + } +} + +static void cpu_ioreq_config(XenIOState *state, ioreq_t *req) +{ + uint32_t sbdf = req->addr >> 32; + uint32_t reg = req->addr; + XenPciDevice *xendev; + + if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) && + req->size != sizeof(uint32_t)) { + hw_error("PCI config access: bad size (%u)", req->size); + } + + if (req->count != 1) { + hw_error("PCI config access: bad count (%u)", req->count); + } + + QLIST_FOREACH(xendev, &state->dev_list, entry) { + if (xendev->sbdf != sbdf) { + continue; + } + + if (!req->data_is_ptr) { + if (req->dir == IOREQ_READ) { + req->data = pci_host_config_read_common( + xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, + req->size); + trace_cpu_ioreq_config_read(req, xendev->sbdf, reg, + req->size, req->data); + } else if (req->dir == IOREQ_WRITE) { + trace_cpu_ioreq_config_write(req, xendev->sbdf, reg, + req->size, req->data); + pci_host_config_write_common( + xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, + req->data, req->size); + } + } else { + uint32_t tmp; + + if (req->dir == IOREQ_READ) { + tmp = pci_host_config_read_common( + xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, + req->size); + trace_cpu_ioreq_config_read(req, xendev->sbdf, reg, + req->size, tmp); + write_phys_req_item(req->data, req, 0, &tmp); + } else if (req->dir == IOREQ_WRITE) { + read_phys_req_item(req->data, req, 0, &tmp); + trace_cpu_ioreq_config_write(req, xendev->sbdf, reg, + req->size, tmp); + pci_host_config_write_common( + xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, + tmp, req->size); + } + } + } +} + +static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req) +{ + X86CPU *cpu; + CPUX86State *env; + + cpu = X86_CPU(current_cpu); + env = &cpu->env; + env->regs[R_EAX] = req->data; + env->regs[R_EBX] = vmport_regs->ebx; + env->regs[R_ECX] = vmport_regs->ecx; + env->regs[R_EDX] = vmport_regs->edx; + env->regs[R_ESI] = vmport_regs->esi; + env->regs[R_EDI] = vmport_regs->edi; +} + +static void regs_from_cpu(vmware_regs_t *vmport_regs) +{ + X86CPU *cpu = X86_CPU(current_cpu); + CPUX86State *env = &cpu->env; + + vmport_regs->ebx = env->regs[R_EBX]; + vmport_regs->ecx = env->regs[R_ECX]; + vmport_regs->edx = env->regs[R_EDX]; + vmport_regs->esi = env->regs[R_ESI]; + vmport_regs->edi = env->regs[R_EDI]; +} + +static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req) +{ + vmware_regs_t *vmport_regs; + + assert(state->shared_vmport_page); + vmport_regs = + &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu]; + QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs)); + + current_cpu = state->cpu_by_vcpu_id[state->send_vcpu]; + regs_to_cpu(vmport_regs, req); + cpu_ioreq_pio(req); + regs_from_cpu(vmport_regs); + current_cpu = NULL; +} + +static void handle_ioreq(XenIOState *state, ioreq_t *req) +{ + trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr, + req->addr, req->data, req->count, req->size); + + if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) && + (req->size < sizeof (target_ulong))) { + req->data &= ((target_ulong) 1 << (8 * req->size)) - 1; + } + + if (req->dir == IOREQ_WRITE) + 
trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr, + req->addr, req->data, req->count, req->size); + + switch (req->type) { + case IOREQ_TYPE_PIO: + cpu_ioreq_pio(req); + break; + case IOREQ_TYPE_COPY: + cpu_ioreq_move(req); + break; + case IOREQ_TYPE_VMWARE_PORT: + handle_vmport_ioreq(state, req); + break; + case IOREQ_TYPE_TIMEOFFSET: + break; + case IOREQ_TYPE_INVALIDATE: + xen_invalidate_map_cache(); + break; + case IOREQ_TYPE_PCI_CONFIG: + cpu_ioreq_config(state, req); + break; + default: + hw_error("Invalid ioreq type 0x%x\n", req->type); + } + if (req->dir == IOREQ_READ) { + trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr, + req->addr, req->data, req->count, req->size); + } +} + +static int handle_buffered_iopage(XenIOState *state) +{ + buffered_iopage_t *buf_page = state->buffered_io_page; + buf_ioreq_t *buf_req = NULL; + ioreq_t req; + int qw; + + if (!buf_page) { + return 0; + } + + memset(&req, 0x00, sizeof(req)); + req.state = STATE_IOREQ_READY; + req.count = 1; + req.dir = IOREQ_WRITE; + + for (;;) { + uint32_t rdptr = buf_page->read_pointer, wrptr; + + xen_rmb(); + wrptr = buf_page->write_pointer; + xen_rmb(); + if (rdptr != buf_page->read_pointer) { + continue; + } + if (rdptr == wrptr) { + break; + } + buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM]; + req.size = 1U << buf_req->size; + req.addr = buf_req->addr; + req.data = buf_req->data; + req.type = buf_req->type; + xen_rmb(); + qw = (req.size == 8); + if (qw) { + if (rdptr + 1 == wrptr) { + hw_error("Incomplete quad word buffered ioreq"); + } + buf_req = &buf_page->buf_ioreq[(rdptr + 1) % + IOREQ_BUFFER_SLOT_NUM]; + req.data |= ((uint64_t)buf_req->data) << 32; + xen_rmb(); + } + + handle_ioreq(state, &req); + + /* Only req.data may get updated by handle_ioreq(), albeit even that + * should not happen as such data would never make it to the guest (we + * can only usefully see writes here after all). + */ + assert(req.state == STATE_IOREQ_READY); + assert(req.count == 1); + assert(req.dir == IOREQ_WRITE); + assert(!req.data_is_ptr); + + qatomic_add(&buf_page->read_pointer, qw + 1); + } + + return req.count; +} + +static void handle_buffered_io(void *opaque) +{ + XenIOState *state = opaque; + + if (handle_buffered_iopage(state)) { + timer_mod(state->buffered_io_timer, + BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); + } else { + timer_del(state->buffered_io_timer); + xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port); + } +} + +static void cpu_handle_ioreq(void *opaque) +{ + XenIOState *state = opaque; + ioreq_t *req = cpu_get_ioreq(state); + + handle_buffered_iopage(state); + if (req) { + ioreq_t copy = *req; + + xen_rmb(); + handle_ioreq(state, ©); + req->data = copy.data; + + if (req->state != STATE_IOREQ_INPROCESS) { + fprintf(stderr, "Badness in I/O request ... not in service?!: " + "%x, ptr: %x, port: %"PRIx64", " + "data: %"PRIx64", count: %u, size: %u, type: %u\n", + req->state, req->data_is_ptr, req->addr, + req->data, req->count, req->size, req->type); + destroy_hvm_domain(false); + return; + } + + xen_wmb(); /* Update ioreq contents /then/ update state. */ + + /* + * We do this before we send the response so that the tools + * have the opportunity to pick up on the reset before the + * guest resumes and does a hlt with interrupts disabled which + * causes Xen to powerdown the domain. 
+ */ + if (runstate_is_running()) { + ShutdownCause request; + + if (qemu_shutdown_requested_get()) { + destroy_hvm_domain(false); + } + request = qemu_reset_requested_get(); + if (request) { + qemu_system_reset(request); + destroy_hvm_domain(true); + } + } + + req->state = STATE_IORESP_READY; + xenevtchn_notify(state->xce_handle, + state->ioreq_local_port[state->send_vcpu]); + } +} + +static void xen_main_loop_prepare(XenIOState *state) +{ + int evtchn_fd = -1; + + if (state->xce_handle != NULL) { + evtchn_fd = xenevtchn_fd(state->xce_handle); + } + + state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io, + state); + + if (evtchn_fd != -1) { + CPUState *cpu_state; + + DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__); + CPU_FOREACH(cpu_state) { + DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n", + __func__, cpu_state->cpu_index, cpu_state); + state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state; + } + qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state); + } +} + + +static void xen_hvm_change_state_handler(void *opaque, bool running, + RunState rstate) +{ + XenIOState *state = opaque; + + if (running) { + xen_main_loop_prepare(state); + } + + xen_set_ioreq_server_state(xen_domid, + state->ioservid, + (rstate == RUN_STATE_RUNNING)); +} + +static void xen_exit_notifier(Notifier *n, void *data) +{ + XenIOState *state = container_of(n, XenIOState, exit); + + xen_destroy_ioreq_server(xen_domid, state->ioservid); + if (state->fres != NULL) { + xenforeignmemory_unmap_resource(xen_fmem, state->fres); + } + + xenevtchn_close(state->xce_handle); + xs_daemon_close(state->xenstore); +} + +#ifdef XEN_COMPAT_PHYSMAP +static void xen_read_physmap(XenIOState *state) +{ + XenPhysmap *physmap = NULL; + unsigned int len, num, i; + char path[80], *value = NULL; + char **entries = NULL; + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap", xen_domid); + entries = xs_directory(state->xenstore, 0, path, &num); + if (entries == NULL) + return; + + for (i = 0; i < num; i++) { + physmap = g_malloc(sizeof (XenPhysmap)); + physmap->phys_offset = strtoull(entries[i], NULL, 16); + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%s/start_addr", + xen_domid, entries[i]); + value = xs_read(state->xenstore, 0, path, &len); + if (value == NULL) { + g_free(physmap); + continue; + } + physmap->start_addr = strtoull(value, NULL, 16); + free(value); + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%s/size", + xen_domid, entries[i]); + value = xs_read(state->xenstore, 0, path, &len); + if (value == NULL) { + g_free(physmap); + continue; + } + physmap->size = strtoull(value, NULL, 16); + free(value); + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%s/name", + xen_domid, entries[i]); + physmap->name = xs_read(state->xenstore, 0, path, &len); + + QLIST_INSERT_HEAD(&xen_physmap, physmap, list); + } + free(entries); +} +#else +static void xen_read_physmap(XenIOState *state) +{ +} +#endif + +static void xen_wakeup_notifier(Notifier *notifier, void *data) +{ + xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0); +} + +static int xen_map_ioreq_server(XenIOState *state) +{ + void *addr = NULL; + xen_pfn_t ioreq_pfn; + xen_pfn_t bufioreq_pfn; + evtchn_port_t bufioreq_evtchn; + int rc; + + /* + * Attempt to map using the resource API and fall back to normal + * foreign mapping if this is not supported. 
+ */ + QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0); + QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1); + state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid, + XENMEM_resource_ioreq_server, + state->ioservid, 0, 2, + &addr, + PROT_READ | PROT_WRITE, 0); + if (state->fres != NULL) { + trace_xen_map_resource_ioreq(state->ioservid, addr); + state->buffered_io_page = addr; + state->shared_page = addr + TARGET_PAGE_SIZE; + } else if (errno != EOPNOTSUPP) { + error_report("failed to map ioreq server resources: error %d handle=%p", + errno, xen_xc); + return -1; + } + + rc = xen_get_ioreq_server_info(xen_domid, state->ioservid, + (state->shared_page == NULL) ? + &ioreq_pfn : NULL, + (state->buffered_io_page == NULL) ? + &bufioreq_pfn : NULL, + &bufioreq_evtchn); + if (rc < 0) { + error_report("failed to get ioreq server info: error %d handle=%p", + errno, xen_xc); + return rc; + } + + if (state->shared_page == NULL) { + DPRINTF("shared page at pfn %lx\n", ioreq_pfn); + + state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid, + PROT_READ | PROT_WRITE, + 1, &ioreq_pfn, NULL); + if (state->shared_page == NULL) { + error_report("map shared IO page returned error %d handle=%p", + errno, xen_xc); + } + } + + if (state->buffered_io_page == NULL) { + DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn); + + state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid, + PROT_READ | PROT_WRITE, + 1, &bufioreq_pfn, + NULL); + if (state->buffered_io_page == NULL) { + error_report("map buffered IO page returned error %d", errno); + return -1; + } + } + + if (state->shared_page == NULL || state->buffered_io_page == NULL) { + return -1; + } + + DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn); + + state->bufioreq_remote_port = bufioreq_evtchn; + + return 0; +} + +void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) +{ + MachineState *ms = MACHINE(pcms); + unsigned int max_cpus = ms->smp.max_cpus; + int i, rc; + xen_pfn_t ioreq_pfn; + XenIOState *state; + + state = g_malloc0(sizeof (XenIOState)); + + state->xce_handle = xenevtchn_open(NULL, 0); + if (state->xce_handle == NULL) { + perror("xen: event channel open"); + goto err; + } + + state->xenstore = xs_daemon_open(); + if (state->xenstore == NULL) { + perror("xen: xenstore open"); + goto err; + } + + xen_create_ioreq_server(xen_domid, &state->ioservid); + + state->exit.notify = xen_exit_notifier; + qemu_add_exit_notifier(&state->exit); + + state->suspend.notify = xen_suspend_notifier; + qemu_register_suspend_notifier(&state->suspend); + + state->wakeup.notify = xen_wakeup_notifier; + qemu_register_wakeup_notifier(&state->wakeup); + + /* + * Register wake-up support in QMP query-current-machine API + */ + qemu_register_wakeup_support(); + + rc = xen_map_ioreq_server(state); + if (rc < 0) { + goto err; + } + + rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn); + if (!rc) { + DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn); + state->shared_vmport_page = + xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE, + 1, &ioreq_pfn, NULL); + if (state->shared_vmport_page == NULL) { + error_report("map shared vmport IO page returned error %d handle=%p", + errno, xen_xc); + goto err; + } + } else if (rc != -ENOSYS) { + error_report("get vmport regs pfn returned error %d, rc=%d", + errno, rc); + goto err; + } + + /* Note: cpus is empty at this point in init */ + state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *)); + + rc = 
xen_set_ioreq_server_state(xen_domid, state->ioservid, true); + if (rc < 0) { + error_report("failed to enable ioreq server info: error %d handle=%p", + errno, xen_xc); + goto err; + } + + state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t)); + + /* FIXME: how about if we overflow the page here? */ + for (i = 0; i < max_cpus; i++) { + rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, + xen_vcpu_eport(state->shared_page, i)); + if (rc == -1) { + error_report("shared evtchn %d bind error %d", i, errno); + goto err; + } + state->ioreq_local_port[i] = rc; + } + + rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, + state->bufioreq_remote_port); + if (rc == -1) { + error_report("buffered evtchn bind error %d", errno); + goto err; + } + state->bufioreq_local_port = rc; + + /* Init RAM management */ +#ifdef XEN_COMPAT_PHYSMAP + xen_map_cache_init(xen_phys_offset_to_gaddr, state); +#else + xen_map_cache_init(NULL, state); +#endif + xen_ram_init(pcms, ms->ram_size, ram_memory); + + qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); + + state->memory_listener = xen_memory_listener; + memory_listener_register(&state->memory_listener, &address_space_memory); + state->log_for_dirtybit = NULL; + + state->io_listener = xen_io_listener; + memory_listener_register(&state->io_listener, &address_space_io); + + state->device_listener = xen_device_listener; + QLIST_INIT(&state->dev_list); + device_listener_register(&state->device_listener); + + xen_bus_init(); + + /* Initialize backend core & drivers */ + if (xen_be_init() != 0) { + error_report("xen backend core setup failed"); + goto err; + } + xen_be_register_common(); + + QLIST_INIT(&xen_physmap); + xen_read_physmap(state); + + /* Disable ACPI build because Xen handles it */ + pcms->acpi_build_enabled = false; + + return; + +err: + error_report("xen hardware virtual machine initialisation failed"); + exit(1); +} + +void destroy_hvm_domain(bool reboot) +{ + xc_interface *xc_handle; + int sts; + int rc; + + unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff; + + if (xen_dmod) { + rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason); + if (!rc) { + return; + } + if (errno != ENOTTY /* old Xen */) { + perror("xendevicemodel_shutdown failed"); + } + /* well, try the old thing then */ + } + + xc_handle = xc_interface_open(0, 0, 0); + if (xc_handle == NULL) { + fprintf(stderr, "Cannot acquire xenctrl handle\n"); + } else { + sts = xc_domain_shutdown(xc_handle, xen_domid, reason); + if (sts != 0) { + fprintf(stderr, "xc_domain_shutdown failed to issue %s, " + "sts %d, %s\n", reboot ? "reboot" : "poweroff", + sts, strerror(errno)); + } else { + fprintf(stderr, "Issued domain %d %s\n", xen_domid, + reboot ? "reboot" : "poweroff"); + } + xc_interface_close(xc_handle); + } +} + +void xen_register_framebuffer(MemoryRegion *mr) +{ + framebuffer = mr; +} + +void xen_shutdown_fatal_error(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + fprintf(stderr, "Will destroy the domain.\n"); + /* destroy the domain */ + qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR); +} + +void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) +{ + if (unlikely(xen_in_migration)) { + int rc; + ram_addr_t start_pfn, nb_pages; + + start = xen_phys_offset_to_gaddr(start, length); + + if (length == 0) { + length = TARGET_PAGE_SIZE; + } + start_pfn = start >> TARGET_PAGE_BITS; + nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS) + - start_pfn; + rc = xen_modified_memory(xen_domid, start_pfn, nb_pages); + if (rc) { + fprintf(stderr, + "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n", + __func__, start, nb_pages, errno, strerror(errno)); + } + } +} + +void qmp_xen_set_global_dirty_log(bool enable, Error **errp) +{ + if (enable) { + memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); + } else { + memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); + } +} diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c new file mode 100644 index 000000000..bd47c3d67 --- /dev/null +++ b/hw/i386/xen/xen-mapcache.c @@ -0,0 +1,593 @@ +/* + * Copyright (C) 2011 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" + +#include <sys/resource.h> + +#include "hw/xen/xen-legacy-backend.h" +#include "qemu/bitmap.h" + +#include "sysemu/runstate.h" +#include "sysemu/xen-mapcache.h" +#include "trace.h" + + +//#define MAPCACHE_DEBUG + +#ifdef MAPCACHE_DEBUG +# define DPRINTF(fmt, ...) do { \ + fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \ +} while (0) +#else +# define DPRINTF(fmt, ...) do { } while (0) +#endif + +#if HOST_LONG_BITS == 32 +# define MCACHE_BUCKET_SHIFT 16 +# define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */ +#else +# define MCACHE_BUCKET_SHIFT 20 +# define MCACHE_MAX_SIZE (1UL<<35) /* 32GB Cap */ +#endif +#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT) + +/* This is the size of the virtual address space reserve to QEMU that will not + * be use by MapCache. + * From empirical tests I observed that qemu use 75MB more than the + * max_mcache_size. + */ +#define NON_MCACHE_MEMORY_SIZE (80 * MiB) + +typedef struct MapCacheEntry { + hwaddr paddr_index; + uint8_t *vaddr_base; + unsigned long *valid_mapping; + uint8_t lock; +#define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0) + uint8_t flags; + hwaddr size; + struct MapCacheEntry *next; +} MapCacheEntry; + +typedef struct MapCacheRev { + uint8_t *vaddr_req; + hwaddr paddr_index; + hwaddr size; + QTAILQ_ENTRY(MapCacheRev) next; + bool dma; +} MapCacheRev; + +typedef struct MapCache { + MapCacheEntry *entry; + unsigned long nr_buckets; + QTAILQ_HEAD(, MapCacheRev) locked_entries; + + /* For most cases (>99.9%), the page address is the same. 
*/ + MapCacheEntry *last_entry; + unsigned long max_mcache_size; + unsigned int mcache_bucket_shift; + + phys_offset_to_gaddr_t phys_offset_to_gaddr; + QemuMutex lock; + void *opaque; +} MapCache; + +static MapCache *mapcache; + +static inline void mapcache_lock(void) +{ + qemu_mutex_lock(&mapcache->lock); +} + +static inline void mapcache_unlock(void) +{ + qemu_mutex_unlock(&mapcache->lock); +} + +static inline int test_bits(int nr, int size, const unsigned long *addr) +{ + unsigned long res = find_next_zero_bit(addr, size + nr, nr); + if (res >= nr + size) + return 1; + else + return 0; +} + +void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque) +{ + unsigned long size; + struct rlimit rlimit_as; + + mapcache = g_malloc0(sizeof (MapCache)); + + mapcache->phys_offset_to_gaddr = f; + mapcache->opaque = opaque; + qemu_mutex_init(&mapcache->lock); + + QTAILQ_INIT(&mapcache->locked_entries); + + if (geteuid() == 0) { + rlimit_as.rlim_cur = RLIM_INFINITY; + rlimit_as.rlim_max = RLIM_INFINITY; + mapcache->max_mcache_size = MCACHE_MAX_SIZE; + } else { + getrlimit(RLIMIT_AS, &rlimit_as); + rlimit_as.rlim_cur = rlimit_as.rlim_max; + + if (rlimit_as.rlim_max != RLIM_INFINITY) { + warn_report("QEMU's maximum size of virtual" + " memory is not infinity"); + } + if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) { + mapcache->max_mcache_size = rlimit_as.rlim_max - + NON_MCACHE_MEMORY_SIZE; + } else { + mapcache->max_mcache_size = MCACHE_MAX_SIZE; + } + } + + setrlimit(RLIMIT_AS, &rlimit_as); + + mapcache->nr_buckets = + (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) + + (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >> + (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)); + + size = mapcache->nr_buckets * sizeof (MapCacheEntry); + size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1); + DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__, + mapcache->nr_buckets, size); + mapcache->entry = g_malloc0(size); +} + +static void xen_remap_bucket(MapCacheEntry *entry, + void *vaddr, + hwaddr size, + hwaddr address_index, + bool dummy) +{ + uint8_t *vaddr_base; + xen_pfn_t *pfns; + int *err; + unsigned int i; + hwaddr nb_pfn = size >> XC_PAGE_SHIFT; + + trace_xen_remap_bucket(address_index); + + pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t)); + err = g_malloc0(nb_pfn * sizeof (int)); + + if (entry->vaddr_base != NULL) { + if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) { + ram_block_notify_remove(entry->vaddr_base, entry->size, + entry->size); + } + + /* + * If an entry is being replaced by another mapping and we're using + * MAP_FIXED flag for it - there is possibility of a race for vaddr + * address with another thread doing an mmap call itself + * (see man 2 mmap). To avoid that we skip explicit unmapping here + * and allow the kernel to destroy the previous mappings by replacing + * them in mmap call later. + * + * Non-identical replacements are not allowed therefore. + */ + assert(!vaddr || (entry->vaddr_base == vaddr && entry->size == size)); + + if (!vaddr && munmap(entry->vaddr_base, entry->size) != 0) { + perror("unmap fails"); + exit(-1); + } + } + g_free(entry->valid_mapping); + entry->valid_mapping = NULL; + + for (i = 0; i < nb_pfn; i++) { + pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i; + } + + /* + * If the caller has requested the mapping at a specific address use + * MAP_FIXED to make sure it's honored. + */ + if (!dummy) { + vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr, + PROT_READ | PROT_WRITE, + vaddr ? 
MAP_FIXED : 0, + nb_pfn, pfns, err); + if (vaddr_base == NULL) { + perror("xenforeignmemory_map2"); + exit(-1); + } + } else { + /* + * We create dummy mappings where we are unable to create a foreign + * mapping immediately due to certain circumstances (i.e. on resume now) + */ + vaddr_base = mmap(vaddr, size, PROT_READ | PROT_WRITE, + MAP_ANON | MAP_SHARED | (vaddr ? MAP_FIXED : 0), + -1, 0); + if (vaddr_base == MAP_FAILED) { + perror("mmap"); + exit(-1); + } + } + + if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) { + ram_block_notify_add(vaddr_base, size, size); + } + + entry->vaddr_base = vaddr_base; + entry->paddr_index = address_index; + entry->size = size; + entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) * + BITS_TO_LONGS(size >> XC_PAGE_SHIFT)); + + if (dummy) { + entry->flags |= XEN_MAPCACHE_ENTRY_DUMMY; + } else { + entry->flags &= ~(XEN_MAPCACHE_ENTRY_DUMMY); + } + + bitmap_zero(entry->valid_mapping, nb_pfn); + for (i = 0; i < nb_pfn; i++) { + if (!err[i]) { + bitmap_set(entry->valid_mapping, i, 1); + } + } + + g_free(pfns); + g_free(err); +} + +static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size, + uint8_t lock, bool dma) +{ + MapCacheEntry *entry, *pentry = NULL, + *free_entry = NULL, *free_pentry = NULL; + hwaddr address_index; + hwaddr address_offset; + hwaddr cache_size = size; + hwaddr test_bit_size; + bool translated G_GNUC_UNUSED = false; + bool dummy = false; + +tryagain: + address_index = phys_addr >> MCACHE_BUCKET_SHIFT; + address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1); + + trace_xen_map_cache(phys_addr); + + /* test_bit_size is always a multiple of XC_PAGE_SIZE */ + if (size) { + test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1)); + + if (test_bit_size % XC_PAGE_SIZE) { + test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE); + } + } else { + test_bit_size = XC_PAGE_SIZE; + } + + if (mapcache->last_entry != NULL && + mapcache->last_entry->paddr_index == address_index && + !lock && !size && + test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + mapcache->last_entry->valid_mapping)) { + trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); + return mapcache->last_entry->vaddr_base + address_offset; + } + + /* size is always a multiple of MCACHE_BUCKET_SIZE */ + if (size) { + cache_size = size + address_offset; + if (cache_size % MCACHE_BUCKET_SIZE) { + cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE); + } + } else { + cache_size = MCACHE_BUCKET_SIZE; + } + + entry = &mapcache->entry[address_index % mapcache->nr_buckets]; + + while (entry && (lock || entry->lock) && entry->vaddr_base && + (entry->paddr_index != address_index || entry->size != cache_size || + !test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping))) { + if (!free_entry && !entry->lock) { + free_entry = entry; + free_pentry = pentry; + } + pentry = entry; + entry = entry->next; + } + if (!entry && free_entry) { + entry = free_entry; + pentry = free_pentry; + } + if (!entry) { + entry = g_malloc0(sizeof (MapCacheEntry)); + pentry->next = entry; + xen_remap_bucket(entry, NULL, cache_size, address_index, dummy); + } else if (!entry->lock) { + if (!entry->vaddr_base || entry->paddr_index != address_index || + entry->size != cache_size || + !test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping)) { + xen_remap_bucket(entry, NULL, cache_size, address_index, dummy); + } + } + + 
if(!test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping)) { + mapcache->last_entry = NULL; +#ifdef XEN_COMPAT_PHYSMAP + if (!translated && mapcache->phys_offset_to_gaddr) { + phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size); + translated = true; + goto tryagain; + } +#endif + if (!dummy && runstate_check(RUN_STATE_INMIGRATE)) { + dummy = true; + goto tryagain; + } + trace_xen_map_cache_return(NULL); + return NULL; + } + + mapcache->last_entry = entry; + if (lock) { + MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev)); + entry->lock++; + reventry->dma = dma; + reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset; + reventry->paddr_index = mapcache->last_entry->paddr_index; + reventry->size = entry->size; + QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next); + } + + trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); + return mapcache->last_entry->vaddr_base + address_offset; +} + +uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size, + uint8_t lock, bool dma) +{ + uint8_t *p; + + mapcache_lock(); + p = xen_map_cache_unlocked(phys_addr, size, lock, dma); + mapcache_unlock(); + return p; +} + +ram_addr_t xen_ram_addr_from_mapcache(void *ptr) +{ + MapCacheEntry *entry = NULL; + MapCacheRev *reventry; + hwaddr paddr_index; + hwaddr size; + ram_addr_t raddr; + int found = 0; + + mapcache_lock(); + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + if (reventry->vaddr_req == ptr) { + paddr_index = reventry->paddr_index; + size = reventry->size; + found = 1; + break; + } + } + if (!found) { + fprintf(stderr, "%s, could not find %p\n", __func__, ptr); + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, + reventry->vaddr_req); + } + abort(); + return 0; + } + + entry = &mapcache->entry[paddr_index % mapcache->nr_buckets]; + while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { + entry = entry->next; + } + if (!entry) { + DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr); + raddr = 0; + } else { + raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) + + ((unsigned long) ptr - (unsigned long) entry->vaddr_base); + } + mapcache_unlock(); + return raddr; +} + +static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer) +{ + MapCacheEntry *entry = NULL, *pentry = NULL; + MapCacheRev *reventry; + hwaddr paddr_index; + hwaddr size; + int found = 0; + + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + if (reventry->vaddr_req == buffer) { + paddr_index = reventry->paddr_index; + size = reventry->size; + found = 1; + break; + } + } + if (!found) { + DPRINTF("%s, could not find %p\n", __func__, buffer); + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req); + } + return; + } + QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next); + g_free(reventry); + + if (mapcache->last_entry != NULL && + mapcache->last_entry->paddr_index == paddr_index) { + mapcache->last_entry = NULL; + } + + entry = &mapcache->entry[paddr_index % mapcache->nr_buckets]; + while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { + pentry = entry; + entry = entry->next; + } + if (!entry) { + DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer); + return; + } + entry->lock--; + if (entry->lock > 0 || pentry == 
NULL) { + return; + } + + pentry->next = entry->next; + ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size); + if (munmap(entry->vaddr_base, entry->size) != 0) { + perror("unmap fails"); + exit(-1); + } + g_free(entry->valid_mapping); + g_free(entry); +} + +void xen_invalidate_map_cache_entry(uint8_t *buffer) +{ + mapcache_lock(); + xen_invalidate_map_cache_entry_unlocked(buffer); + mapcache_unlock(); +} + +void xen_invalidate_map_cache(void) +{ + unsigned long i; + MapCacheRev *reventry; + + /* Flush pending AIO before destroying the mapcache */ + bdrv_drain_all(); + + mapcache_lock(); + + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + if (!reventry->dma) { + continue; + } + fprintf(stderr, "Locked DMA mapping while invalidating mapcache!" + " "TARGET_FMT_plx" -> %p is present\n", + reventry->paddr_index, reventry->vaddr_req); + } + + for (i = 0; i < mapcache->nr_buckets; i++) { + MapCacheEntry *entry = &mapcache->entry[i]; + + if (entry->vaddr_base == NULL) { + continue; + } + if (entry->lock > 0) { + continue; + } + + if (munmap(entry->vaddr_base, entry->size) != 0) { + perror("unmap fails"); + exit(-1); + } + + entry->paddr_index = 0; + entry->vaddr_base = NULL; + entry->size = 0; + g_free(entry->valid_mapping); + entry->valid_mapping = NULL; + } + + mapcache->last_entry = NULL; + + mapcache_unlock(); +} + +static uint8_t *xen_replace_cache_entry_unlocked(hwaddr old_phys_addr, + hwaddr new_phys_addr, + hwaddr size) +{ + MapCacheEntry *entry; + hwaddr address_index, address_offset; + hwaddr test_bit_size, cache_size = size; + + address_index = old_phys_addr >> MCACHE_BUCKET_SHIFT; + address_offset = old_phys_addr & (MCACHE_BUCKET_SIZE - 1); + + assert(size); + /* test_bit_size is always a multiple of XC_PAGE_SIZE */ + test_bit_size = size + (old_phys_addr & (XC_PAGE_SIZE - 1)); + if (test_bit_size % XC_PAGE_SIZE) { + test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE); + } + cache_size = size + address_offset; + if (cache_size % MCACHE_BUCKET_SIZE) { + cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE); + } + + entry = &mapcache->entry[address_index % mapcache->nr_buckets]; + while (entry && !(entry->paddr_index == address_index && + entry->size == cache_size)) { + entry = entry->next; + } + if (!entry) { + DPRINTF("Trying to update an entry for "TARGET_FMT_plx \ + "that is not in the mapcache!\n", old_phys_addr); + return NULL; + } + + address_index = new_phys_addr >> MCACHE_BUCKET_SHIFT; + address_offset = new_phys_addr & (MCACHE_BUCKET_SIZE - 1); + + fprintf(stderr, "Replacing a dummy mapcache entry for "TARGET_FMT_plx \ + " with "TARGET_FMT_plx"\n", old_phys_addr, new_phys_addr); + + xen_remap_bucket(entry, entry->vaddr_base, + cache_size, address_index, false); + if (!test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping)) { + DPRINTF("Unable to update a mapcache entry for "TARGET_FMT_plx"!\n", + old_phys_addr); + return NULL; + } + + return entry->vaddr_base + address_offset; +} + +uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr, + hwaddr new_phys_addr, + hwaddr size) +{ + uint8_t *p; + + mapcache_lock(); + p = xen_replace_cache_entry_unlocked(old_phys_addr, new_phys_addr, size); + mapcache_unlock(); + return p; +} diff --git a/hw/i386/xen/xen_apic.c b/hw/i386/xen/xen_apic.c new file mode 100644 index 000000000..7c7a60b16 --- /dev/null +++ b/hw/i386/xen/xen_apic.c @@ -0,0 +1,103 @@ +/* + * Xen basic APIC support + * + * Copyright (c) 2012 Citrix + * + * Authors: + * Wei 
Liu <wei.liu2@citrix.com> + * + * This work is licensed under the terms of the GNU GPL version 2 or + * later. See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/i386/apic_internal.h" +#include "hw/pci/msi.h" +#include "hw/xen/xen.h" +#include "qemu/module.h" + +static uint64_t xen_apic_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + return ~(uint64_t)0; +} + +static void xen_apic_mem_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + if (size != sizeof(uint32_t)) { + fprintf(stderr, "Xen: APIC write data size = %d, invalid\n", size); + return; + } + + xen_hvm_inject_msi(addr, data); +} + +static const MemoryRegionOps xen_apic_io_ops = { + .read = xen_apic_mem_read, + .write = xen_apic_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void xen_apic_realize(DeviceState *dev, Error **errp) +{ + APICCommonState *s = APIC_COMMON(dev); + + s->vapic_control = 0; + memory_region_init_io(&s->io_memory, OBJECT(s), &xen_apic_io_ops, s, + "xen-apic-msi", APIC_SPACE_SIZE); + msi_nonbroken = true; +} + +static void xen_apic_set_base(APICCommonState *s, uint64_t val) +{ +} + +static void xen_apic_set_tpr(APICCommonState *s, uint8_t val) +{ +} + +static uint8_t xen_apic_get_tpr(APICCommonState *s) +{ + return 0; +} + +static void xen_apic_vapic_base_update(APICCommonState *s) +{ +} + +static void xen_apic_external_nmi(APICCommonState *s) +{ +} + +static void xen_send_msi(MSIMessage *msi) +{ + xen_hvm_inject_msi(msi->address, msi->data); +} + +static void xen_apic_class_init(ObjectClass *klass, void *data) +{ + APICCommonClass *k = APIC_COMMON_CLASS(klass); + + k->realize = xen_apic_realize; + k->set_base = xen_apic_set_base; + k->set_tpr = xen_apic_set_tpr; + k->get_tpr = xen_apic_get_tpr; + k->vapic_base_update = xen_apic_vapic_base_update; + k->external_nmi = xen_apic_external_nmi; + k->send_msi = xen_send_msi; +} + +static const TypeInfo xen_apic_info = { + .name = "xen-apic", + .parent = TYPE_APIC_COMMON, + .instance_size = sizeof(APICCommonState), + .class_init = xen_apic_class_init, +}; + +static void xen_apic_register_types(void) +{ + type_register_static(&xen_apic_info); +} + +type_init(xen_apic_register_types) diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c new file mode 100644 index 000000000..72028449b --- /dev/null +++ b/hw/i386/xen/xen_platform.c @@ -0,0 +1,519 @@ +/* + * XEN platform pci device, formerly known as the event channel device + * + * Copyright (c) 2003-2004 Intel Corp. + * Copyright (c) 2006 XenSource + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/ide.h" +#include "hw/pci/pci.h" +#include "hw/xen/xen_common.h" +#include "migration/vmstate.h" +#include "hw/xen/xen-legacy-backend.h" +#include "trace.h" +#include "sysemu/xen.h" +#include "sysemu/block-backend.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qom/object.h" + +//#define DEBUG_PLATFORM + +#ifdef DEBUG_PLATFORM +#define DPRINTF(fmt, ...) do { \ + fprintf(stderr, "xen_platform: " fmt, ## __VA_ARGS__); \ +} while (0) +#else +#define DPRINTF(fmt, ...) do { } while (0) +#endif + +#define PFFLAG_ROM_LOCK 1 /* Sets whether ROM memory area is RW or RO */ + +struct PCIXenPlatformState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + MemoryRegion fixed_io; + MemoryRegion bar; + MemoryRegion mmio_bar; + uint8_t flags; /* used only for version_id == 2 */ + uint16_t driver_product_version; + + /* Log from guest drivers */ + char log_buffer[4096]; + int log_buffer_off; +}; + +#define TYPE_XEN_PLATFORM "xen-platform" +OBJECT_DECLARE_SIMPLE_TYPE(PCIXenPlatformState, XEN_PLATFORM) + +#define XEN_PLATFORM_IOPORT 0x10 + +/* Send bytes to syslog */ +static void log_writeb(PCIXenPlatformState *s, char val) +{ + if (val == '\n' || s->log_buffer_off == sizeof(s->log_buffer) - 1) { + /* Flush buffer */ + s->log_buffer[s->log_buffer_off] = 0; + trace_xen_platform_log(s->log_buffer); + s->log_buffer_off = 0; + } else { + s->log_buffer[s->log_buffer_off++] = val; + } +} + +/* + * Unplug device flags. + * + * The logic got a little confused at some point in the past but this is + * what they do now. + * + * bit 0: Unplug all IDE and SCSI disks. + * bit 1: Unplug all NICs. + * bit 2: Unplug IDE disks except primary master. This is overridden if + * bit 0 is also present in the mask. + * bit 3: Unplug all NVMe disks. + * + */ +#define _UNPLUG_IDE_SCSI_DISKS 0 +#define UNPLUG_IDE_SCSI_DISKS (1u << _UNPLUG_IDE_SCSI_DISKS) + +#define _UNPLUG_ALL_NICS 1 +#define UNPLUG_ALL_NICS (1u << _UNPLUG_ALL_NICS) + +#define _UNPLUG_AUX_IDE_DISKS 2 +#define UNPLUG_AUX_IDE_DISKS (1u << _UNPLUG_AUX_IDE_DISKS) + +#define _UNPLUG_NVME_DISKS 3 +#define UNPLUG_NVME_DISKS (1u << _UNPLUG_NVME_DISKS) + +static void unplug_nic(PCIBus *b, PCIDevice *d, void *o) +{ + /* We have to ignore passthrough devices */ + if (pci_get_word(d->config + PCI_CLASS_DEVICE) == + PCI_CLASS_NETWORK_ETHERNET + && strcmp(d->name, "xen-pci-passthrough") != 0) { + object_unparent(OBJECT(d)); + } +} + +/* Remove the peer of the NIC device. Normally, this would be a tap device. 
*/ +static void del_nic_peer(NICState *nic, void *opaque) +{ + NetClientState *nc; + + nc = qemu_get_queue(nic); + if (nc->peer) + qemu_del_net_client(nc->peer); +} + +static void pci_unplug_nics(PCIBus *bus) +{ + qemu_foreach_nic(del_nic_peer, NULL); + pci_for_each_device(bus, 0, unplug_nic, NULL); +} + +static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque) +{ + uint32_t flags = *(uint32_t *)opaque; + bool aux = (flags & UNPLUG_AUX_IDE_DISKS) && + !(flags & UNPLUG_IDE_SCSI_DISKS); + + /* We have to ignore passthrough devices */ + if (!strcmp(d->name, "xen-pci-passthrough")) { + return; + } + + switch (pci_get_word(d->config + PCI_CLASS_DEVICE)) { + case PCI_CLASS_STORAGE_IDE: + pci_piix3_xen_ide_unplug(DEVICE(d), aux); + break; + + case PCI_CLASS_STORAGE_SCSI: + if (!aux) { + object_unparent(OBJECT(d)); + } + break; + + case PCI_CLASS_STORAGE_EXPRESS: + if (flags & UNPLUG_NVME_DISKS) { + object_unparent(OBJECT(d)); + } + + default: + break; + } +} + +static void pci_unplug_disks(PCIBus *bus, uint32_t flags) +{ + pci_for_each_device(bus, 0, unplug_disks, &flags); +} + +static void platform_fixed_ioport_writew(void *opaque, uint32_t addr, uint32_t val) +{ + PCIXenPlatformState *s = opaque; + + switch (addr) { + case 0: { + PCIDevice *pci_dev = PCI_DEVICE(s); + /* Unplug devices. See comment above flag definitions */ + if (val & (UNPLUG_IDE_SCSI_DISKS | UNPLUG_AUX_IDE_DISKS | + UNPLUG_NVME_DISKS)) { + DPRINTF("unplug disks\n"); + pci_unplug_disks(pci_get_bus(pci_dev), val); + } + if (val & UNPLUG_ALL_NICS) { + DPRINTF("unplug nics\n"); + pci_unplug_nics(pci_get_bus(pci_dev)); + } + break; + } + case 2: + switch (val) { + case 1: + DPRINTF("Citrix Windows PV drivers loaded in guest\n"); + break; + case 0: + DPRINTF("Guest claimed to be running PV product 0?\n"); + break; + default: + DPRINTF("Unknown PV product %d loaded in guest\n", val); + break; + } + s->driver_product_version = val; + break; + } +} + +static void platform_fixed_ioport_writel(void *opaque, uint32_t addr, + uint32_t val) +{ + switch (addr) { + case 0: + /* PV driver version */ + break; + } +} + +static void platform_fixed_ioport_writeb(void *opaque, uint32_t addr, uint32_t val) +{ + PCIXenPlatformState *s = opaque; + + switch (addr) { + case 0: /* Platform flags */ { + hvmmem_type_t mem_type = (val & PFFLAG_ROM_LOCK) ? + HVMMEM_ram_ro : HVMMEM_ram_rw; + if (xen_set_mem_type(xen_domid, mem_type, 0xc0, 0x40)) { + DPRINTF("unable to change ro/rw state of ROM memory area!\n"); + } else { + s->flags = val & PFFLAG_ROM_LOCK; + DPRINTF("changed ro/rw state of ROM memory area. now is %s state.\n", + (mem_type == HVMMEM_ram_ro ? "ro":"rw")); + } + break; + } + case 2: + log_writeb(s, val); + break; + } +} + +static uint32_t platform_fixed_ioport_readw(void *opaque, uint32_t addr) +{ + switch (addr) { + case 0: + /* Magic value so that you can identify the interface. 
*/ + return 0x49d2; + default: + return 0xffff; + } +} + +static uint32_t platform_fixed_ioport_readb(void *opaque, uint32_t addr) +{ + PCIXenPlatformState *s = opaque; + + switch (addr) { + case 0: + /* Platform flags */ + return s->flags; + case 2: + /* Version number */ + return 1; + default: + return 0xff; + } +} + +static void platform_fixed_ioport_reset(void *opaque) +{ + PCIXenPlatformState *s = opaque; + + platform_fixed_ioport_writeb(s, 0, 0); +} + +static uint64_t platform_fixed_ioport_read(void *opaque, + hwaddr addr, + unsigned size) +{ + switch (size) { + case 1: + return platform_fixed_ioport_readb(opaque, addr); + case 2: + return platform_fixed_ioport_readw(opaque, addr); + default: + return -1; + } +} + +static void platform_fixed_ioport_write(void *opaque, hwaddr addr, + + uint64_t val, unsigned size) +{ + switch (size) { + case 1: + platform_fixed_ioport_writeb(opaque, addr, val); + break; + case 2: + platform_fixed_ioport_writew(opaque, addr, val); + break; + case 4: + platform_fixed_ioport_writel(opaque, addr, val); + break; + } +} + + +static const MemoryRegionOps platform_fixed_io_ops = { + .read = platform_fixed_ioport_read, + .write = platform_fixed_ioport_write, + .valid = { + .unaligned = true, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + .unaligned = true, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void platform_fixed_ioport_init(PCIXenPlatformState* s) +{ + memory_region_init_io(&s->fixed_io, OBJECT(s), &platform_fixed_io_ops, s, + "xen-fixed", 16); + memory_region_add_subregion(get_system_io(), XEN_PLATFORM_IOPORT, + &s->fixed_io); +} + +/* Xen Platform PCI Device */ + +static uint64_t xen_platform_ioport_readb(void *opaque, hwaddr addr, + unsigned int size) +{ + if (addr == 0) { + return platform_fixed_ioport_readb(opaque, 0); + } else { + return ~0u; + } +} + +static void xen_platform_ioport_writeb(void *opaque, hwaddr addr, + uint64_t val, unsigned int size) +{ + PCIXenPlatformState *s = opaque; + PCIDevice *pci_dev = PCI_DEVICE(s); + + switch (addr) { + case 0: /* Platform flags */ + platform_fixed_ioport_writeb(opaque, 0, (uint32_t)val); + break; + case 4: + if (val == 1) { + /* + * SUSE unplug for Xenlinux + * xen-kmp used this since xen-3.0.4, instead the official protocol + * from xen-3.3+ It did an unconditional "outl(1, (ioaddr + 4));" + * Pre VMDP 1.7 used 4 and 8 depending on how VMDP was configured. + * If VMDP was to control both disk and LAN it would use 4. + * If it controlled just disk or just LAN, it would use 8 below. 
+ */ + pci_unplug_disks(pci_get_bus(pci_dev), UNPLUG_IDE_SCSI_DISKS); + pci_unplug_nics(pci_get_bus(pci_dev)); + } + break; + case 8: + switch (val) { + case 1: + pci_unplug_disks(pci_get_bus(pci_dev), UNPLUG_IDE_SCSI_DISKS); + break; + case 2: + pci_unplug_nics(pci_get_bus(pci_dev)); + break; + default: + log_writeb(s, (uint32_t)val); + break; + } + break; + default: + break; + } +} + +static const MemoryRegionOps xen_pci_io_ops = { + .read = xen_platform_ioport_readb, + .write = xen_platform_ioport_writeb, + .impl.min_access_size = 1, + .impl.max_access_size = 1, +}; + +static void platform_ioport_bar_setup(PCIXenPlatformState *d) +{ + memory_region_init_io(&d->bar, OBJECT(d), &xen_pci_io_ops, d, + "xen-pci", 0x100); +} + +static uint64_t platform_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + DPRINTF("Warning: attempted read from physical address " + "0x" TARGET_FMT_plx " in xen platform mmio space\n", addr); + + return 0; +} + +static void platform_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + DPRINTF("Warning: attempted write of 0x%"PRIx64" to physical " + "address 0x" TARGET_FMT_plx " in xen platform mmio space\n", + val, addr); +} + +static const MemoryRegionOps platform_mmio_handler = { + .read = &platform_mmio_read, + .write = &platform_mmio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void platform_mmio_setup(PCIXenPlatformState *d) +{ + memory_region_init_io(&d->mmio_bar, OBJECT(d), &platform_mmio_handler, d, + "xen-mmio", 0x1000000); +} + +static int xen_platform_post_load(void *opaque, int version_id) +{ + PCIXenPlatformState *s = opaque; + + platform_fixed_ioport_writeb(s, 0, s->flags); + + return 0; +} + +static const VMStateDescription vmstate_xen_platform = { + .name = "platform", + .version_id = 4, + .minimum_version_id = 4, + .post_load = xen_platform_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PCIXenPlatformState), + VMSTATE_UINT8(flags, PCIXenPlatformState), + VMSTATE_END_OF_LIST() + } +}; + +static void xen_platform_realize(PCIDevice *dev, Error **errp) +{ + PCIXenPlatformState *d = XEN_PLATFORM(dev); + uint8_t *pci_conf; + + /* Device will crash on reset if xen is not initialized */ + if (!xen_enabled()) { + error_setg(errp, "xen-platform device requires the Xen accelerator"); + return; + } + + pci_conf = dev->config; + + pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY); + + pci_config_set_prog_interface(pci_conf, 0); + + pci_conf[PCI_INTERRUPT_PIN] = 1; + + platform_ioport_bar_setup(d); + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->bar); + + /* reserve 16MB mmio address for share memory*/ + platform_mmio_setup(d); + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_MEM_PREFETCH, + &d->mmio_bar); + + platform_fixed_ioport_init(d); +} + +static void platform_reset(DeviceState *dev) +{ + PCIXenPlatformState *s = XEN_PLATFORM(dev); + + platform_fixed_ioport_reset(s); +} + +static void xen_platform_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = xen_platform_realize; + k->vendor_id = PCI_VENDOR_ID_XEN; + k->device_id = PCI_DEVICE_ID_XEN_PLATFORM; + k->class_id = PCI_CLASS_OTHERS << 8 | 0x80; + k->subsystem_vendor_id = PCI_VENDOR_ID_XEN; + k->subsystem_id = PCI_DEVICE_ID_XEN_PLATFORM; + k->revision = 1; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "XEN platform pci device"; + dc->reset = platform_reset; + dc->vmsd = &vmstate_xen_platform; +} 
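+
+/*
+ * Guest PV drivers locate the platform device by these PCI IDs
+ * (vendor 0x5853 / device 0x0001, i.e. PCI_VENDOR_ID_XEN and
+ * PCI_DEVICE_ID_XEN_PLATFORM set in xen_platform_class_init() above),
+ * so the values are effectively part of the guest ABI.
+ */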
+ +static const TypeInfo xen_platform_info = { + .name = TYPE_XEN_PLATFORM, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIXenPlatformState), + .class_init = xen_platform_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void xen_platform_register_types(void) +{ + type_register_static(&xen_platform_info); +} + +type_init(xen_platform_register_types) diff --git a/hw/i386/xen/xen_pvdevice.c b/hw/i386/xen/xen_pvdevice.c new file mode 100644 index 000000000..1ea95fa60 --- /dev/null +++ b/hw/i386/xen/xen_pvdevice.c @@ -0,0 +1,154 @@ +/* Copyright (c) Citrix Systems Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, + * with or without modification, are permitted provided + * that the following conditions are met: + * + * * Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other + * materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "hw/pci/pci.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+#include "qom/object.h"
+
+#define TYPE_XEN_PV_DEVICE  "xen-pvdevice"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XenPVDevice, XEN_PV_DEVICE)
+
+struct XenPVDevice {
+    /*< private >*/
+    PCIDevice parent_obj;
+    /*< public >*/
+    uint16_t vendor_id;
+    uint16_t device_id;
+    uint8_t revision;
+    uint32_t size;
+    MemoryRegion mmio;
+};
+
+static uint64_t xen_pv_mmio_read(void *opaque, hwaddr addr,
+                                 unsigned size)
+{
+    trace_xen_pv_mmio_read(addr);
+
+    return ~(uint64_t)0;
+}
+
+static void xen_pv_mmio_write(void *opaque, hwaddr addr,
+                              uint64_t val, unsigned size)
+{
+    trace_xen_pv_mmio_write(addr);
+}
+
+static const MemoryRegionOps xen_pv_mmio_ops = {
+    .read = &xen_pv_mmio_read,
+    .write = &xen_pv_mmio_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static const VMStateDescription vmstate_xen_pvdevice = {
+    .name = "xen-pvdevice",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_PCI_DEVICE(parent_obj, XenPVDevice),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void xen_pv_realize(PCIDevice *pci_dev, Error **errp)
+{
+    XenPVDevice *d = XEN_PV_DEVICE(pci_dev);
+    uint8_t *pci_conf;
+
+    /* device-id property must always be supplied */
+    if (d->device_id == 0xffff) {
+        error_setg(errp, "Device ID invalid, it must always be supplied");
+        return;
+    }
+
+    pci_conf = pci_dev->config;
+
+    pci_set_word(pci_conf + PCI_VENDOR_ID, d->vendor_id);
+    pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, d->vendor_id);
+    pci_set_word(pci_conf + PCI_DEVICE_ID, d->device_id);
+    pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, d->device_id);
+    pci_set_byte(pci_conf + PCI_REVISION_ID, d->revision);
+
+    pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_MEMORY);
+
+    pci_config_set_prog_interface(pci_conf, 0);
+
+    pci_conf[PCI_INTERRUPT_PIN] = 1;
+
+    memory_region_init_io(&d->mmio, NULL, &xen_pv_mmio_ops, d,
+                          "mmio", d->size);
+
+    pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_MEM_PREFETCH,
+                     &d->mmio);
+}
+
+static Property xen_pv_props[] = {
+    DEFINE_PROP_UINT16("vendor-id", XenPVDevice, vendor_id, PCI_VENDOR_ID_XEN),
+    DEFINE_PROP_UINT16("device-id", XenPVDevice, device_id, 0xffff),
+    DEFINE_PROP_UINT8("revision", XenPVDevice, revision, 0x01),
+    DEFINE_PROP_UINT32("size", XenPVDevice, size, 0x400000),
+    DEFINE_PROP_END_OF_LIST()
+};
+
+static void xen_pv_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+    k->realize = xen_pv_realize;
+    k->class_id = PCI_CLASS_SYSTEM_OTHER;
+    dc->desc = "Xen PV Device";
+    device_class_set_props(dc, xen_pv_props);
+    dc->vmsd = &vmstate_xen_pvdevice;
+}
+
+static const TypeInfo xen_pv_type_info = {
+    .name          = TYPE_XEN_PV_DEVICE,
+    .parent        = TYPE_PCI_DEVICE,
+    .instance_size = sizeof(XenPVDevice),
+    .class_init    = xen_pv_class_init,
+    .interfaces = (InterfaceInfo[]) {
+        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+        { },
+    },
+};
+
+static void xen_pv_register_types(void)
+{
+    type_register_static(&xen_pv_type_info);
+}
+
+type_init(xen_pv_register_types)
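
For reference, the fixed I/O port range registered by platform_fixed_ioport_init() in xen_platform.c above (16 ports at XEN_PLATFORM_IOPORT, i.e. 0x10) is the interface a guest's PV drivers use to ask QEMU to unplug its emulated disks and NICs. The sketch below is editorial rather than part of the patch: it assumes a bare-metal x86 guest with GCC-style inline assembly for port I/O, and it only exercises the minimal handshake handled by the platform_fixed_ioport_* callbacks above (check the 0x49d2 magic, report a product and build number, then write the unplug mask); error handling and any further negotiation a real driver would do are omitted.

/* Hypothetical guest-side use of the xen-platform fixed ioport interface. */
#include <stdint.h>

#define XEN_PLATFORM_IOPORT  0x10      /* base port, as in xen_platform.c */
#define XEN_PLATFORM_MAGIC   0x49d2    /* returned by the readw handler   */

/* Unplug mask bits, mirroring the UNPLUG_* flags defined in the patch. */
#define UNPLUG_IDE_SCSI_DISKS (1u << 0)
#define UNPLUG_ALL_NICS       (1u << 1)

static inline void outw(uint16_t port, uint16_t val)
{
    __asm__ volatile("outw %0, %1" : : "a"(val), "Nd"(port));
}

static inline void outl(uint16_t port, uint32_t val)
{
    __asm__ volatile("outl %0, %1" : : "a"(val), "Nd"(port));
}

static inline uint16_t inw(uint16_t port)
{
    uint16_t val;

    __asm__ volatile("inw %1, %0" : "=a"(val) : "Nd"(port));
    return val;
}

/* Returns 0 on success, -1 if no xen-platform device answered. */
static int xen_unplug_emulated_devices(uint16_t product, uint32_t build)
{
    if (inw(XEN_PLATFORM_IOPORT) != XEN_PLATFORM_MAGIC) {
        return -1;                           /* device absent or disabled  */
    }
    outw(XEN_PLATFORM_IOPORT + 2, product);  /* driver product number      */
    outl(XEN_PLATFORM_IOPORT, build);        /* build number; accepted but
                                                unused by the handler above */
    outw(XEN_PLATFORM_IOPORT,                /* request the unplug itself  */
         UNPLUG_IDE_SCSI_DISKS | UNPLUG_ALL_NICS);
    return 0;
}

On the QEMU side, that final outw lands in platform_fixed_ioport_writew() case 0, which calls pci_unplug_disks() and pci_unplug_nics() on the bus holding the xen-platform device.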