Diffstat (limited to 'hw/vfio')
-rw-r--r--   hw/vfio/Kconfig           |   43
-rw-r--r--   hw/vfio/amd-xgbe.c        |   61
-rw-r--r--   hw/vfio/ap.c              |  186
-rw-r--r--   hw/vfio/calxeda-xgmac.c   |   61
-rw-r--r--   hw/vfio/ccw.c             |  791
-rw-r--r--   hw/vfio/common.c          | 2596
-rw-r--r--   hw/vfio/display.c         |  545
-rw-r--r--   hw/vfio/igd.c             |  616
-rw-r--r--   hw/vfio/meson.build       |   19
-rw-r--r--   hw/vfio/migration.c       |  911
-rw-r--r--   hw/vfio/pci-quirks.c      | 1769
-rw-r--r--   hw/vfio/pci.c             | 3328
-rw-r--r--   hw/vfio/pci.h             |  227
-rw-r--r--   hw/vfio/platform.c        |  720
-rw-r--r--   hw/vfio/spapr.c           |  255
-rw-r--r--   hw/vfio/trace-events      |  167
-rw-r--r--   hw/vfio/trace.h           |    1
17 files changed, 12296 insertions(+), 0 deletions(-)
diff --git a/hw/vfio/Kconfig b/hw/vfio/Kconfig
new file mode 100644
index 000000000..7cdba0560
--- /dev/null
+++ b/hw/vfio/Kconfig
@@ -0,0 +1,43 @@
+config VFIO
+ bool
+ depends on LINUX
+
+config VFIO_PCI
+ bool
+ default y
+ select VFIO
+ select EDID
+ depends on LINUX && PCI
+
+config VFIO_CCW
+ bool
+ default y
+ select VFIO
+ depends on LINUX && S390_CCW_VIRTIO
+
+config VFIO_PLATFORM
+ bool
+ default y
+ select VFIO
+ depends on LINUX && PLATFORM_BUS
+
+config VFIO_XGMAC
+ bool
+ default y
+ depends on VFIO_PLATFORM
+
+config VFIO_AMD_XGBE
+ bool
+ default y
+ depends on VFIO_PLATFORM
+
+config VFIO_AP
+ bool
+ default y
+ select VFIO
+ depends on LINUX && S390_CCW_VIRTIO
+
+config VFIO_IGD
+ bool
+ default y if PC_PCI
+ depends on VFIO_PCI
diff --git a/hw/vfio/amd-xgbe.c b/hw/vfio/amd-xgbe.c
new file mode 100644
index 000000000..96bd608b8
--- /dev/null
+++ b/hw/vfio/amd-xgbe.c
@@ -0,0 +1,61 @@
+/*
+ * AMD XGBE VFIO device
+ *
+ * Copyright Linaro Limited, 2015
+ *
+ * Authors:
+ * Eric Auger <eric.auger@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/vfio/vfio-amd-xgbe.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+
+static void amd_xgbe_realize(DeviceState *dev, Error **errp)
+{
+ VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
+ VFIOAmdXgbeDeviceClass *k = VFIO_AMD_XGBE_DEVICE_GET_CLASS(dev);
+
+ vdev->compat = g_strdup("amd,xgbe-seattle-v1a");
+ vdev->num_compat = 1;
+
+ k->parent_realize(dev, errp);
+}
+
+static const VMStateDescription vfio_platform_amd_xgbe_vmstate = {
+ .name = "vfio-amd-xgbe",
+ .unmigratable = 1,
+};
+
+static void vfio_amd_xgbe_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VFIOAmdXgbeDeviceClass *vcxc =
+ VFIO_AMD_XGBE_DEVICE_CLASS(klass);
+ device_class_set_parent_realize(dc, amd_xgbe_realize,
+ &vcxc->parent_realize);
+ dc->desc = "VFIO AMD XGBE";
+ dc->vmsd = &vfio_platform_amd_xgbe_vmstate;
+ /* Supported by TYPE_VIRT_MACHINE */
+ dc->user_creatable = true;
+}
+
+static const TypeInfo vfio_amd_xgbe_dev_info = {
+ .name = TYPE_VFIO_AMD_XGBE,
+ .parent = TYPE_VFIO_PLATFORM,
+ .instance_size = sizeof(VFIOAmdXgbeDevice),
+ .class_init = vfio_amd_xgbe_class_init,
+ .class_size = sizeof(VFIOAmdXgbeDeviceClass),
+};
+
+static void register_amd_xgbe_dev_type(void)
+{
+ type_register_static(&vfio_amd_xgbe_dev_info);
+}
+
+type_init(register_amd_xgbe_dev_type)
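
Note: amd-xgbe.c above (and calxeda-xgmac.c below) are deliberately thin
subtypes of TYPE_VFIO_PLATFORM: each only sets the device-tree compatible
string and then chains to the generic vfio-platform realize. As a rough
illustration of the pattern, a hypothetical new wrapper would need little
more than the following (TYPE_VFIO_FOO, VFIOFooDeviceClass and the
compatible string are invented for the example; the QOM type registration
boilerplate is assumed to match the files in this commit):

#include "qemu/osdep.h"
#include "hw/vfio/vfio-platform.h"

static void foo_realize(DeviceState *dev, Error **errp)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
    VFIOFooDeviceClass *k = VFIO_FOO_DEVICE_GET_CLASS(dev); /* hypothetical */

    /* Advertise the compatible string the guest will match on */
    vdev->compat = g_strdup("vendor,foo-device");            /* hypothetical */
    vdev->num_compat = 1;

    /* Chain to the realize saved by device_class_set_parent_realize() */
    k->parent_realize(dev, errp);
}
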
diff --git a/hw/vfio/ap.c b/hw/vfio/ap.c
new file mode 100644
index 000000000..e0dd561e8
--- /dev/null
+++ b/hw/vfio/ap.c
@@ -0,0 +1,186 @@
+/*
+ * VFIO based AP matrix device assignment
+ *
+ * Copyright 2018 IBM Corp.
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Halil Pasic <pasic@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include <linux/vfio.h>
+#include <sys/ioctl.h>
+#include "qapi/error.h"
+#include "hw/vfio/vfio.h"
+#include "hw/vfio/vfio-common.h"
+#include "hw/s390x/ap-device.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qemu/option.h"
+#include "qemu/config-file.h"
+#include "kvm/kvm_s390x.h"
+#include "migration/vmstate.h"
+#include "hw/qdev-properties.h"
+#include "hw/s390x/ap-bridge.h"
+#include "exec/address-spaces.h"
+#include "qom/object.h"
+
+#define TYPE_VFIO_AP_DEVICE "vfio-ap"
+
+struct VFIOAPDevice {
+ APDevice apdev;
+ VFIODevice vdev;
+};
+
+OBJECT_DECLARE_SIMPLE_TYPE(VFIOAPDevice, VFIO_AP_DEVICE)
+
+static void vfio_ap_compute_needs_reset(VFIODevice *vdev)
+{
+ vdev->needs_reset = false;
+}
+
+/*
+ * We don't need the vfio_hot_reset_multi and vfio_eoi operations for
+ * the vfio-ap device at this time.
+ */
+struct VFIODeviceOps vfio_ap_ops = {
+ .vfio_compute_needs_reset = vfio_ap_compute_needs_reset,
+};
+
+static void vfio_ap_put_device(VFIOAPDevice *vapdev)
+{
+ g_free(vapdev->vdev.name);
+ vfio_put_base_device(&vapdev->vdev);
+}
+
+static VFIOGroup *vfio_ap_get_group(VFIOAPDevice *vapdev, Error **errp)
+{
+ GError *gerror = NULL;
+ char *symlink, *group_path;
+ int groupid;
+
+ symlink = g_strdup_printf("%s/iommu_group", vapdev->vdev.sysfsdev);
+ group_path = g_file_read_link(symlink, &gerror);
+ g_free(symlink);
+
+ if (!group_path) {
+ error_setg(errp, "%s: no iommu_group found for %s: %s",
+ TYPE_VFIO_AP_DEVICE, vapdev->vdev.sysfsdev, gerror->message);
+ g_error_free(gerror);
+ return NULL;
+ }
+
+ if (sscanf(basename(group_path), "%d", &groupid) != 1) {
+ error_setg(errp, "vfio: failed to read %s", group_path);
+ g_free(group_path);
+ return NULL;
+ }
+
+ g_free(group_path);
+
+ return vfio_get_group(groupid, &address_space_memory, errp);
+}
+
+static void vfio_ap_realize(DeviceState *dev, Error **errp)
+{
+ int ret;
+ char *mdevid;
+ VFIOGroup *vfio_group;
+ APDevice *apdev = AP_DEVICE(dev);
+ VFIOAPDevice *vapdev = VFIO_AP_DEVICE(apdev);
+
+ vfio_group = vfio_ap_get_group(vapdev, errp);
+ if (!vfio_group) {
+ return;
+ }
+
+ vapdev->vdev.ops = &vfio_ap_ops;
+ vapdev->vdev.type = VFIO_DEVICE_TYPE_AP;
+ mdevid = basename(vapdev->vdev.sysfsdev);
+ vapdev->vdev.name = g_strdup_printf("%s", mdevid);
+ vapdev->vdev.dev = dev;
+
+ /*
+ * vfio-ap devices operate in a way compatible with discarding of
+ * memory in RAM blocks, as no pages are pinned in the host.
+ * This needs to be set before vfio_get_device() for vfio common to
+ * handle ram_block_discard_disable().
+ */
+ vapdev->vdev.ram_block_discard_allowed = true;
+
+ ret = vfio_get_device(vfio_group, mdevid, &vapdev->vdev, errp);
+ if (ret) {
+ goto out_get_dev_err;
+ }
+
+ return;
+
+out_get_dev_err:
+ vfio_ap_put_device(vapdev);
+ vfio_put_group(vfio_group);
+}
+
+static void vfio_ap_unrealize(DeviceState *dev)
+{
+ APDevice *apdev = AP_DEVICE(dev);
+ VFIOAPDevice *vapdev = VFIO_AP_DEVICE(apdev);
+ VFIOGroup *group = vapdev->vdev.group;
+
+ vfio_ap_put_device(vapdev);
+ vfio_put_group(group);
+}
+
+static Property vfio_ap_properties[] = {
+ DEFINE_PROP_STRING("sysfsdev", VFIOAPDevice, vdev.sysfsdev),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vfio_ap_reset(DeviceState *dev)
+{
+ int ret;
+ APDevice *apdev = AP_DEVICE(dev);
+ VFIOAPDevice *vapdev = VFIO_AP_DEVICE(apdev);
+
+ ret = ioctl(vapdev->vdev.fd, VFIO_DEVICE_RESET);
+ if (ret) {
+ error_report("%s: failed to reset %s device: %s", __func__,
+ vapdev->vdev.name, strerror(errno));
+ }
+}
+
+static const VMStateDescription vfio_ap_vmstate = {
+ .name = "vfio-ap",
+ .unmigratable = 1,
+};
+
+static void vfio_ap_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, vfio_ap_properties);
+ dc->vmsd = &vfio_ap_vmstate;
+ dc->desc = "VFIO-based AP device assignment";
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->realize = vfio_ap_realize;
+ dc->unrealize = vfio_ap_unrealize;
+ dc->hotpluggable = true;
+ dc->reset = vfio_ap_reset;
+ dc->bus_type = TYPE_AP_BUS;
+}
+
+static const TypeInfo vfio_ap_info = {
+ .name = TYPE_VFIO_AP_DEVICE,
+ .parent = TYPE_AP_DEVICE,
+ .instance_size = sizeof(VFIOAPDevice),
+ .class_init = vfio_ap_class_init,
+};
+
+static void vfio_ap_type_init(void)
+{
+ type_register_static(&vfio_ap_info);
+}
+
+type_init(vfio_ap_type_init)
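
Note: vfio_ap_get_group() above recovers the IOMMU group by following the
mdev's iommu_group symlink in sysfs. Outside of QEMU's glib helpers, the
same resolution can be sketched with plain readlink(2); the helper below is
illustrative only (error handling trimmed, name invented):

#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

static int iommu_groupid_for(const char *sysfsdev)
{
    char link[PATH_MAX], target[PATH_MAX];
    ssize_t len;
    int groupid;

    /* e.g. /sys/devices/vfio_ap/matrix/<uuid>/iommu_group */
    snprintf(link, sizeof(link), "%s/iommu_group", sysfsdev);

    /* The link target ends in the group number: .../iommu_groups/13 */
    len = readlink(link, target, sizeof(target) - 1);
    if (len < 0) {
        return -1;
    }
    target[len] = '\0';

    if (sscanf(basename(target), "%d", &groupid) != 1) {
        return -1;
    }
    return groupid; /* the id handed to vfio_get_group() */
}
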
diff --git a/hw/vfio/calxeda-xgmac.c b/hw/vfio/calxeda-xgmac.c
new file mode 100644
index 000000000..87c382e73
--- /dev/null
+++ b/hw/vfio/calxeda-xgmac.c
@@ -0,0 +1,61 @@
+/*
+ * calxeda xgmac VFIO device
+ *
+ * Copyright Linaro Limited, 2014
+ *
+ * Authors:
+ * Eric Auger <eric.auger@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/vfio/vfio-calxeda-xgmac.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+
+static void calxeda_xgmac_realize(DeviceState *dev, Error **errp)
+{
+ VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
+ VFIOCalxedaXgmacDeviceClass *k = VFIO_CALXEDA_XGMAC_DEVICE_GET_CLASS(dev);
+
+ vdev->compat = g_strdup("calxeda,hb-xgmac");
+ vdev->num_compat = 1;
+
+ k->parent_realize(dev, errp);
+}
+
+static const VMStateDescription vfio_platform_calxeda_xgmac_vmstate = {
+ .name = "vfio-calxeda-xgmac",
+ .unmigratable = 1,
+};
+
+static void vfio_calxeda_xgmac_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VFIOCalxedaXgmacDeviceClass *vcxc =
+ VFIO_CALXEDA_XGMAC_DEVICE_CLASS(klass);
+ device_class_set_parent_realize(dc, calxeda_xgmac_realize,
+ &vcxc->parent_realize);
+ dc->desc = "VFIO Calxeda XGMAC";
+ dc->vmsd = &vfio_platform_calxeda_xgmac_vmstate;
+ /* Supported by TYPE_VIRT_MACHINE */
+ dc->user_creatable = true;
+}
+
+static const TypeInfo vfio_calxeda_xgmac_dev_info = {
+ .name = TYPE_VFIO_CALXEDA_XGMAC,
+ .parent = TYPE_VFIO_PLATFORM,
+ .instance_size = sizeof(VFIOCalxedaXgmacDevice),
+ .class_init = vfio_calxeda_xgmac_class_init,
+ .class_size = sizeof(VFIOCalxedaXgmacDeviceClass),
+};
+
+static void register_calxeda_xgmac_dev_type(void)
+{
+ type_register_static(&vfio_calxeda_xgmac_dev_info);
+}
+
+type_init(register_calxeda_xgmac_dev_type)
diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c
new file mode 100644
index 000000000..035473766
--- /dev/null
+++ b/hw/vfio/ccw.c
@@ -0,0 +1,791 @@
+/*
+ * vfio based subchannel assignment support
+ *
+ * Copyright 2017 IBM Corp.
+ * Copyright 2019 Red Hat, Inc.
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Pierre Morel <pmorel@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include <linux/vfio.h>
+#include <linux/vfio_ccw.h>
+#include <sys/ioctl.h>
+
+#include "qapi/error.h"
+#include "hw/vfio/vfio.h"
+#include "hw/vfio/vfio-common.h"
+#include "hw/s390x/s390-ccw.h"
+#include "hw/s390x/vfio-ccw.h"
+#include "hw/qdev-properties.h"
+#include "hw/s390x/ccw-device.h"
+#include "exec/address-spaces.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+
+struct VFIOCCWDevice {
+ S390CCWDevice cdev;
+ VFIODevice vdev;
+ uint64_t io_region_size;
+ uint64_t io_region_offset;
+ struct ccw_io_region *io_region;
+ uint64_t async_cmd_region_size;
+ uint64_t async_cmd_region_offset;
+ struct ccw_cmd_region *async_cmd_region;
+ uint64_t schib_region_size;
+ uint64_t schib_region_offset;
+ struct ccw_schib_region *schib_region;
+ uint64_t crw_region_size;
+ uint64_t crw_region_offset;
+ struct ccw_crw_region *crw_region;
+ EventNotifier io_notifier;
+ EventNotifier crw_notifier;
+ EventNotifier req_notifier;
+ bool force_orb_pfch;
+ bool warned_orb_pfch;
+};
+
+static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch,
+ const char *msg)
+{
+ warn_report_once_cond(&vcdev->warned_orb_pfch,
+ "vfio-ccw (devno %x.%x.%04x): %s",
+ sch->cssid, sch->ssid, sch->devno, msg);
+}
+
+static void vfio_ccw_compute_needs_reset(VFIODevice *vdev)
+{
+ vdev->needs_reset = false;
+}
+
+/*
+ * We don't need the vfio_hot_reset_multi and vfio_eoi operations for
+ * the vfio-ccw device at this time.
+ */
+struct VFIODeviceOps vfio_ccw_ops = {
+ .vfio_compute_needs_reset = vfio_ccw_compute_needs_reset,
+};
+
+static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
+{
+ S390CCWDevice *cdev = sch->driver_data;
+ VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ struct ccw_io_region *region = vcdev->io_region;
+ int ret;
+
+ if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) {
+ sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH;
+ warn_once_pfch(vcdev, sch, "PFCH flag forced");
+ }
+
+ QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB));
+ QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW));
+ QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB));
+
+ memset(region, 0, sizeof(*region));
+
+ memcpy(region->orb_area, &sch->orb, sizeof(ORB));
+ memcpy(region->scsw_area, &sch->curr_status.scsw, sizeof(SCSW));
+
+again:
+ ret = pwrite(vcdev->vdev.fd, region,
+ vcdev->io_region_size, vcdev->io_region_offset);
+ if (ret != vcdev->io_region_size) {
+ if (errno == EAGAIN) {
+ goto again;
+ }
+ error_report("vfio-ccw: write I/O region failed with errno=%d", errno);
+ ret = errno ? -errno : -EFAULT;
+ } else {
+ ret = 0;
+ }
+ switch (ret) {
+ case 0:
+ return IOINST_CC_EXPECTED;
+ case -EBUSY:
+ return IOINST_CC_BUSY;
+ case -ENODEV:
+ case -EACCES:
+ return IOINST_CC_NOT_OPERATIONAL;
+ case -EFAULT:
+ default:
+ sch_gen_unit_exception(sch);
+ css_inject_io_interrupt(sch);
+ return IOINST_CC_EXPECTED;
+ }
+}
+
+static IOInstEnding vfio_ccw_handle_store(SubchDev *sch)
+{
+ S390CCWDevice *cdev = sch->driver_data;
+ VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ SCHIB *schib = &sch->curr_status;
+ struct ccw_schib_region *region = vcdev->schib_region;
+ SCHIB *s;
+ int ret;
+
+ /* SCHIB region not available, so nothing else to do */
+ if (!region) {
+ return IOINST_CC_EXPECTED;
+ }
+
+ memset(region, 0, sizeof(*region));
+ ret = pread(vcdev->vdev.fd, region, vcdev->schib_region_size,
+ vcdev->schib_region_offset);
+
+ if (ret == -1) {
+ /*
+ * Device is probably damaged, but store subchannel does not
+ * have a nonzero cc defined for this scenario. Log an error,
+ * and presume things are otherwise fine.
+ */
+ error_report("vfio-ccw: store region read failed with errno=%d", errno);
+ return IOINST_CC_EXPECTED;
+ }
+
+ /*
+ * Selectively copy path-related bits of the SCHIB,
+ * rather than copying the entire struct.
+ */
+ s = (SCHIB *)region->schib_area;
+ schib->pmcw.pnom = s->pmcw.pnom;
+ schib->pmcw.lpum = s->pmcw.lpum;
+ schib->pmcw.pam = s->pmcw.pam;
+ schib->pmcw.pom = s->pmcw.pom;
+
+ if (s->scsw.flags & SCSW_FLAGS_MASK_PNO) {
+ schib->scsw.flags |= SCSW_FLAGS_MASK_PNO;
+ }
+
+ return IOINST_CC_EXPECTED;
+}
+
+static int vfio_ccw_handle_clear(SubchDev *sch)
+{
+ S390CCWDevice *cdev = sch->driver_data;
+ VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ struct ccw_cmd_region *region = vcdev->async_cmd_region;
+ int ret;
+
+ if (!vcdev->async_cmd_region) {
+ /* Async command region not available, fall back to emulation */
+ return -ENOSYS;
+ }
+
+ memset(region, 0, sizeof(*region));
+ region->command = VFIO_CCW_ASYNC_CMD_CSCH;
+
+again:
+ ret = pwrite(vcdev->vdev.fd, region,
+ vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
+ if (ret != vcdev->async_cmd_region_size) {
+ if (errno == EAGAIN) {
+ goto again;
+ }
+ error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
+ ret = errno ? -errno : -EFAULT;
+ } else {
+ ret = 0;
+ }
+ switch (ret) {
+ case 0:
+ case -ENODEV:
+ case -EACCES:
+ return ret;
+ case -EFAULT:
+ default:
+ sch_gen_unit_exception(sch);
+ css_inject_io_interrupt(sch);
+ return 0;
+ }
+}
+
+static int vfio_ccw_handle_halt(SubchDev *sch)
+{
+ S390CCWDevice *cdev = sch->driver_data;
+ VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ struct ccw_cmd_region *region = vcdev->async_cmd_region;
+ int ret;
+
+ if (!vcdev->async_cmd_region) {
+ /* Async command region not available, fall back to emulation */
+ return -ENOSYS;
+ }
+
+ memset(region, 0, sizeof(*region));
+ region->command = VFIO_CCW_ASYNC_CMD_HSCH;
+
+again:
+ ret = pwrite(vcdev->vdev.fd, region,
+ vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
+ if (ret != vcdev->async_cmd_region_size) {
+ if (errno == EAGAIN) {
+ goto again;
+ }
+ error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
+ ret = errno ? -errno : -EFAULT;
+ } else {
+ ret = 0;
+ }
+ switch (ret) {
+ case 0:
+ case -EBUSY:
+ case -ENODEV:
+ case -EACCES:
+ return ret;
+ case -EFAULT:
+ default:
+ sch_gen_unit_exception(sch);
+ css_inject_io_interrupt(sch);
+ return 0;
+ }
+}
+
+static void vfio_ccw_reset(DeviceState *dev)
+{
+ CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
+ S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
+ VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+
+ ioctl(vcdev->vdev.fd, VFIO_DEVICE_RESET);
+}
+
+static void vfio_ccw_crw_read(VFIOCCWDevice *vcdev)
+{
+ struct ccw_crw_region *region = vcdev->crw_region;
+ CRW crw;
+ int size;
+
+ /* Keep reading CRWs as long as data is returned */
+ do {
+ memset(region, 0, sizeof(*region));
+ size = pread(vcdev->vdev.fd, region, vcdev->crw_region_size,
+ vcdev->crw_region_offset);
+
+ if (size == -1) {
+ error_report("vfio-ccw: Read crw region failed with errno=%d",
+ errno);
+ break;
+ }
+
+ if (region->crw == 0) {
+ /* No more CRWs to queue */
+ break;
+ }
+
+ memcpy(&crw, &region->crw, sizeof(CRW));
+
+ css_crw_add_to_queue(crw);
+ } while (1);
+}
+
+static void vfio_ccw_req_notifier_handler(void *opaque)
+{
+ VFIOCCWDevice *vcdev = opaque;
+ Error *err = NULL;
+
+ if (!event_notifier_test_and_clear(&vcdev->req_notifier)) {
+ return;
+ }
+
+ qdev_unplug(DEVICE(vcdev), &err);
+ if (err) {
+ warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
+ }
+}
+
+static void vfio_ccw_crw_notifier_handler(void *opaque)
+{
+ VFIOCCWDevice *vcdev = opaque;
+
+ while (event_notifier_test_and_clear(&vcdev->crw_notifier)) {
+ vfio_ccw_crw_read(vcdev);
+ }
+}
+
+static void vfio_ccw_io_notifier_handler(void *opaque)
+{
+ VFIOCCWDevice *vcdev = opaque;
+ struct ccw_io_region *region = vcdev->io_region;
+ S390CCWDevice *cdev = S390_CCW_DEVICE(vcdev);
+ CcwDevice *ccw_dev = CCW_DEVICE(cdev);
+ SubchDev *sch = ccw_dev->sch;
+ SCHIB *schib = &sch->curr_status;
+ SCSW s;
+ IRB irb;
+ ESW esw;
+ int size;
+
+ if (!event_notifier_test_and_clear(&vcdev->io_notifier)) {
+ return;
+ }
+
+ size = pread(vcdev->vdev.fd, region, vcdev->io_region_size,
+ vcdev->io_region_offset);
+ if (size == -1) {
+ switch (errno) {
+ case ENODEV:
+ /* Generate a deferred cc 3 condition. */
+ schib->scsw.flags |= SCSW_FLAGS_MASK_CC;
+ schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
+ goto read_err;
+ case EFAULT:
+ /* Memory problem, generate channel data check. */
+ schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
+ schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK;
+ schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ goto read_err;
+ default:
+ /* Error, generate channel program check. */
+ schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
+ schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK;
+ schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ goto read_err;
+ }
+ } else if (size != vcdev->io_region_size) {
+ /* Information transfer error, generate channel-control check. */
+ schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
+ schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK;
+ schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ goto read_err;
+ }
+
+ memcpy(&irb, region->irb_area, sizeof(IRB));
+
+ /* Update control block via irb. */
+ s = schib->scsw;
+ copy_scsw_to_guest(&s, &irb.scsw);
+ schib->scsw = s;
+
+ copy_esw_to_guest(&esw, &irb.esw);
+ sch->esw = esw;
+
+ /* If a unit check is pending, copy sense data. */
+ if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) &&
+ (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) {
+ memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw));
+ }
+
+read_err:
+ css_inject_io_interrupt(sch);
+}
+
+static void vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
+ unsigned int irq,
+ Error **errp)
+{
+ VFIODevice *vdev = &vcdev->vdev;
+ struct vfio_irq_info *irq_info;
+ size_t argsz;
+ int fd;
+ EventNotifier *notifier;
+ IOHandler *fd_read;
+
+ switch (irq) {
+ case VFIO_CCW_IO_IRQ_INDEX:
+ notifier = &vcdev->io_notifier;
+ fd_read = vfio_ccw_io_notifier_handler;
+ break;
+ case VFIO_CCW_CRW_IRQ_INDEX:
+ notifier = &vcdev->crw_notifier;
+ fd_read = vfio_ccw_crw_notifier_handler;
+ break;
+ case VFIO_CCW_REQ_IRQ_INDEX:
+ notifier = &vcdev->req_notifier;
+ fd_read = vfio_ccw_req_notifier_handler;
+ break;
+ default:
+ error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
+ return;
+ }
+
+ if (vdev->num_irqs < irq + 1) {
+ error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)",
+ irq, vdev->num_irqs);
+ return;
+ }
+
+ argsz = sizeof(*irq_info);
+ irq_info = g_malloc0(argsz);
+ irq_info->index = irq;
+ irq_info->argsz = argsz;
+ if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
+ irq_info) < 0 || irq_info->count < 1) {
+ error_setg_errno(errp, errno, "vfio: Error getting irq info");
+ goto out_free_info;
+ }
+
+ if (event_notifier_init(notifier, 0)) {
+ error_setg_errno(errp, errno,
+ "vfio: Unable to init event notifier for irq (%d)",
+ irq);
+ goto out_free_info;
+ }
+
+ fd = event_notifier_get_fd(notifier);
+ qemu_set_fd_handler(fd, fd_read, NULL, vcdev);
+
+ if (vfio_set_irq_signaling(vdev, irq, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
+ qemu_set_fd_handler(fd, NULL, NULL, vcdev);
+ event_notifier_cleanup(notifier);
+ }
+
+out_free_info:
+ g_free(irq_info);
+}
+
+static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
+ unsigned int irq)
+{
+ Error *err = NULL;
+ EventNotifier *notifier;
+
+ switch (irq) {
+ case VFIO_CCW_IO_IRQ_INDEX:
+ notifier = &vcdev->io_notifier;
+ break;
+ case VFIO_CCW_CRW_IRQ_INDEX:
+ notifier = &vcdev->crw_notifier;
+ break;
+ case VFIO_CCW_REQ_IRQ_INDEX:
+ notifier = &vcdev->req_notifier;
+ break;
+ default:
+ error_report("vfio: Unsupported device irq(%d)", irq);
+ return;
+ }
+
+ if (vfio_set_irq_signaling(&vcdev->vdev, irq, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
+ warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
+ }
+
+ qemu_set_fd_handler(event_notifier_get_fd(notifier),
+ NULL, NULL, vcdev);
+ event_notifier_cleanup(notifier);
+}
+
+static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
+{
+ VFIODevice *vdev = &vcdev->vdev;
+ struct vfio_region_info *info;
+ int ret;
+
+ /* Sanity check device */
+ if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) {
+ error_setg(errp, "vfio: Um, this isn't a vfio-ccw device");
+ return;
+ }
+
+ /*
+ * We always expect at least the I/O region to be present. We may
+ * also have a variable number of regions governed by capabilities.
+ */
+ if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) {
+ error_setg(errp, "vfio: too few regions (%u), expected at least %u",
+ vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1);
+ return;
+ }
+
+ ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
+ if (ret) {
+ error_setg_errno(errp, -ret, "vfio: Error getting config info");
+ return;
+ }
+
+ vcdev->io_region_size = info->size;
+ if (sizeof(*vcdev->io_region) != vcdev->io_region_size) {
+ error_setg(errp, "vfio: Unexpected size of the I/O region");
+ goto out_err;
+ }
+
+ vcdev->io_region_offset = info->offset;
+ vcdev->io_region = g_malloc0(info->size);
+ g_free(info);
+
+ /* Check for the optional async command region */
+ ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
+ VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
+ if (!ret) {
+ vcdev->async_cmd_region_size = info->size;
+ if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) {
+ error_setg(errp, "vfio: Unexpected size of the async cmd region");
+ goto out_err;
+ }
+ vcdev->async_cmd_region_offset = info->offset;
+ vcdev->async_cmd_region = g_malloc0(info->size);
+ g_free(info);
+ }
+
+ ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
+ VFIO_REGION_SUBTYPE_CCW_SCHIB, &info);
+ if (!ret) {
+ vcdev->schib_region_size = info->size;
+ if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) {
+ error_setg(errp, "vfio: Unexpected size of the schib region");
+ goto out_err;
+ }
+ vcdev->schib_region_offset = info->offset;
+ vcdev->schib_region = g_malloc(info->size);
+ g_free(info);
+ }
+
+ ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
+ VFIO_REGION_SUBTYPE_CCW_CRW, &info);
+
+ if (!ret) {
+ vcdev->crw_region_size = info->size;
+ if (sizeof(*vcdev->crw_region) != vcdev->crw_region_size) {
+ error_setg(errp, "vfio: Unexpected size of the CRW region");
+ goto out_err;
+ }
+ vcdev->crw_region_offset = info->offset;
+ vcdev->crw_region = g_malloc(info->size);
+ g_free(info);
+ }
+
+ return;
+
+out_err:
+ g_free(vcdev->crw_region);
+ g_free(vcdev->schib_region);
+ g_free(vcdev->async_cmd_region);
+ g_free(vcdev->io_region);
+ g_free(info);
+ return;
+}
+
+static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
+{
+ g_free(vcdev->crw_region);
+ g_free(vcdev->schib_region);
+ g_free(vcdev->async_cmd_region);
+ g_free(vcdev->io_region);
+}
+
+static void vfio_ccw_put_device(VFIOCCWDevice *vcdev)
+{
+ g_free(vcdev->vdev.name);
+ vfio_put_base_device(&vcdev->vdev);
+}
+
+static void vfio_ccw_get_device(VFIOGroup *group, VFIOCCWDevice *vcdev,
+ Error **errp)
+{
+ char *name = g_strdup_printf("%x.%x.%04x", vcdev->cdev.hostid.cssid,
+ vcdev->cdev.hostid.ssid,
+ vcdev->cdev.hostid.devid);
+ VFIODevice *vbasedev;
+
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ if (strcmp(vbasedev->name, name) == 0) {
+ error_setg(errp, "vfio: subchannel %s has already been attached",
+ name);
+ goto out_err;
+ }
+ }
+
+ /*
+ * All vfio-ccw devices are believed to operate in a way compatible with
+ * discarding of memory in RAM blocks, i.e., pages pinned in the host are
+ * in the current working set of the guest driver and therefore never
+ * overlap with, e.g., pages available to the guest balloon driver. This
+ * needs to be set before vfio_get_device() for vfio common to handle
+ * ram_block_discard_disable().
+ */
+ vcdev->vdev.ram_block_discard_allowed = true;
+
+ if (vfio_get_device(group, vcdev->cdev.mdevid, &vcdev->vdev, errp)) {
+ goto out_err;
+ }
+
+ vcdev->vdev.ops = &vfio_ccw_ops;
+ vcdev->vdev.type = VFIO_DEVICE_TYPE_CCW;
+ vcdev->vdev.name = name;
+ vcdev->vdev.dev = &vcdev->cdev.parent_obj.parent_obj;
+
+ return;
+
+out_err:
+ g_free(name);
+}
+
+static VFIOGroup *vfio_ccw_get_group(S390CCWDevice *cdev, Error **errp)
+{
+ char *tmp, group_path[PATH_MAX];
+ ssize_t len;
+ int groupid;
+
+ tmp = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/%s/iommu_group",
+ cdev->hostid.cssid, cdev->hostid.ssid,
+ cdev->hostid.devid, cdev->mdevid);
+ len = readlink(tmp, group_path, sizeof(group_path));
+ g_free(tmp);
+
+ if (len <= 0 || len >= sizeof(group_path)) {
+ error_setg(errp, "vfio: no iommu_group found");
+ return NULL;
+ }
+
+ group_path[len] = 0;
+
+ if (sscanf(basename(group_path), "%d", &groupid) != 1) {
+ error_setg(errp, "vfio: failed to read %s", group_path);
+ return NULL;
+ }
+
+ return vfio_get_group(groupid, &address_space_memory, errp);
+}
+
+static void vfio_ccw_realize(DeviceState *dev, Error **errp)
+{
+ VFIOGroup *group;
+ CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
+ S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
+ VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
+ Error *err = NULL;
+
+ /* Call the class init function for subchannel. */
+ if (cdc->realize) {
+ cdc->realize(cdev, vcdev->vdev.sysfsdev, &err);
+ if (err) {
+ goto out_err_propagate;
+ }
+ }
+
+ group = vfio_ccw_get_group(cdev, &err);
+ if (!group) {
+ goto out_group_err;
+ }
+
+ vfio_ccw_get_device(group, vcdev, &err);
+ if (err) {
+ goto out_device_err;
+ }
+
+ vfio_ccw_get_region(vcdev, &err);
+ if (err) {
+ goto out_region_err;
+ }
+
+ vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, &err);
+ if (err) {
+ goto out_io_notifier_err;
+ }
+
+ if (vcdev->crw_region) {
+ vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX, &err);
+ if (err) {
+ goto out_irq_notifier_err;
+ }
+ }
+
+ vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX, &err);
+ if (err) {
+ /*
+ * Report this error, but do not make it a failing condition.
+ * Lack of this IRQ in the host does not prevent normal operation.
+ */
+ error_report_err(err);
+ }
+
+ return;
+
+out_irq_notifier_err:
+ vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
+ vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
+ vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
+out_io_notifier_err:
+ vfio_ccw_put_region(vcdev);
+out_region_err:
+ vfio_ccw_put_device(vcdev);
+out_device_err:
+ vfio_put_group(group);
+out_group_err:
+ if (cdc->unrealize) {
+ cdc->unrealize(cdev);
+ }
+out_err_propagate:
+ error_propagate(errp, err);
+}
+
+static void vfio_ccw_unrealize(DeviceState *dev)
+{
+ CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
+ S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
+ VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
+ VFIOGroup *group = vcdev->vdev.group;
+
+ vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
+ vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
+ vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
+ vfio_ccw_put_region(vcdev);
+ vfio_ccw_put_device(vcdev);
+ vfio_put_group(group);
+
+ if (cdc->unrealize) {
+ cdc->unrealize(cdev);
+ }
+}
+
+static Property vfio_ccw_properties[] = {
+ DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
+ DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static const VMStateDescription vfio_ccw_vmstate = {
+ .name = "vfio-ccw",
+ .unmigratable = 1,
+};
+
+static void vfio_ccw_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, vfio_ccw_properties);
+ dc->vmsd = &vfio_ccw_vmstate;
+ dc->desc = "VFIO-based subchannel assignment";
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->realize = vfio_ccw_realize;
+ dc->unrealize = vfio_ccw_unrealize;
+ dc->reset = vfio_ccw_reset;
+
+ cdc->handle_request = vfio_ccw_handle_request;
+ cdc->handle_halt = vfio_ccw_handle_halt;
+ cdc->handle_clear = vfio_ccw_handle_clear;
+ cdc->handle_store = vfio_ccw_handle_store;
+}
+
+static const TypeInfo vfio_ccw_info = {
+ .name = TYPE_VFIO_CCW,
+ .parent = TYPE_S390_CCW,
+ .instance_size = sizeof(VFIOCCWDevice),
+ .class_init = vfio_ccw_class_init,
+};
+
+static void register_vfio_ccw_type(void)
+{
+ type_register_static(&vfio_ccw_info);
+}
+
+type_init(register_vfio_ccw_type)
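
Note: vfio_ccw_handle_request(), vfio_ccw_handle_halt() and
vfio_ccw_handle_clear() above all repeat the same pwrite-and-retry dance
against a device region. A sketch of how that common step could be factored
out (helper name invented; the callers would still map 0/-EBUSY/-ENODEV/
-EACCES to condition codes and turn anything else into a unit exception, as
they do in this commit):

#include <errno.h>
#include <unistd.h>

static int vfio_ccw_write_region(int devfd, const void *buf,
                                 size_t size, off_t offset)
{
    ssize_t ret;

    /* The kernel driver may return EAGAIN while the region is busy */
    do {
        ret = pwrite(devfd, buf, size, offset);
    } while (ret < 0 && errno == EAGAIN);

    if (ret < 0) {
        return -errno;
    }
    if ((size_t)ret != size) {
        return -EFAULT; /* short write: same fallback as the callers */
    }
    return 0;
}
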
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
new file mode 100644
index 000000000..080046e3f
--- /dev/null
+++ b/hw/vfio/common.c
@@ -0,0 +1,2596 @@
+/*
+ * generic functions used by VFIO devices
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Based on qemu-kvm device-assignment:
+ * Adapted for KVM by Qumranet.
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
+ * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
+ * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
+ * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
+ * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+#ifdef CONFIG_KVM
+#include <linux/kvm.h>
+#endif
+#include <linux/vfio.h>
+
+#include "hw/vfio/vfio-common.h"
+#include "hw/vfio/vfio.h"
+#include "exec/address-spaces.h"
+#include "exec/memory.h"
+#include "exec/ram_addr.h"
+#include "hw/hw.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/range.h"
+#include "sysemu/kvm.h"
+#include "sysemu/reset.h"
+#include "sysemu/runstate.h"
+#include "trace.h"
+#include "qapi/error.h"
+#include "migration/migration.h"
+
+VFIOGroupList vfio_group_list =
+ QLIST_HEAD_INITIALIZER(vfio_group_list);
+static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
+ QLIST_HEAD_INITIALIZER(vfio_address_spaces);
+
+#ifdef CONFIG_KVM
+/*
+ * We have a single VFIO pseudo device per KVM VM. Once created it lives
+ * for the life of the VM. Closing the file descriptor only drops our
+ * reference to it and the device's reference to kvm. Therefore once
+ * initialized, this file descriptor is only released on QEMU exit and
+ * we'll re-use it should another vfio device be attached before then.
+ */
+static int vfio_kvm_device_fd = -1;
+#endif
+
+/*
+ * Common VFIO interrupt disable
+ */
+void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
+{
+ struct vfio_irq_set irq_set = {
+ .argsz = sizeof(irq_set),
+ .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
+ .index = index,
+ .start = 0,
+ .count = 0,
+ };
+
+ ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
+}
+
+void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
+{
+ struct vfio_irq_set irq_set = {
+ .argsz = sizeof(irq_set),
+ .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
+ .index = index,
+ .start = 0,
+ .count = 1,
+ };
+
+ ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
+}
+
+void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
+{
+ struct vfio_irq_set irq_set = {
+ .argsz = sizeof(irq_set),
+ .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
+ .index = index,
+ .start = 0,
+ .count = 1,
+ };
+
+ ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
+}
+
+static inline const char *action_to_str(int action)
+{
+ switch (action) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ return "MASK";
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ return "UNMASK";
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ return "TRIGGER";
+ default:
+ return "UNKNOWN ACTION";
+ }
+}
+
+static const char *index_to_str(VFIODevice *vbasedev, int index)
+{
+ if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
+ return NULL;
+ }
+
+ switch (index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ return "INTX";
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ return "MSI";
+ case VFIO_PCI_MSIX_IRQ_INDEX:
+ return "MSIX";
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ return "ERR";
+ case VFIO_PCI_REQ_IRQ_INDEX:
+ return "REQ";
+ default:
+ return NULL;
+ }
+}
+
+static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
+{
+ switch (container->iommu_type) {
+ case VFIO_TYPE1v2_IOMMU:
+ case VFIO_TYPE1_IOMMU:
+ /*
+ * We support coordinated discarding of RAM via the RamDiscardManager.
+ */
+ return ram_block_uncoordinated_discard_disable(state);
+ default:
+ /*
+ * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
+ * RamDiscardManager, however, it is completely untested.
+ *
+ * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
+ * completely the opposite of managing mapping/pinning dynamically as
+ * required by RamDiscardManager. We would have to special-case sections
+ * with a RamDiscardManager.
+ */
+ return ram_block_discard_disable(state);
+ }
+}
+
+int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
+ int action, int fd, Error **errp)
+{
+ struct vfio_irq_set *irq_set;
+ int argsz, ret = 0;
+ const char *name;
+ int32_t *pfd;
+
+ argsz = sizeof(*irq_set) + sizeof(*pfd);
+
+ irq_set = g_malloc0(argsz);
+ irq_set->argsz = argsz;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
+ irq_set->index = index;
+ irq_set->start = subindex;
+ irq_set->count = 1;
+ pfd = (int32_t *)&irq_set->data;
+ *pfd = fd;
+
+ if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
+ ret = -errno;
+ }
+ g_free(irq_set);
+
+ if (!ret) {
+ return 0;
+ }
+
+ error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");
+
+ name = index_to_str(vbasedev, index);
+ if (name) {
+ error_prepend(errp, "%s-%d: ", name, subindex);
+ } else {
+ error_prepend(errp, "index %d-%d: ", index, subindex);
+ }
+ error_prepend(errp,
+ "Failed to %s %s eventfd signaling for interrupt ",
+ fd < 0 ? "tear down" : "set up", action_to_str(action));
+ return ret;
+}
+
+/*
+ * I/O port / MMIO - beware of endianness; VFIO is always little endian
+ */
+void vfio_region_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIORegion *region = opaque;
+ VFIODevice *vbasedev = region->vbasedev;
+ union {
+ uint8_t byte;
+ uint16_t word;
+ uint32_t dword;
+ uint64_t qword;
+ } buf;
+
+ switch (size) {
+ case 1:
+ buf.byte = data;
+ break;
+ case 2:
+ buf.word = cpu_to_le16(data);
+ break;
+ case 4:
+ buf.dword = cpu_to_le32(data);
+ break;
+ case 8:
+ buf.qword = cpu_to_le64(data);
+ break;
+ default:
+ hw_error("vfio: unsupported write size, %u bytes", size);
+ break;
+ }
+
+ if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
+ error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
+ ",%d) failed: %m",
+ __func__, vbasedev->name, region->nr,
+ addr, data, size);
+ }
+
+ trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);
+
+ /*
+ * A read or write to a BAR always signals an INTx EOI. This will
+ * do nothing if not pending (including not in INTx mode). We assume
+ * that a BAR access is in response to an interrupt and that BAR
+ * accesses will service the interrupt. Unfortunately, we don't know
+ * which access will service the interrupt, so we're potentially
+ * getting quite a few host interrupts per guest interrupt.
+ */
+ vbasedev->ops->vfio_eoi(vbasedev);
+}
+
+uint64_t vfio_region_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIORegion *region = opaque;
+ VFIODevice *vbasedev = region->vbasedev;
+ union {
+ uint8_t byte;
+ uint16_t word;
+ uint32_t dword;
+ uint64_t qword;
+ } buf;
+ uint64_t data = 0;
+
+ if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
+ error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
+ __func__, vbasedev->name, region->nr,
+ addr, size);
+ return (uint64_t)-1;
+ }
+ switch (size) {
+ case 1:
+ data = buf.byte;
+ break;
+ case 2:
+ data = le16_to_cpu(buf.word);
+ break;
+ case 4:
+ data = le32_to_cpu(buf.dword);
+ break;
+ case 8:
+ data = le64_to_cpu(buf.qword);
+ break;
+ default:
+ hw_error("vfio: unsupported read size, %u bytes", size);
+ break;
+ }
+
+ trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);
+
+ /* Same as write above */
+ vbasedev->ops->vfio_eoi(vbasedev);
+
+ return data;
+}
+
+const MemoryRegionOps vfio_region_ops = {
+ .read = vfio_region_read,
+ .write = vfio_region_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+/*
+ * Device state interfaces
+ */
+
+bool vfio_mig_active(void)
+{
+ VFIOGroup *group;
+ VFIODevice *vbasedev;
+
+ if (QLIST_EMPTY(&vfio_group_list)) {
+ return false;
+ }
+
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ if (vbasedev->migration_blocker) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
+{
+ VFIOGroup *group;
+ VFIODevice *vbasedev;
+ MigrationState *ms = migrate_get_current();
+
+ if (!migration_is_setup_or_active(ms->state)) {
+ return false;
+ }
+
+ QLIST_FOREACH(group, &container->group_list, container_next) {
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ VFIOMigration *migration = vbasedev->migration;
+
+ if (!migration) {
+ return false;
+ }
+
+ if ((vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF)
+ && (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+static bool vfio_devices_all_running_and_saving(VFIOContainer *container)
+{
+ VFIOGroup *group;
+ VFIODevice *vbasedev;
+ MigrationState *ms = migrate_get_current();
+
+ if (!migration_is_setup_or_active(ms->state)) {
+ return false;
+ }
+
+ QLIST_FOREACH(group, &container->group_list, container_next) {
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ VFIOMigration *migration = vbasedev->migration;
+
+ if (!migration) {
+ return false;
+ }
+
+ if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) &&
+ (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
+ continue;
+ } else {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+static int vfio_dma_unmap_bitmap(VFIOContainer *container,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb)
+{
+ struct vfio_iommu_type1_dma_unmap *unmap;
+ struct vfio_bitmap *bitmap;
+ uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size;
+ int ret;
+
+ unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
+
+ unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
+ unmap->iova = iova;
+ unmap->size = size;
+ unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
+ bitmap = (struct vfio_bitmap *)&unmap->data;
+
+ /*
+ * cpu_physical_memory_set_dirty_lebitmap() expects the bitmap to be at
+ * qemu_real_host_page_size granularity when marking pages dirty. Hence
+ * set bitmap->pgsize to qemu_real_host_page_size.
+ */
+
+ bitmap->pgsize = qemu_real_host_page_size;
+ bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
+ BITS_PER_BYTE;
+
+ if (bitmap->size > container->max_dirty_bitmap_size) {
+ error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
+ (uint64_t)bitmap->size);
+ ret = -E2BIG;
+ goto unmap_exit;
+ }
+
+ bitmap->data = g_try_malloc0(bitmap->size);
+ if (!bitmap->data) {
+ ret = -ENOMEM;
+ goto unmap_exit;
+ }
+
+ ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
+ if (!ret) {
+ cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data,
+ iotlb->translated_addr, pages);
+ } else {
+ error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
+ }
+
+ g_free(bitmap->data);
+unmap_exit:
+ g_free(unmap);
+ return ret;
+}
+
+/*
+ * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
+ */
+static int vfio_dma_unmap(VFIOContainer *container,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb)
+{
+ struct vfio_iommu_type1_dma_unmap unmap = {
+ .argsz = sizeof(unmap),
+ .flags = 0,
+ .iova = iova,
+ .size = size,
+ };
+
+ if (iotlb && container->dirty_pages_supported &&
+ vfio_devices_all_running_and_saving(container)) {
+ return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
+ }
+
+ while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
+ /*
+ * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
+ * v4.15) where an overflow in its wrap-around check prevents us from
+ * unmapping the last page of the address space. Test for the error
+ * condition and re-try the unmap excluding the last page. The
+ * expectation is that we've never mapped the last page anyway and this
+ * unmap request comes via vIOMMU support which also makes it unlikely
+ * that this page is used. This bug was introduced well after type1 v2
+ * support was introduced, so we shouldn't need to test for v1. A fix
+ * is queued for kernel v5.0 so this workaround can be removed once
+ * affected kernels are sufficiently deprecated.
+ */
+ if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
+ container->iommu_type == VFIO_TYPE1v2_IOMMU) {
+ trace_vfio_dma_unmap_overflow_workaround();
+ unmap.size -= 1ULL << ctz64(container->pgsizes);
+ continue;
+ }
+ error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
+ return -errno;
+ }
+
+ return 0;
+}
+
+static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
+ ram_addr_t size, void *vaddr, bool readonly)
+{
+ struct vfio_iommu_type1_dma_map map = {
+ .argsz = sizeof(map),
+ .flags = VFIO_DMA_MAP_FLAG_READ,
+ .vaddr = (__u64)(uintptr_t)vaddr,
+ .iova = iova,
+ .size = size,
+ };
+
+ if (!readonly) {
+ map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
+ }
+
+ /*
+ * Try the mapping, if it fails with EBUSY, unmap the region and try
+ * again. This shouldn't be necessary, but we sometimes see it in
+ * the VGA ROM space.
+ */
+ if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
+ (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
+ ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
+ return 0;
+ }
+
+ error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
+ return -errno;
+}
+
+static void vfio_host_win_add(VFIOContainer *container,
+ hwaddr min_iova, hwaddr max_iova,
+ uint64_t iova_pgsizes)
+{
+ VFIOHostDMAWindow *hostwin;
+
+ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ if (ranges_overlap(hostwin->min_iova,
+ hostwin->max_iova - hostwin->min_iova + 1,
+ min_iova,
+ max_iova - min_iova + 1)) {
+ hw_error("%s: Overlapped IOMMU are not enabled", __func__);
+ }
+ }
+
+ hostwin = g_malloc0(sizeof(*hostwin));
+
+ hostwin->min_iova = min_iova;
+ hostwin->max_iova = max_iova;
+ hostwin->iova_pgsizes = iova_pgsizes;
+ QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
+}
+
+static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
+ hwaddr max_iova)
+{
+ VFIOHostDMAWindow *hostwin;
+
+ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
+ QLIST_REMOVE(hostwin, hostwin_next);
+ g_free(hostwin);
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static bool vfio_listener_skipped_section(MemoryRegionSection *section)
+{
+ return (!memory_region_is_ram(section->mr) &&
+ !memory_region_is_iommu(section->mr)) ||
+ memory_region_is_protected(section->mr) ||
+ /*
+ * Sizing an enabled 64-bit BAR can cause spurious mappings to
+ * addresses in the upper part of the 64-bit address space. These
+ * are never accessed by the CPU and beyond the address width of
+ * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width.
+ */
+ section->offset_within_address_space & (1ULL << 63);
+}
+
+/* Called with rcu_read_lock held. */
+static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
+ ram_addr_t *ram_addr, bool *read_only)
+{
+ MemoryRegion *mr;
+ hwaddr xlat;
+ hwaddr len = iotlb->addr_mask + 1;
+ bool writable = iotlb->perm & IOMMU_WO;
+
+ /*
+ * The IOMMU TLB entry we have just covers translation through
+ * this IOMMU to its immediate target. We need to translate
+ * it the rest of the way through to memory.
+ */
+ mr = address_space_translate(&address_space_memory,
+ iotlb->translated_addr,
+ &xlat, &len, writable,
+ MEMTXATTRS_UNSPECIFIED);
+ if (!memory_region_is_ram(mr)) {
+ error_report("iommu map to non memory area %"HWADDR_PRIx"",
+ xlat);
+ return false;
+ } else if (memory_region_has_ram_discard_manager(mr)) {
+ RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
+ MemoryRegionSection tmp = {
+ .mr = mr,
+ .offset_within_region = xlat,
+ .size = int128_make64(len),
+ };
+
+ /*
+ * Malicious VMs can map memory into the IOMMU, which is expected
+ * to remain discarded. vfio will pin all pages, populating memory.
+ * Disallow that. vmstate priorities make sure any RamDiscardManager
+ * state was already restored before IOMMUs are restored.
+ */
+ if (!ram_discard_manager_is_populated(rdm, &tmp)) {
+ error_report("iommu map to discarded memory (e.g., unplugged via"
+ " virtio-mem): %"HWADDR_PRIx"",
+ iotlb->translated_addr);
+ return false;
+ }
+
+ /*
+ * Malicious VMs might trigger discarding of IOMMU-mapped memory. The
+ * pages will remain pinned inside vfio until unmapped, resulting in a
+ * higher memory consumption than expected. If memory would get
+ * populated again later, there would be an inconsistency between pages
+ * pinned by vfio and pages seen by QEMU. This is the case until
+ * unmapped from the IOMMU (e.g., during device reset).
+ *
+ * With malicious guests, we really only care about pinning more memory
+ * than expected. RLIMIT_MEMLOCK set for the user/process can never be
+ * exceeded and can be used to mitigate this problem.
+ */
+ warn_report_once("Using vfio with vIOMMUs and coordinated discarding of"
+ " RAM (e.g., virtio-mem) works, however, malicious"
+ " guests can trigger pinning of more memory than"
+ " intended via an IOMMU. It's possible to mitigate "
+ " by setting/adjusting RLIMIT_MEMLOCK.");
+ }
+
+ /*
+ * Translation truncates length to the IOMMU page size,
+ * check that it did not truncate too much.
+ */
+ if (len & iotlb->addr_mask) {
+ error_report("iommu has granularity incompatible with target AS");
+ return false;
+ }
+
+ if (vaddr) {
+ *vaddr = memory_region_get_ram_ptr(mr) + xlat;
+ }
+
+ if (ram_addr) {
+ *ram_addr = memory_region_get_ram_addr(mr) + xlat;
+ }
+
+ if (read_only) {
+ *read_only = !writable || mr->readonly;
+ }
+
+ return true;
+}
+
+static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+ VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
+ VFIOContainer *container = giommu->container;
+ hwaddr iova = iotlb->iova + giommu->iommu_offset;
+ void *vaddr;
+ int ret;
+
+ trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
+ iova, iova + iotlb->addr_mask);
+
+ if (iotlb->target_as != &address_space_memory) {
+ error_report("Wrong target AS \"%s\", only system memory is allowed",
+ iotlb->target_as->name ? iotlb->target_as->name : "none");
+ return;
+ }
+
+ rcu_read_lock();
+
+ if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
+ bool read_only;
+
+ if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
+ goto out;
+ }
+ /*
+ * vaddr is only valid until rcu_read_unlock(). But after
+ * vfio_dma_map has set up the mapping the pages will be
+ * pinned by the kernel. This makes sure that the RAM backend
+ * of vaddr will always be there, even if the memory object is
+ * destroyed and its backing memory munmap-ed.
+ */
+ ret = vfio_dma_map(container, iova,
+ iotlb->addr_mask + 1, vaddr,
+ read_only);
+ if (ret) {
+ error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx", %p) = %d (%m)",
+ container, iova,
+ iotlb->addr_mask + 1, vaddr, ret);
+ }
+ } else {
+ ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
+ if (ret) {
+ error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx") = %d (%m)",
+ container, iova,
+ iotlb->addr_mask + 1, ret);
+ }
+ }
+out:
+ rcu_read_unlock();
+}
+
+static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
+ MemoryRegionSection *section)
+{
+ VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
+ listener);
+ const hwaddr size = int128_get64(section->size);
+ const hwaddr iova = section->offset_within_address_space;
+ int ret;
+
+ /* Unmap with a single call. */
+ ret = vfio_dma_unmap(vrdl->container, iova, size, NULL);
+ if (ret) {
+ error_report("%s: vfio_dma_unmap() failed: %s", __func__,
+ strerror(-ret));
+ }
+}
+
+static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
+ MemoryRegionSection *section)
+{
+ VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
+ listener);
+ const hwaddr end = section->offset_within_region +
+ int128_get64(section->size);
+ hwaddr start, next, iova;
+ void *vaddr;
+ int ret;
+
+ /*
+ * Map in (aligned within memory region) minimum granularity, so we can
+ * unmap in minimum granularity later.
+ */
+ for (start = section->offset_within_region; start < end; start = next) {
+ next = ROUND_UP(start + 1, vrdl->granularity);
+ next = MIN(next, end);
+
+ iova = start - section->offset_within_region +
+ section->offset_within_address_space;
+ vaddr = memory_region_get_ram_ptr(section->mr) + start;
+
+ ret = vfio_dma_map(vrdl->container, iova, next - start,
+ vaddr, section->readonly);
+ if (ret) {
+ /* Rollback */
+ vfio_ram_discard_notify_discard(rdl, section);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void vfio_register_ram_discard_listener(VFIOContainer *container,
+ MemoryRegionSection *section)
+{
+ RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
+ VFIORamDiscardListener *vrdl;
+
+ /* Ignore some corner cases not relevant in practice. */
+ g_assert(QEMU_IS_ALIGNED(section->offset_within_region, TARGET_PAGE_SIZE));
+ g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space,
+ TARGET_PAGE_SIZE));
+ g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE));
+
+ vrdl = g_new0(VFIORamDiscardListener, 1);
+ vrdl->container = container;
+ vrdl->mr = section->mr;
+ vrdl->offset_within_address_space = section->offset_within_address_space;
+ vrdl->size = int128_get64(section->size);
+ vrdl->granularity = ram_discard_manager_get_min_granularity(rdm,
+ section->mr);
+
+ g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
+ g_assert(container->pgsizes &&
+ vrdl->granularity >= 1ULL << ctz64(container->pgsizes));
+
+ ram_discard_listener_init(&vrdl->listener,
+ vfio_ram_discard_notify_populate,
+ vfio_ram_discard_notify_discard, true);
+ ram_discard_manager_register_listener(rdm, &vrdl->listener, section);
+ QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next);
+
+ /*
+ * Sanity-check if we have a theoretically problematic setup where we could
+ * exceed the maximum number of possible DMA mappings over time. We assume
+ * that each mapped section in the same address space as a RamDiscardManager
+ * section consumes exactly one DMA mapping, with the exception of
+ * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections
+ * in the same address space as RamDiscardManager sections.
+ *
+ * We assume that each section in the address space consumes one memslot.
+ * We take the number of KVM memory slots as a best guess for the maximum
+ * number of sections in the address space we could have over time,
+ * also consuming DMA mappings.
+ */
+ if (container->dma_max_mappings) {
+ unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512;
+
+#ifdef CONFIG_KVM
+ if (kvm_enabled()) {
+ max_memslots = kvm_get_max_memslots();
+ }
+#endif
+
+ QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+ hwaddr start, end;
+
+ start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space,
+ vrdl->granularity);
+ end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size,
+ vrdl->granularity);
+ vrdl_mappings += (end - start) / vrdl->granularity;
+ vrdl_count++;
+ }
+
+ if (vrdl_mappings + max_memslots - vrdl_count >
+ container->dma_max_mappings) {
+ warn_report("%s: possibly running out of DMA mappings. E.g., try"
+ " increasing the 'block-size' of virtio-mem devies."
+ " Maximum possible DMA mappings: %d, Maximum possible"
+ " memslots: %d", __func__, container->dma_max_mappings,
+ max_memslots);
+ }
+ }
+}
+
+static void vfio_unregister_ram_discard_listener(VFIOContainer *container,
+ MemoryRegionSection *section)
+{
+ RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
+ VFIORamDiscardListener *vrdl = NULL;
+
+ QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+ if (vrdl->mr == section->mr &&
+ vrdl->offset_within_address_space ==
+ section->offset_within_address_space) {
+ break;
+ }
+ }
+
+ if (!vrdl) {
+ hw_error("vfio: Trying to unregister missing RAM discard listener");
+ }
+
+ ram_discard_manager_unregister_listener(rdm, &vrdl->listener);
+ QLIST_REMOVE(vrdl, next);
+ g_free(vrdl);
+}
+
+static void vfio_listener_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+ hwaddr iova, end;
+ Int128 llend, llsize;
+ void *vaddr;
+ int ret;
+ VFIOHostDMAWindow *hostwin;
+ bool hostwin_found;
+ Error *err = NULL;
+
+ if (vfio_listener_skipped_section(section)) {
+ trace_vfio_listener_region_add_skip(
+ section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(int128_sub(section->size, int128_one())));
+ return;
+ }
+
+ if (unlikely((section->offset_within_address_space &
+ ~qemu_real_host_page_mask) !=
+ (section->offset_within_region & ~qemu_real_host_page_mask))) {
+ error_report("%s received unaligned region", __func__);
+ return;
+ }
+
+ iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
+ llend = int128_make64(section->offset_within_address_space);
+ llend = int128_add(llend, section->size);
+ llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
+
+ if (int128_ge(int128_make64(iova), llend)) {
+ if (memory_region_is_ram_device(section->mr)) {
+ trace_vfio_listener_region_add_no_dma_map(
+ memory_region_name(section->mr),
+ section->offset_within_address_space,
+ int128_getlo(section->size),
+ qemu_real_host_page_size);
+ }
+ return;
+ }
+ end = int128_get64(int128_sub(llend, int128_one()));
+
+ if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
+ hwaddr pgsize = 0;
+
+        /* For now, intersections are not allowed; we may relax this later */
+ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ if (ranges_overlap(hostwin->min_iova,
+ hostwin->max_iova - hostwin->min_iova + 1,
+ section->offset_within_address_space,
+ int128_get64(section->size))) {
+ error_setg(&err,
+                    "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing "
+                    "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
+ section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(section->size) - 1,
+ hostwin->min_iova, hostwin->max_iova);
+ goto fail;
+ }
+ }
+
+ ret = vfio_spapr_create_window(container, section, &pgsize);
+ if (ret) {
+ error_setg_errno(&err, -ret, "Failed to create SPAPR window");
+ goto fail;
+ }
+
+ vfio_host_win_add(container, section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(section->size) - 1, pgsize);
+#ifdef CONFIG_KVM
+ if (kvm_enabled()) {
+ VFIOGroup *group;
+ IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
+ struct kvm_vfio_spapr_tce param;
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_VFIO_GROUP,
+ .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
+ .addr = (uint64_t)(unsigned long)&param,
+ };
+
+ if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
+ &param.tablefd)) {
+ QLIST_FOREACH(group, &container->group_list, container_next) {
+ param.groupfd = group->fd;
+ if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
+ error_report("vfio: failed to setup fd %d "
+ "for a group with fd %d: %s",
+ param.tablefd, param.groupfd,
+ strerror(errno));
+ return;
+ }
+ trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
+ }
+ }
+ }
+#endif
+ }
+
+ hostwin_found = false;
+ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
+ hostwin_found = true;
+ break;
+ }
+ }
+
+ if (!hostwin_found) {
+ error_setg(&err, "Container %p can't map guest IOVA region"
+ " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
+ goto fail;
+ }
+
+ memory_region_ref(section->mr);
+
+ if (memory_region_is_iommu(section->mr)) {
+ VFIOGuestIOMMU *giommu;
+ IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
+ int iommu_idx;
+
+ trace_vfio_listener_region_add_iommu(iova, end);
+ /*
+ * FIXME: For VFIO iommu types which have KVM acceleration to
+ * avoid bouncing all map/unmaps through qemu this way, this
+ * would be the right place to wire that up (tell the KVM
+ * device emulation the VFIO iommu handles to use).
+ */
+ giommu = g_malloc0(sizeof(*giommu));
+ giommu->iommu = iommu_mr;
+ giommu->iommu_offset = section->offset_within_address_space -
+ section->offset_within_region;
+ giommu->container = container;
+ llend = int128_add(int128_make64(section->offset_within_region),
+ section->size);
+ llend = int128_sub(llend, int128_one());
+ iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
+ MEMTXATTRS_UNSPECIFIED);
+ iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
+ IOMMU_NOTIFIER_IOTLB_EVENTS,
+ section->offset_within_region,
+ int128_get64(llend),
+ iommu_idx);
+
+ ret = memory_region_iommu_set_page_size_mask(giommu->iommu,
+ container->pgsizes,
+ &err);
+ if (ret) {
+ g_free(giommu);
+ goto fail;
+ }
+
+ ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
+ &err);
+ if (ret) {
+ g_free(giommu);
+ goto fail;
+ }
+ QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
+ memory_region_iommu_replay(giommu->iommu, &giommu->n);
+
+ return;
+ }
+
+ /* Here we assume that memory_region_is_ram(section->mr)==true */
+
+ /*
+ * For RAM memory regions with a RamDiscardManager, we only want to map the
+ * actually populated parts - and update the mapping whenever we're notified
+ * about changes.
+ */
+ if (memory_region_has_ram_discard_manager(section->mr)) {
+ vfio_register_ram_discard_listener(container, section);
+ return;
+ }
+
+ vaddr = memory_region_get_ram_ptr(section->mr) +
+ section->offset_within_region +
+ (iova - section->offset_within_address_space);
+
+ trace_vfio_listener_region_add_ram(iova, end, vaddr);
+
+ llsize = int128_sub(llend, int128_make64(iova));
+
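+    /*
+     * RAM device regions (e.g. mmap'ed device BARs usable for peer-to-peer
+     * DMA) must be aligned to the host IOMMU page size; otherwise skip the
+     * mapping without treating it as fatal.
+     */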
+ if (memory_region_is_ram_device(section->mr)) {
+ hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
+
+ if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
+ trace_vfio_listener_region_add_no_dma_map(
+ memory_region_name(section->mr),
+ section->offset_within_address_space,
+ int128_getlo(section->size),
+ pgmask + 1);
+ return;
+ }
+ }
+
+ ret = vfio_dma_map(container, iova, int128_get64(llsize),
+ vaddr, section->readonly);
+ if (ret) {
+ error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx", %p) = %d (%m)",
+ container, iova, int128_get64(llsize), vaddr, ret);
+ if (memory_region_is_ram_device(section->mr)) {
+ /* Allow unexpected mappings not to be fatal for RAM devices */
+ error_report_err(err);
+ return;
+ }
+ goto fail;
+ }
+
+ return;
+
+fail:
+ if (memory_region_is_ram_device(section->mr)) {
+ error_report("failed to vfio_dma_map. pci p2p may not work");
+ return;
+ }
+ /*
+ * On the initfn path, store the first error in the container so we
+     * can gracefully fail. At runtime, there's not much we can do other
+ * than throw a hardware error.
+ */
+ if (!container->initialized) {
+ if (!container->error) {
+ error_propagate_prepend(&container->error, err,
+ "Region %s: ",
+ memory_region_name(section->mr));
+ } else {
+ error_free(err);
+ }
+ } else {
+ error_report_err(err);
+ hw_error("vfio: DMA mapping failed, unable to continue");
+ }
+}
+
+static void vfio_listener_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+ hwaddr iova, end;
+ Int128 llend, llsize;
+ int ret;
+ bool try_unmap = true;
+
+ if (vfio_listener_skipped_section(section)) {
+ trace_vfio_listener_region_del_skip(
+ section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(int128_sub(section->size, int128_one())));
+ return;
+ }
+
+ if (unlikely((section->offset_within_address_space &
+ ~qemu_real_host_page_mask) !=
+ (section->offset_within_region & ~qemu_real_host_page_mask))) {
+ error_report("%s received unaligned region", __func__);
+ return;
+ }
+
+ if (memory_region_is_iommu(section->mr)) {
+ VFIOGuestIOMMU *giommu;
+
+ QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
+ if (MEMORY_REGION(giommu->iommu) == section->mr &&
+ giommu->n.start == section->offset_within_region) {
+ memory_region_unregister_iommu_notifier(section->mr,
+ &giommu->n);
+ QLIST_REMOVE(giommu, giommu_next);
+ g_free(giommu);
+ break;
+ }
+ }
+
+ /*
+ * FIXME: We assume the one big unmap below is adequate to
+ * remove any individual page mappings in the IOMMU which
+ * might have been copied into VFIO. This works for a page table
+ * based IOMMU where a big unmap flattens a large range of IO-PTEs.
+ * That may not be true for all IOMMU types.
+ */
+ }
+
+ iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
+ llend = int128_make64(section->offset_within_address_space);
+ llend = int128_add(llend, section->size);
+ llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
+
+ if (int128_ge(int128_make64(iova), llend)) {
+ return;
+ }
+ end = int128_get64(int128_sub(llend, int128_one()));
+
+ llsize = int128_sub(llend, int128_make64(iova));
+
+ trace_vfio_listener_region_del(iova, end);
+
+ if (memory_region_is_ram_device(section->mr)) {
+ hwaddr pgmask;
+ VFIOHostDMAWindow *hostwin;
+ bool hostwin_found = false;
+
+ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
+ hostwin_found = true;
+ break;
+ }
+ }
+ assert(hostwin_found); /* or region_add() would have failed */
+
+ pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
+ try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
+ } else if (memory_region_has_ram_discard_manager(section->mr)) {
+ vfio_unregister_ram_discard_listener(container, section);
+ /* Unregistering will trigger an unmap. */
+ try_unmap = false;
+ }
+
+ if (try_unmap) {
+ if (int128_eq(llsize, int128_2_64())) {
+ /* The unmap ioctl doesn't accept a full 64-bit span. */
+ llsize = int128_rshift(llsize, 1);
+ ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
+ if (ret) {
+ error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx") = %d (%m)",
+ container, iova, int128_get64(llsize), ret);
+ }
+ iova += int128_get64(llsize);
+ }
+ ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
+ if (ret) {
+ error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx") = %d (%m)",
+ container, iova, int128_get64(llsize), ret);
+ }
+ }
+
+ memory_region_unref(section->mr);
+
+ if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
+ vfio_spapr_remove_window(container,
+ section->offset_within_address_space);
+ if (vfio_host_win_del(container,
+ section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(section->size) - 1) < 0) {
+ hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
+ __func__, section->offset_within_address_space);
+ }
+ }
+}
+
+static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
+{
+ int ret;
+ struct vfio_iommu_type1_dirty_bitmap dirty = {
+ .argsz = sizeof(dirty),
+ };
+
+ if (start) {
+ dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
+ } else {
+ dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
+ }
+
+ ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
+ if (ret) {
+ error_report("Failed to set dirty tracking flag 0x%x errno: %d",
+ dirty.flags, errno);
+ }
+}
+
+static void vfio_listener_log_global_start(MemoryListener *listener)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+
+ vfio_set_dirty_page_tracking(container, true);
+}
+
+static void vfio_listener_log_global_stop(MemoryListener *listener)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+
+ vfio_set_dirty_page_tracking(container, false);
+}
+
+static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
+ uint64_t size, ram_addr_t ram_addr)
+{
+ struct vfio_iommu_type1_dirty_bitmap *dbitmap;
+ struct vfio_iommu_type1_dirty_bitmap_get *range;
+ uint64_t pages;
+ int ret;
+
+ dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));
+
+ dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
+ dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
+ range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
+ range->iova = iova;
+ range->size = size;
+
+ /*
+     * cpu_physical_memory_set_dirty_lebitmap() expects the bitmap to track
+     * pages of qemu_real_host_page_size, so set the bitmap's pgsize to
+     * qemu_real_host_page_size.
+ */
+ range->bitmap.pgsize = qemu_real_host_page_size;
+
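+    /*
+     * One bit per page, rounded up to a multiple of 64 bits so the kernel
+     * gets a __u64-aligned bitmap buffer.
+     */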
+ pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size;
+ range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
+ BITS_PER_BYTE;
+ range->bitmap.data = g_try_malloc0(range->bitmap.size);
+ if (!range->bitmap.data) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
+ if (ret) {
+ error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
+ " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
+ (uint64_t)range->size, errno);
+ goto err_out;
+ }
+
+ cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
+ ram_addr, pages);
+
+ trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
+ range->bitmap.size, ram_addr);
+err_out:
+ g_free(range->bitmap.data);
+ g_free(dbitmap);
+
+ return ret;
+}
+
+typedef struct {
+ IOMMUNotifier n;
+ VFIOGuestIOMMU *giommu;
+} vfio_giommu_dirty_notifier;
+
+static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+ vfio_giommu_dirty_notifier *gdn = container_of(n,
+ vfio_giommu_dirty_notifier, n);
+ VFIOGuestIOMMU *giommu = gdn->giommu;
+ VFIOContainer *container = giommu->container;
+ hwaddr iova = iotlb->iova + giommu->iommu_offset;
+ ram_addr_t translated_addr;
+
+ trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
+
+ if (iotlb->target_as != &address_space_memory) {
+ error_report("Wrong target AS \"%s\", only system memory is allowed",
+ iotlb->target_as->name ? iotlb->target_as->name : "none");
+ return;
+ }
+
+ rcu_read_lock();
+ if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
+ int ret;
+
+ ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
+ translated_addr);
+ if (ret) {
+ error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx") = %d (%m)",
+ container, iova,
+ iotlb->addr_mask + 1, ret);
+ }
+ }
+ rcu_read_unlock();
+}
+
+static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section,
+ void *opaque)
+{
+ const hwaddr size = int128_get64(section->size);
+ const hwaddr iova = section->offset_within_address_space;
+ const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
+ section->offset_within_region;
+ VFIORamDiscardListener *vrdl = opaque;
+
+ /*
+ * Sync the whole mapped region (spanning multiple individual mappings)
+ * in one go.
+ */
+ return vfio_get_dirty_bitmap(vrdl->container, iova, size, ram_addr);
+}
+
+static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container,
+ MemoryRegionSection *section)
+{
+ RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
+ VFIORamDiscardListener *vrdl = NULL;
+
+ QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+ if (vrdl->mr == section->mr &&
+ vrdl->offset_within_address_space ==
+ section->offset_within_address_space) {
+ break;
+ }
+ }
+
+ if (!vrdl) {
+ hw_error("vfio: Trying to sync missing RAM discard listener");
+ }
+
+ /*
+ * We only want/can synchronize the bitmap for actually mapped parts -
+ * which correspond to populated parts. Replay all populated parts.
+ */
+ return ram_discard_manager_replay_populated(rdm, section,
+ vfio_ram_discard_get_dirty_bitmap,
+                                                vrdl);
+}
+
+static int vfio_sync_dirty_bitmap(VFIOContainer *container,
+ MemoryRegionSection *section)
+{
+ ram_addr_t ram_addr;
+
+ if (memory_region_is_iommu(section->mr)) {
+ VFIOGuestIOMMU *giommu;
+
+ QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
+ if (MEMORY_REGION(giommu->iommu) == section->mr &&
+ giommu->n.start == section->offset_within_region) {
+ Int128 llend;
+ vfio_giommu_dirty_notifier gdn = { .giommu = giommu };
+ int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
+ MEMTXATTRS_UNSPECIFIED);
+
+ llend = int128_add(int128_make64(section->offset_within_region),
+ section->size);
+ llend = int128_sub(llend, int128_one());
+
+ iommu_notifier_init(&gdn.n,
+ vfio_iommu_map_dirty_notify,
+ IOMMU_NOTIFIER_MAP,
+ section->offset_within_region,
+ int128_get64(llend),
+ idx);
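+                /*
+                 * Replaying triggers a MAP notification for each existing
+                 * mapping, and vfio_iommu_map_dirty_notify() syncs the
+                 * dirty bitmap for each translated range.
+                 */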
+ memory_region_iommu_replay(giommu->iommu, &gdn.n);
+ break;
+ }
+ }
+ return 0;
+ } else if (memory_region_has_ram_discard_manager(section->mr)) {
+ return vfio_sync_ram_discard_listener_dirty_bitmap(container, section);
+ }
+
+ ram_addr = memory_region_get_ram_addr(section->mr) +
+ section->offset_within_region;
+
+ return vfio_get_dirty_bitmap(container,
+ REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
+ int128_get64(section->size), ram_addr);
+}
+
+static void vfio_listener_log_sync(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+
+ if (vfio_listener_skipped_section(section) ||
+ !container->dirty_pages_supported) {
+ return;
+ }
+
+ if (vfio_devices_all_dirty_tracking(container)) {
+ vfio_sync_dirty_bitmap(container, section);
+ }
+}
+
+static const MemoryListener vfio_memory_listener = {
+ .name = "vfio",
+ .region_add = vfio_listener_region_add,
+ .region_del = vfio_listener_region_del,
+ .log_global_start = vfio_listener_log_global_start,
+ .log_global_stop = vfio_listener_log_global_stop,
+ .log_sync = vfio_listener_log_sync,
+};
+
+static void vfio_listener_release(VFIOContainer *container)
+{
+ memory_listener_unregister(&container->listener);
+ if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
+ memory_listener_unregister(&container->prereg_listener);
+ }
+}
+
+static struct vfio_info_cap_header *
+vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
+{
+ struct vfio_info_cap_header *hdr;
+
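+    /* A zero 'next' offset ends the chain by wrapping hdr back to ptr. */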
+ for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
+ if (hdr->id == id) {
+ return hdr;
+ }
+ }
+
+ return NULL;
+}
+
+struct vfio_info_cap_header *
+vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
+{
+ if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
+ return NULL;
+ }
+
+ return vfio_get_cap((void *)info, info->cap_offset, id);
+}
+
+static struct vfio_info_cap_header *
+vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
+{
+ if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
+ return NULL;
+ }
+
+ return vfio_get_cap((void *)info, info->cap_offset, id);
+}
+
+struct vfio_info_cap_header *
+vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
+{
+ if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) {
+ return NULL;
+ }
+
+ return vfio_get_cap((void *)info, info->cap_offset, id);
+}
+
+bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
+ unsigned int *avail)
+{
+ struct vfio_info_cap_header *hdr;
+ struct vfio_iommu_type1_info_dma_avail *cap;
+
+ /* If the capability cannot be found, assume no DMA limiting */
+ hdr = vfio_get_iommu_type1_info_cap(info,
+ VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
+ if (hdr == NULL) {
+ return false;
+ }
+
+ if (avail != NULL) {
+ cap = (void *) hdr;
+ *avail = cap->avail;
+ }
+
+ return true;
+}
+
+static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
+ struct vfio_region_info *info)
+{
+ struct vfio_info_cap_header *hdr;
+ struct vfio_region_info_cap_sparse_mmap *sparse;
+ int i, j;
+
+ hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
+ if (!hdr) {
+ return -ENODEV;
+ }
+
+ sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);
+
+ trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
+ region->nr, sparse->nr_areas);
+
+ region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);
+
+ for (i = 0, j = 0; i < sparse->nr_areas; i++) {
+ trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
+ sparse->areas[i].offset +
+ sparse->areas[i].size);
+
+ if (sparse->areas[i].size) {
+ region->mmaps[j].offset = sparse->areas[i].offset;
+ region->mmaps[j].size = sparse->areas[i].size;
+ j++;
+ }
+ }
+
+ region->nr_mmaps = j;
+ region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));
+
+ return 0;
+}
+
+int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
+ int index, const char *name)
+{
+ struct vfio_region_info *info;
+ int ret;
+
+ ret = vfio_get_region_info(vbasedev, index, &info);
+ if (ret) {
+ return ret;
+ }
+
+ region->vbasedev = vbasedev;
+ region->flags = info->flags;
+ region->size = info->size;
+ region->fd_offset = info->offset;
+ region->nr = index;
+
+ if (region->size) {
+ region->mem = g_new0(MemoryRegion, 1);
+ memory_region_init_io(region->mem, obj, &vfio_region_ops,
+ region, name, region->size);
+
+ if (!vbasedev->no_mmap &&
+ region->flags & VFIO_REGION_INFO_FLAG_MMAP) {
+
+ ret = vfio_setup_region_sparse_mmaps(region, info);
+
+ if (ret) {
+ region->nr_mmaps = 1;
+ region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
+ region->mmaps[0].offset = 0;
+ region->mmaps[0].size = region->size;
+ }
+ }
+ }
+
+ g_free(info);
+
+ trace_vfio_region_setup(vbasedev->name, index, name,
+ region->flags, region->fd_offset, region->size);
+ return 0;
+}
+
+static void vfio_subregion_unmap(VFIORegion *region, int index)
+{
+ trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
+ region->mmaps[index].offset,
+ region->mmaps[index].offset +
+ region->mmaps[index].size - 1);
+ memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
+ munmap(region->mmaps[index].mmap, region->mmaps[index].size);
+ object_unparent(OBJECT(&region->mmaps[index].mem));
+ region->mmaps[index].mmap = NULL;
+}
+
+int vfio_region_mmap(VFIORegion *region)
+{
+ int i, prot = 0;
+ char *name;
+
+ if (!region->mem) {
+ return 0;
+ }
+
+ prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
+ prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;
+
+ for (i = 0; i < region->nr_mmaps; i++) {
+ region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
+ MAP_SHARED, region->vbasedev->fd,
+ region->fd_offset +
+ region->mmaps[i].offset);
+ if (region->mmaps[i].mmap == MAP_FAILED) {
+ int ret = -errno;
+
+ trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
+ region->fd_offset +
+ region->mmaps[i].offset,
+ region->fd_offset +
+ region->mmaps[i].offset +
+ region->mmaps[i].size - 1, ret);
+
+ region->mmaps[i].mmap = NULL;
+
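+            /* Unwind: unmap and release every subregion mapped so far. */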
+ for (i--; i >= 0; i--) {
+ vfio_subregion_unmap(region, i);
+ }
+
+ return ret;
+ }
+
+ name = g_strdup_printf("%s mmaps[%d]",
+ memory_region_name(region->mem), i);
+ memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
+ memory_region_owner(region->mem),
+ name, region->mmaps[i].size,
+ region->mmaps[i].mmap);
+ g_free(name);
+ memory_region_add_subregion(region->mem, region->mmaps[i].offset,
+ &region->mmaps[i].mem);
+
+ trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
+ region->mmaps[i].offset,
+ region->mmaps[i].offset +
+ region->mmaps[i].size - 1);
+ }
+
+ return 0;
+}
+
+void vfio_region_unmap(VFIORegion *region)
+{
+ int i;
+
+ if (!region->mem) {
+ return;
+ }
+
+ for (i = 0; i < region->nr_mmaps; i++) {
+ if (region->mmaps[i].mmap) {
+ vfio_subregion_unmap(region, i);
+ }
+ }
+}
+
+void vfio_region_exit(VFIORegion *region)
+{
+ int i;
+
+ if (!region->mem) {
+ return;
+ }
+
+ for (i = 0; i < region->nr_mmaps; i++) {
+ if (region->mmaps[i].mmap) {
+ memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
+ }
+ }
+
+ trace_vfio_region_exit(region->vbasedev->name, region->nr);
+}
+
+void vfio_region_finalize(VFIORegion *region)
+{
+ int i;
+
+ if (!region->mem) {
+ return;
+ }
+
+ for (i = 0; i < region->nr_mmaps; i++) {
+ if (region->mmaps[i].mmap) {
+ munmap(region->mmaps[i].mmap, region->mmaps[i].size);
+ object_unparent(OBJECT(&region->mmaps[i].mem));
+ }
+ }
+
+ object_unparent(OBJECT(region->mem));
+
+ g_free(region->mem);
+ g_free(region->mmaps);
+
+ trace_vfio_region_finalize(region->vbasedev->name, region->nr);
+
+ region->mem = NULL;
+ region->mmaps = NULL;
+ region->nr_mmaps = 0;
+ region->size = 0;
+ region->flags = 0;
+ region->nr = 0;
+}
+
+void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
+{
+ int i;
+
+ if (!region->mem) {
+ return;
+ }
+
+ for (i = 0; i < region->nr_mmaps; i++) {
+ if (region->mmaps[i].mmap) {
+ memory_region_set_enabled(&region->mmaps[i].mem, enabled);
+ }
+ }
+
+ trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
+ enabled);
+}
+
+void vfio_reset_handler(void *opaque)
+{
+ VFIOGroup *group;
+ VFIODevice *vbasedev;
+
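+    /*
+     * Two passes: first let each device compute whether it needs a reset,
+     * then perform the (possibly multi-device) resets.
+     */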
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ if (vbasedev->dev->realized) {
+ vbasedev->ops->vfio_compute_needs_reset(vbasedev);
+ }
+ }
+ }
+
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ if (vbasedev->dev->realized && vbasedev->needs_reset) {
+ vbasedev->ops->vfio_hot_reset_multi(vbasedev);
+ }
+ }
+ }
+}
+
+static void vfio_kvm_device_add_group(VFIOGroup *group)
+{
+#ifdef CONFIG_KVM
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_VFIO_GROUP,
+ .attr = KVM_DEV_VFIO_GROUP_ADD,
+ .addr = (uint64_t)(unsigned long)&group->fd,
+ };
+
+ if (!kvm_enabled()) {
+ return;
+ }
+
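+    /* Lazily create the KVM VFIO pseudo-device on first use. */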
+ if (vfio_kvm_device_fd < 0) {
+ struct kvm_create_device cd = {
+ .type = KVM_DEV_TYPE_VFIO,
+ };
+
+ if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
+ error_report("Failed to create KVM VFIO device: %m");
+ return;
+ }
+
+ vfio_kvm_device_fd = cd.fd;
+ }
+
+ if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
+ error_report("Failed to add group %d to KVM VFIO device: %m",
+ group->groupid);
+ }
+#endif
+}
+
+static void vfio_kvm_device_del_group(VFIOGroup *group)
+{
+#ifdef CONFIG_KVM
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_VFIO_GROUP,
+ .attr = KVM_DEV_VFIO_GROUP_DEL,
+ .addr = (uint64_t)(unsigned long)&group->fd,
+ };
+
+ if (vfio_kvm_device_fd < 0) {
+ return;
+ }
+
+ if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
+ error_report("Failed to remove group %d from KVM VFIO device: %m",
+ group->groupid);
+ }
+#endif
+}
+
+static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
+{
+ VFIOAddressSpace *space;
+
+ QLIST_FOREACH(space, &vfio_address_spaces, list) {
+ if (space->as == as) {
+ return space;
+ }
+ }
+
+ /* No suitable VFIOAddressSpace, create a new one */
+ space = g_malloc0(sizeof(*space));
+ space->as = as;
+ QLIST_INIT(&space->containers);
+
+ QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
+
+ return space;
+}
+
+static void vfio_put_address_space(VFIOAddressSpace *space)
+{
+ if (QLIST_EMPTY(&space->containers)) {
+ QLIST_REMOVE(space, list);
+ g_free(space);
+ }
+}
+
+/*
+ * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
+ */
+static int vfio_get_iommu_type(VFIOContainer *container,
+ Error **errp)
+{
+ int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
+ VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
+ if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
+ return iommu_types[i];
+ }
+ }
+ error_setg(errp, "No available IOMMU models");
+ return -EINVAL;
+}
+
+static int vfio_init_container(VFIOContainer *container, int group_fd,
+ Error **errp)
+{
+ int iommu_type, ret;
+
+ iommu_type = vfio_get_iommu_type(container, errp);
+ if (iommu_type < 0) {
+ return iommu_type;
+ }
+
+ ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
+ if (ret) {
+ error_setg_errno(errp, errno, "Failed to set group container");
+ return -errno;
+ }
+
+ while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
+ if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
+ /*
+             * On sPAPR, although the IOMMU subdriver always advertises v1
+             * and v2, the running platform may not support v2 and there is
+             * no way to guess it until an IOMMU group gets added to the
+             * container. So if it fails with v2, try v1 as a fallback.
+ */
+ iommu_type = VFIO_SPAPR_TCE_IOMMU;
+ continue;
+ }
+ error_setg_errno(errp, errno, "Failed to set iommu for container");
+ return -errno;
+ }
+
+ container->iommu_type = iommu_type;
+ return 0;
+}
+
+static int vfio_get_iommu_info(VFIOContainer *container,
+ struct vfio_iommu_type1_info **info)
+{
+
+ size_t argsz = sizeof(struct vfio_iommu_type1_info);
+
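+    /*
+     * Start with the base structure size; if the kernel reports a larger
+     * argsz (i.e. capability chains are present), grow the buffer and retry.
+     */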
+ *info = g_new0(struct vfio_iommu_type1_info, 1);
+again:
+ (*info)->argsz = argsz;
+
+ if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
+ g_free(*info);
+ *info = NULL;
+ return -errno;
+ }
+
+    if ((*info)->argsz > argsz) {
+ argsz = (*info)->argsz;
+ *info = g_realloc(*info, argsz);
+ goto again;
+ }
+
+ return 0;
+}
+
+static struct vfio_info_cap_header *
+vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
+{
+ struct vfio_info_cap_header *hdr;
+ void *ptr = info;
+
+ if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
+ return NULL;
+ }
+
+ for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
+ if (hdr->id == id) {
+ return hdr;
+ }
+ }
+
+ return NULL;
+}
+
+static void vfio_get_iommu_info_migration(VFIOContainer *container,
+ struct vfio_iommu_type1_info *info)
+{
+ struct vfio_info_cap_header *hdr;
+ struct vfio_iommu_type1_info_cap_migration *cap_mig;
+
+ hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
+ if (!hdr) {
+ return;
+ }
+
+ cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
+ header);
+
+ /*
+     * cpu_physical_memory_set_dirty_lebitmap() expects dirty bitmaps with a
+     * granularity of qemu_real_host_page_size, so only enable dirty page
+     * tracking if the kernel supports that page size.
+ */
+ if (cap_mig->pgsize_bitmap & qemu_real_host_page_size) {
+ container->dirty_pages_supported = true;
+ container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
+ container->dirty_pgsizes = cap_mig->pgsize_bitmap;
+ }
+}
+
+static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
+ Error **errp)
+{
+ VFIOContainer *container;
+ int ret, fd;
+ VFIOAddressSpace *space;
+
+ space = vfio_get_address_space(as);
+
+ /*
+ * VFIO is currently incompatible with discarding of RAM insofar as the
+ * madvise to purge (zap) the page from QEMU's address space does not
+ * interact with the memory API and therefore leaves stale virtual to
+ * physical mappings in the IOMMU if the page was previously pinned. We
+ * therefore set discarding broken for each group added to a container,
+ * whether the container is used individually or shared. This provides
+ * us with options to allow devices within a group to opt-in and allow
+ * discarding, so long as it is done consistently for a group (for instance
+ * if the device is an mdev device where it is known that the host vendor
+ * driver will never pin pages outside of the working set of the guest
+ * driver, which would thus not be discarding candidates).
+ *
+ * The first opportunity to induce pinning occurs here where we attempt to
+ * attach the group to existing containers within the AddressSpace. If any
+ * pages are already zapped from the virtual address space, such as from
+ * previous discards, new pinning will cause valid mappings to be
+ * re-established. Likewise, when the overall MemoryListener for a new
+ * container is registered, a replay of mappings within the AddressSpace
+ * will occur, re-establishing any previously zapped pages as well.
+ *
+     * In particular, virtio-balloon is currently only prevented from
+     * discarding new memory; it does not yet set
+     * ram_block_discard_set_required() and therefore neither stops us here
+     * nor deals with the sudden memory consumption of inflated memory.
+ *
+ * We do support discarding of memory coordinated via the RamDiscardManager
+ * with some IOMMU types. vfio_ram_block_discard_disable() handles the
+ * details once we know which type of IOMMU we are using.
+ */
+
+ QLIST_FOREACH(container, &space->containers, next) {
+ if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
+ ret = vfio_ram_block_discard_disable(container, true);
+ if (ret) {
+ error_setg_errno(errp, -ret,
+ "Cannot set discarding of RAM broken");
+ if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
+ &container->fd)) {
+ error_report("vfio: error disconnecting group %d from"
+ " container", group->groupid);
+ }
+ return ret;
+ }
+ group->container = container;
+ QLIST_INSERT_HEAD(&container->group_list, group, container_next);
+ vfio_kvm_device_add_group(group);
+ return 0;
+ }
+ }
+
+ fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
+ if (fd < 0) {
+ error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
+ ret = -errno;
+ goto put_space_exit;
+ }
+
+ ret = ioctl(fd, VFIO_GET_API_VERSION);
+ if (ret != VFIO_API_VERSION) {
+ error_setg(errp, "supported vfio version: %d, "
+ "reported version: %d", VFIO_API_VERSION, ret);
+ ret = -EINVAL;
+ goto close_fd_exit;
+ }
+
+ container = g_malloc0(sizeof(*container));
+ container->space = space;
+ container->fd = fd;
+ container->error = NULL;
+ container->dirty_pages_supported = false;
+ container->dma_max_mappings = 0;
+ QLIST_INIT(&container->giommu_list);
+ QLIST_INIT(&container->hostwin_list);
+ QLIST_INIT(&container->vrdl_list);
+
+ ret = vfio_init_container(container, group->fd, errp);
+ if (ret) {
+ goto free_container_exit;
+ }
+
+ ret = vfio_ram_block_discard_disable(container, true);
+ if (ret) {
+ error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
+ goto free_container_exit;
+ }
+
+ switch (container->iommu_type) {
+ case VFIO_TYPE1v2_IOMMU:
+ case VFIO_TYPE1_IOMMU:
+ {
+ struct vfio_iommu_type1_info *info;
+
+ /*
+ * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
+ * IOVA whatsoever. That's not actually true, but the current
+ * kernel interface doesn't tell us what it can map, and the
+ * existing Type1 IOMMUs generally support any IOVA we're
+ * going to actually try in practice.
+ */
+ ret = vfio_get_iommu_info(container, &info);
+
+        if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
+            /* Assume 4k IOVA page size (allocate info if the ioctl failed) */
+            if (!info) {
+                info = g_new0(struct vfio_iommu_type1_info, 1);
+            }
+            info->iova_pgsizes = 4096;
+        }
+ vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
+ container->pgsizes = info->iova_pgsizes;
+
+ /* The default in the kernel ("dma_entry_limit") is 65535. */
+ container->dma_max_mappings = 65535;
+ if (!ret) {
+ vfio_get_info_dma_avail(info, &container->dma_max_mappings);
+ vfio_get_iommu_info_migration(container, info);
+ }
+ g_free(info);
+ break;
+ }
+ case VFIO_SPAPR_TCE_v2_IOMMU:
+ case VFIO_SPAPR_TCE_IOMMU:
+ {
+ struct vfio_iommu_spapr_tce_info info;
+ bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;
+
+ /*
+ * The host kernel code implementing VFIO_IOMMU_DISABLE is called
+         * when the container fd is closed, so we do not call it explicitly
+ * in this file.
+ */
+ if (!v2) {
+ ret = ioctl(fd, VFIO_IOMMU_ENABLE);
+ if (ret) {
+ error_setg_errno(errp, errno, "failed to enable container");
+ ret = -errno;
+ goto enable_discards_exit;
+ }
+ } else {
+ container->prereg_listener = vfio_prereg_listener;
+
+ memory_listener_register(&container->prereg_listener,
+ &address_space_memory);
+ if (container->error) {
+ memory_listener_unregister(&container->prereg_listener);
+ ret = -1;
+ error_propagate_prepend(errp, container->error,
+ "RAM memory listener initialization failed: ");
+ goto enable_discards_exit;
+ }
+ }
+
+ info.argsz = sizeof(info);
+ ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
+ if (ret) {
+ error_setg_errno(errp, errno,
+ "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
+ ret = -errno;
+ if (v2) {
+ memory_listener_unregister(&container->prereg_listener);
+ }
+ goto enable_discards_exit;
+ }
+
+ if (v2) {
+ container->pgsizes = info.ddw.pgsizes;
+ /*
+             * A just-created container has a default window. To make
+             * region_add/del simpler, remove this window now and let the
+             * iommu_listener callbacks create/remove windows as needed.
+ */
+ ret = vfio_spapr_remove_window(container, info.dma32_window_start);
+ if (ret) {
+ error_setg_errno(errp, -ret,
+ "failed to remove existing window");
+ goto enable_discards_exit;
+ }
+ } else {
+ /* The default table uses 4K pages */
+ container->pgsizes = 0x1000;
+ vfio_host_win_add(container, info.dma32_window_start,
+ info.dma32_window_start +
+ info.dma32_window_size - 1,
+ 0x1000);
+ }
+ }
+ }
+
+ vfio_kvm_device_add_group(group);
+
+ QLIST_INIT(&container->group_list);
+ QLIST_INSERT_HEAD(&space->containers, container, next);
+
+ group->container = container;
+ QLIST_INSERT_HEAD(&container->group_list, group, container_next);
+
+ container->listener = vfio_memory_listener;
+
+ memory_listener_register(&container->listener, container->space->as);
+
+ if (container->error) {
+ ret = -1;
+ error_propagate_prepend(errp, container->error,
+ "memory listener initialization failed: ");
+ goto listener_release_exit;
+ }
+
+ container->initialized = true;
+
+ return 0;
+listener_release_exit:
+ QLIST_REMOVE(group, container_next);
+ QLIST_REMOVE(container, next);
+ vfio_kvm_device_del_group(group);
+ vfio_listener_release(container);
+
+enable_discards_exit:
+ vfio_ram_block_discard_disable(container, false);
+
+free_container_exit:
+ g_free(container);
+
+close_fd_exit:
+ close(fd);
+
+put_space_exit:
+ vfio_put_address_space(space);
+
+ return ret;
+}
+
+static void vfio_disconnect_container(VFIOGroup *group)
+{
+ VFIOContainer *container = group->container;
+
+ QLIST_REMOVE(group, container_next);
+ group->container = NULL;
+
+ /*
+ * Explicitly release the listener first before unset container,
+ * since unset may destroy the backend container if it's the last
+ * group.
+ */
+ if (QLIST_EMPTY(&container->group_list)) {
+ vfio_listener_release(container);
+ }
+
+ if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
+ error_report("vfio: error disconnecting group %d from container",
+ group->groupid);
+ }
+
+ if (QLIST_EMPTY(&container->group_list)) {
+ VFIOAddressSpace *space = container->space;
+ VFIOGuestIOMMU *giommu, *tmp;
+ VFIOHostDMAWindow *hostwin, *next;
+
+ QLIST_REMOVE(container, next);
+
+ QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
+ memory_region_unregister_iommu_notifier(
+ MEMORY_REGION(giommu->iommu), &giommu->n);
+ QLIST_REMOVE(giommu, giommu_next);
+ g_free(giommu);
+ }
+
+ QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next,
+ next) {
+ QLIST_REMOVE(hostwin, hostwin_next);
+ g_free(hostwin);
+ }
+
+ trace_vfio_disconnect_container(container->fd);
+ close(container->fd);
+ g_free(container);
+
+ vfio_put_address_space(space);
+ }
+}
+
+VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
+{
+ VFIOGroup *group;
+ char path[32];
+ struct vfio_group_status status = { .argsz = sizeof(status) };
+
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ if (group->groupid == groupid) {
+ /* Found it. Now is it already in the right context? */
+ if (group->container->space->as == as) {
+ return group;
+ } else {
+ error_setg(errp, "group %d used in multiple address spaces",
+ group->groupid);
+ return NULL;
+ }
+ }
+ }
+
+ group = g_malloc0(sizeof(*group));
+
+ snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
+ group->fd = qemu_open_old(path, O_RDWR);
+ if (group->fd < 0) {
+ error_setg_errno(errp, errno, "failed to open %s", path);
+ goto free_group_exit;
+ }
+
+ if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
+ error_setg_errno(errp, errno, "failed to get group %d status", groupid);
+ goto close_fd_exit;
+ }
+
+ if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
+ error_setg(errp, "group %d is not viable", groupid);
+ error_append_hint(errp,
+ "Please ensure all devices within the iommu_group "
+ "are bound to their vfio bus driver.\n");
+ goto close_fd_exit;
+ }
+
+ group->groupid = groupid;
+ QLIST_INIT(&group->device_list);
+
+ if (vfio_connect_container(group, as, errp)) {
+ error_prepend(errp, "failed to setup container for group %d: ",
+ groupid);
+ goto close_fd_exit;
+ }
+
+ if (QLIST_EMPTY(&vfio_group_list)) {
+ qemu_register_reset(vfio_reset_handler, NULL);
+ }
+
+ QLIST_INSERT_HEAD(&vfio_group_list, group, next);
+
+ return group;
+
+close_fd_exit:
+ close(group->fd);
+
+free_group_exit:
+ g_free(group);
+
+ return NULL;
+}
+
+void vfio_put_group(VFIOGroup *group)
+{
+ if (!group || !QLIST_EMPTY(&group->device_list)) {
+ return;
+ }
+
+ if (!group->ram_block_discard_allowed) {
+ vfio_ram_block_discard_disable(group->container, false);
+ }
+ vfio_kvm_device_del_group(group);
+ vfio_disconnect_container(group);
+ QLIST_REMOVE(group, next);
+ trace_vfio_put_group(group->fd);
+ close(group->fd);
+ g_free(group);
+
+ if (QLIST_EMPTY(&vfio_group_list)) {
+ qemu_unregister_reset(vfio_reset_handler, NULL);
+ }
+}
+
+int vfio_get_device(VFIOGroup *group, const char *name,
+ VFIODevice *vbasedev, Error **errp)
+{
+ struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
+ int ret, fd;
+
+ fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
+ if (fd < 0) {
+ error_setg_errno(errp, errno, "error getting device from group %d",
+ group->groupid);
+ error_append_hint(errp,
+ "Verify all devices in group %d are bound to vfio-<bus> "
+ "or pci-stub and not already in use\n", group->groupid);
+ return fd;
+ }
+
+ ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
+ if (ret) {
+ error_setg_errno(errp, errno, "error getting device info");
+ close(fd);
+ return ret;
+ }
+
+ /*
+ * Set discarding of RAM as not broken for this group if the driver knows
+ * the device operates compatibly with discarding. Setting must be
+ * consistent per group, but since compatibility is really only possible
+ * with mdev currently, we expect singleton groups.
+ */
+ if (vbasedev->ram_block_discard_allowed !=
+ group->ram_block_discard_allowed) {
+ if (!QLIST_EMPTY(&group->device_list)) {
+ error_setg(errp, "Inconsistent setting of support for discarding "
+ "RAM (e.g., balloon) within group");
+ close(fd);
+ return -1;
+ }
+
+ if (!group->ram_block_discard_allowed) {
+ group->ram_block_discard_allowed = true;
+ vfio_ram_block_discard_disable(group->container, false);
+ }
+ }
+
+ vbasedev->fd = fd;
+ vbasedev->group = group;
+ QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);
+
+ vbasedev->num_irqs = dev_info.num_irqs;
+ vbasedev->num_regions = dev_info.num_regions;
+ vbasedev->flags = dev_info.flags;
+
+ trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
+ dev_info.num_irqs);
+
+ vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
+ return 0;
+}
+
+void vfio_put_base_device(VFIODevice *vbasedev)
+{
+ if (!vbasedev->group) {
+ return;
+ }
+ QLIST_REMOVE(vbasedev, next);
+ vbasedev->group = NULL;
+ trace_vfio_put_base_device(vbasedev->fd);
+ close(vbasedev->fd);
+}
+
+int vfio_get_region_info(VFIODevice *vbasedev, int index,
+ struct vfio_region_info **info)
+{
+ size_t argsz = sizeof(struct vfio_region_info);
+
+ *info = g_malloc0(argsz);
+
+ (*info)->index = index;
+retry:
+ (*info)->argsz = argsz;
+
+ if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
+ g_free(*info);
+ *info = NULL;
+ return -errno;
+ }
+
+ if ((*info)->argsz > argsz) {
+ argsz = (*info)->argsz;
+ *info = g_realloc(*info, argsz);
+
+ goto retry;
+ }
+
+ return 0;
+}
+
+int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
+ uint32_t subtype, struct vfio_region_info **info)
+{
+ int i;
+
+ for (i = 0; i < vbasedev->num_regions; i++) {
+ struct vfio_info_cap_header *hdr;
+ struct vfio_region_info_cap_type *cap_type;
+
+ if (vfio_get_region_info(vbasedev, i, info)) {
+ continue;
+ }
+
+ hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
+ if (!hdr) {
+ g_free(*info);
+ continue;
+ }
+
+ cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);
+
+ trace_vfio_get_dev_region(vbasedev->name, i,
+ cap_type->type, cap_type->subtype);
+
+ if (cap_type->type == type && cap_type->subtype == subtype) {
+ return 0;
+ }
+
+ g_free(*info);
+ }
+
+ *info = NULL;
+ return -ENODEV;
+}
+
+bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
+{
+ struct vfio_region_info *info = NULL;
+ bool ret = false;
+
+ if (!vfio_get_region_info(vbasedev, region, &info)) {
+ if (vfio_get_region_info_cap(info, cap_type)) {
+ ret = true;
+ }
+ g_free(info);
+ }
+
+ return ret;
+}
+
+/*
+ * Interfaces for IBM EEH (Enhanced Error Handling)
+ */
+static bool vfio_eeh_container_ok(VFIOContainer *container)
+{
+ /*
+ * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
+ * implementation is broken if there are multiple groups in a
+ * container. The hardware works in units of Partitionable
+ * Endpoints (== IOMMU groups) and the EEH operations naively
+ * iterate across all groups in the container, without any logic
+ * to make sure the groups have their state synchronized. For
+ * certain operations (ENABLE) that might be ok, until an error
+ * occurs, but for others (GET_STATE) it's clearly broken.
+ */
+
+ /*
+ * XXX Once fixed kernels exist, test for them here
+ */
+
+ if (QLIST_EMPTY(&container->group_list)) {
+ return false;
+ }
+
+ if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
+ return false;
+ }
+
+ return true;
+}
+
+static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
+{
+ struct vfio_eeh_pe_op pe_op = {
+ .argsz = sizeof(pe_op),
+ .op = op,
+ };
+ int ret;
+
+ if (!vfio_eeh_container_ok(container)) {
+ error_report("vfio/eeh: EEH_PE_OP 0x%x: "
+ "kernel requires a container with exactly one group", op);
+ return -EPERM;
+ }
+
+ ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
+ if (ret < 0) {
+ error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
+ return -errno;
+ }
+
+ return ret;
+}
+
+static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
+{
+ VFIOAddressSpace *space = vfio_get_address_space(as);
+ VFIOContainer *container = NULL;
+
+ if (QLIST_EMPTY(&space->containers)) {
+ /* No containers to act on */
+ goto out;
+ }
+
+ container = QLIST_FIRST(&space->containers);
+
+ if (QLIST_NEXT(container, next)) {
+        /*
+         * We don't yet have logic to synchronize EEH state across
+         * multiple containers.
+         */
+ container = NULL;
+ goto out;
+ }
+
+out:
+ vfio_put_address_space(space);
+ return container;
+}
+
+bool vfio_eeh_as_ok(AddressSpace *as)
+{
+ VFIOContainer *container = vfio_eeh_as_container(as);
+
+ return (container != NULL) && vfio_eeh_container_ok(container);
+}
+
+int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
+{
+ VFIOContainer *container = vfio_eeh_as_container(as);
+
+ if (!container) {
+ return -ENODEV;
+ }
+ return vfio_eeh_container_op(container, op);
+}
diff --git a/hw/vfio/display.c b/hw/vfio/display.c
new file mode 100644
index 000000000..89bc90508
--- /dev/null
+++ b/hw/vfio/display.c
@@ -0,0 +1,545 @@
+/*
+ * display support for mdev based vgpu devices
+ *
+ * Copyright Red Hat, Inc. 2017
+ *
+ * Authors:
+ * Gerd Hoffmann
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include <linux/vfio.h>
+#include <sys/ioctl.h>
+
+#include "hw/display/edid.h"
+#include "ui/console.h"
+#include "qapi/error.h"
+#include "pci.h"
+#include "trace.h"
+
+#ifndef DRM_PLANE_TYPE_PRIMARY
+# define DRM_PLANE_TYPE_PRIMARY 1
+# define DRM_PLANE_TYPE_CURSOR 2
+#endif
+
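+/*
+ * These helpers evaluate to non-zero (failure) on a short read or write,
+ * hence the "if (pread_field(...)) goto err;" pattern used below.
+ */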
+#define pread_field(_fd, _reg, _ptr, _fld) \
+ (sizeof(_ptr->_fld) != \
+ pread(_fd, &(_ptr->_fld), sizeof(_ptr->_fld), \
+ _reg->offset + offsetof(typeof(*_ptr), _fld)))
+
+#define pwrite_field(_fd, _reg, _ptr, _fld) \
+ (sizeof(_ptr->_fld) != \
+ pwrite(_fd, &(_ptr->_fld), sizeof(_ptr->_fld), \
+ _reg->offset + offsetof(typeof(*_ptr), _fld)))
+
+
+static void vfio_display_edid_link_up(void *opaque)
+{
+ VFIOPCIDevice *vdev = opaque;
+ VFIODisplay *dpy = vdev->dpy;
+ int fd = vdev->vbasedev.fd;
+
+ dpy->edid_regs->link_state = VFIO_DEVICE_GFX_LINK_STATE_UP;
+ if (pwrite_field(fd, dpy->edid_info, dpy->edid_regs, link_state)) {
+ goto err;
+ }
+ trace_vfio_display_edid_link_up();
+ return;
+
+err:
+ trace_vfio_display_edid_write_error();
+}
+
+static void vfio_display_edid_update(VFIOPCIDevice *vdev, bool enabled,
+ int prefx, int prefy)
+{
+ VFIODisplay *dpy = vdev->dpy;
+ int fd = vdev->vbasedev.fd;
+ qemu_edid_info edid = {
+ .maxx = dpy->edid_regs->max_xres,
+ .maxy = dpy->edid_regs->max_yres,
+ .prefx = prefx ?: vdev->display_xres,
+ .prefy = prefy ?: vdev->display_yres,
+ };
+
+ timer_del(dpy->edid_link_timer);
+ dpy->edid_regs->link_state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
+ if (pwrite_field(fd, dpy->edid_info, dpy->edid_regs, link_state)) {
+ goto err;
+ }
+ trace_vfio_display_edid_link_down();
+
+ if (!enabled) {
+ return;
+ }
+
+ if (edid.maxx && edid.prefx > edid.maxx) {
+ edid.prefx = edid.maxx;
+ }
+ if (edid.maxy && edid.prefy > edid.maxy) {
+ edid.prefy = edid.maxy;
+ }
+ qemu_edid_generate(dpy->edid_blob,
+ dpy->edid_regs->edid_max_size,
+ &edid);
+ trace_vfio_display_edid_update(edid.prefx, edid.prefy);
+
+ dpy->edid_regs->edid_size = qemu_edid_size(dpy->edid_blob);
+ if (pwrite_field(fd, dpy->edid_info, dpy->edid_regs, edid_size)) {
+ goto err;
+ }
+ if (pwrite(fd, dpy->edid_blob, dpy->edid_regs->edid_size,
+ dpy->edid_info->offset + dpy->edid_regs->edid_offset)
+ != dpy->edid_regs->edid_size) {
+ goto err;
+ }
+
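+    /*
+     * Re-assert the link after a short delay so the guest observes a
+     * link-down/link-up cycle and re-reads the updated EDID.
+     */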
+ timer_mod(dpy->edid_link_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
+ return;
+
+err:
+ trace_vfio_display_edid_write_error();
+ return;
+}
+
+static int vfio_display_edid_ui_info(void *opaque, uint32_t idx,
+ QemuUIInfo *info)
+{
+ VFIOPCIDevice *vdev = opaque;
+ VFIODisplay *dpy = vdev->dpy;
+
+ if (!dpy->edid_regs) {
+ return 0;
+ }
+
+ if (info->width && info->height) {
+ vfio_display_edid_update(vdev, true, info->width, info->height);
+ } else {
+ vfio_display_edid_update(vdev, false, 0, 0);
+ }
+
+ return 0;
+}
+
+static void vfio_display_edid_init(VFIOPCIDevice *vdev)
+{
+ VFIODisplay *dpy = vdev->dpy;
+ int fd = vdev->vbasedev.fd;
+ int ret;
+
+ ret = vfio_get_dev_region_info(&vdev->vbasedev,
+ VFIO_REGION_TYPE_GFX,
+ VFIO_REGION_SUBTYPE_GFX_EDID,
+ &dpy->edid_info);
+ if (ret) {
+ return;
+ }
+
+ trace_vfio_display_edid_available();
+ dpy->edid_regs = g_new0(struct vfio_region_gfx_edid, 1);
+ if (pread_field(fd, dpy->edid_info, dpy->edid_regs, edid_offset)) {
+ goto err;
+ }
+ if (pread_field(fd, dpy->edid_info, dpy->edid_regs, edid_max_size)) {
+ goto err;
+ }
+ if (pread_field(fd, dpy->edid_info, dpy->edid_regs, max_xres)) {
+ goto err;
+ }
+ if (pread_field(fd, dpy->edid_info, dpy->edid_regs, max_yres)) {
+ goto err;
+ }
+
+ dpy->edid_blob = g_malloc0(dpy->edid_regs->edid_max_size);
+
+    /* if the xres and yres properties are unset, use the maximum resolution */
+ if (!vdev->display_xres) {
+ vdev->display_xres = dpy->edid_regs->max_xres;
+ }
+ if (!vdev->display_yres) {
+ vdev->display_yres = dpy->edid_regs->max_yres;
+ }
+
+ dpy->edid_link_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
+ vfio_display_edid_link_up, vdev);
+
+ vfio_display_edid_update(vdev, true, 0, 0);
+ return;
+
+err:
+ trace_vfio_display_edid_write_error();
+ g_free(dpy->edid_regs);
+ dpy->edid_regs = NULL;
+ return;
+}
+
+static void vfio_display_edid_exit(VFIODisplay *dpy)
+{
+ if (!dpy->edid_regs) {
+ return;
+ }
+
+ g_free(dpy->edid_regs);
+ g_free(dpy->edid_blob);
+ timer_free(dpy->edid_link_timer);
+}
+
+static void vfio_display_update_cursor(VFIODMABuf *dmabuf,
+ struct vfio_device_gfx_plane_info *plane)
+{
+ if (dmabuf->pos_x != plane->x_pos || dmabuf->pos_y != plane->y_pos) {
+ dmabuf->pos_x = plane->x_pos;
+ dmabuf->pos_y = plane->y_pos;
+ dmabuf->pos_updates++;
+ }
+ if (dmabuf->hot_x != plane->x_hot || dmabuf->hot_y != plane->y_hot) {
+ dmabuf->hot_x = plane->x_hot;
+ dmabuf->hot_y = plane->y_hot;
+ dmabuf->hot_updates++;
+ }
+}
+
+static VFIODMABuf *vfio_display_get_dmabuf(VFIOPCIDevice *vdev,
+ uint32_t plane_type)
+{
+ VFIODisplay *dpy = vdev->dpy;
+ struct vfio_device_gfx_plane_info plane;
+ VFIODMABuf *dmabuf;
+ int fd, ret;
+
+ memset(&plane, 0, sizeof(plane));
+ plane.argsz = sizeof(plane);
+ plane.flags = VFIO_GFX_PLANE_TYPE_DMABUF;
+ plane.drm_plane_type = plane_type;
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
+ if (ret < 0) {
+ return NULL;
+ }
+ if (!plane.drm_format || !plane.size) {
+ return NULL;
+ }
+
+ QTAILQ_FOREACH(dmabuf, &dpy->dmabuf.bufs, next) {
+ if (dmabuf->dmabuf_id == plane.dmabuf_id) {
+ /* found in list, move to head, return it */
+ QTAILQ_REMOVE(&dpy->dmabuf.bufs, dmabuf, next);
+ QTAILQ_INSERT_HEAD(&dpy->dmabuf.bufs, dmabuf, next);
+ if (plane_type == DRM_PLANE_TYPE_CURSOR) {
+ vfio_display_update_cursor(dmabuf, &plane);
+ }
+ return dmabuf;
+ }
+ }
+
+ fd = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_GFX_DMABUF, &plane.dmabuf_id);
+ if (fd < 0) {
+ return NULL;
+ }
+
+ dmabuf = g_new0(VFIODMABuf, 1);
+ dmabuf->dmabuf_id = plane.dmabuf_id;
+ dmabuf->buf.width = plane.width;
+ dmabuf->buf.height = plane.height;
+ dmabuf->buf.stride = plane.stride;
+ dmabuf->buf.fourcc = plane.drm_format;
+ dmabuf->buf.modifier = plane.drm_format_mod;
+ dmabuf->buf.fd = fd;
+ if (plane_type == DRM_PLANE_TYPE_CURSOR) {
+ vfio_display_update_cursor(dmabuf, &plane);
+ }
+
+ QTAILQ_INSERT_HEAD(&dpy->dmabuf.bufs, dmabuf, next);
+ return dmabuf;
+}
+
+static void vfio_display_free_one_dmabuf(VFIODisplay *dpy, VFIODMABuf *dmabuf)
+{
+ QTAILQ_REMOVE(&dpy->dmabuf.bufs, dmabuf, next);
+ dpy_gl_release_dmabuf(dpy->con, &dmabuf->buf);
+ close(dmabuf->buf.fd);
+ g_free(dmabuf);
+}
+
+static void vfio_display_free_dmabufs(VFIOPCIDevice *vdev)
+{
+ VFIODisplay *dpy = vdev->dpy;
+ VFIODMABuf *dmabuf, *tmp;
+ uint32_t keep = 5;
+
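+    /* The list is in LRU order; keep the five most recently used buffers. */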
+ QTAILQ_FOREACH_SAFE(dmabuf, &dpy->dmabuf.bufs, next, tmp) {
+ if (keep > 0) {
+ keep--;
+ continue;
+ }
+ assert(dmabuf != dpy->dmabuf.primary);
+ vfio_display_free_one_dmabuf(dpy, dmabuf);
+ }
+}
+
+static void vfio_display_dmabuf_update(void *opaque)
+{
+ VFIOPCIDevice *vdev = opaque;
+ VFIODisplay *dpy = vdev->dpy;
+ VFIODMABuf *primary, *cursor;
+ bool free_bufs = false, new_cursor = false;
+
+ primary = vfio_display_get_dmabuf(vdev, DRM_PLANE_TYPE_PRIMARY);
+ if (primary == NULL) {
+ if (dpy->ramfb) {
+ ramfb_display_update(dpy->con, dpy->ramfb);
+ }
+ return;
+ }
+
+ if (dpy->dmabuf.primary != primary) {
+ dpy->dmabuf.primary = primary;
+ qemu_console_resize(dpy->con,
+ primary->buf.width, primary->buf.height);
+ dpy_gl_scanout_dmabuf(dpy->con, &primary->buf);
+ free_bufs = true;
+ }
+
+ cursor = vfio_display_get_dmabuf(vdev, DRM_PLANE_TYPE_CURSOR);
+ if (dpy->dmabuf.cursor != cursor) {
+ dpy->dmabuf.cursor = cursor;
+ new_cursor = true;
+ free_bufs = true;
+ }
+
+ if (cursor && (new_cursor || cursor->hot_updates)) {
+ bool have_hot = (cursor->hot_x != 0xffffffff &&
+ cursor->hot_y != 0xffffffff);
+ dpy_gl_cursor_dmabuf(dpy->con, &cursor->buf, have_hot,
+ cursor->hot_x, cursor->hot_y);
+ cursor->hot_updates = 0;
+ } else if (!cursor && new_cursor) {
+ dpy_gl_cursor_dmabuf(dpy->con, NULL, false, 0, 0);
+ }
+
+ if (cursor && cursor->pos_updates) {
+ dpy_gl_cursor_position(dpy->con,
+ cursor->pos_x,
+ cursor->pos_y);
+ cursor->pos_updates = 0;
+ }
+
+ dpy_gl_update(dpy->con, 0, 0, primary->buf.width, primary->buf.height);
+
+ if (free_bufs) {
+ vfio_display_free_dmabufs(vdev);
+ }
+}
+
+static int vfio_display_get_flags(void *opaque)
+{
+ return GRAPHIC_FLAGS_GL | GRAPHIC_FLAGS_DMABUF;
+}
+
+static const GraphicHwOps vfio_display_dmabuf_ops = {
+ .get_flags = vfio_display_get_flags,
+ .gfx_update = vfio_display_dmabuf_update,
+ .ui_info = vfio_display_edid_ui_info,
+};
+
+static int vfio_display_dmabuf_init(VFIOPCIDevice *vdev, Error **errp)
+{
+ if (!display_opengl) {
+ error_setg(errp, "vfio-display-dmabuf: opengl not available");
+ return -1;
+ }
+
+ vdev->dpy = g_new0(VFIODisplay, 1);
+ vdev->dpy->con = graphic_console_init(DEVICE(vdev), 0,
+ &vfio_display_dmabuf_ops,
+ vdev);
+ if (vdev->enable_ramfb) {
+ vdev->dpy->ramfb = ramfb_setup(errp);
+ }
+ vfio_display_edid_init(vdev);
+ return 0;
+}
+
+static void vfio_display_dmabuf_exit(VFIODisplay *dpy)
+{
+ VFIODMABuf *dmabuf;
+
+ if (QTAILQ_EMPTY(&dpy->dmabuf.bufs)) {
+ return;
+ }
+
+ while ((dmabuf = QTAILQ_FIRST(&dpy->dmabuf.bufs)) != NULL) {
+ vfio_display_free_one_dmabuf(dpy, dmabuf);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+void vfio_display_reset(VFIOPCIDevice *vdev)
+{
+ if (!vdev || !vdev->dpy || !vdev->dpy->con ||
+ !vdev->dpy->dmabuf.primary) {
+ return;
+ }
+
+ dpy_gl_scanout_disable(vdev->dpy->con);
+ vfio_display_dmabuf_exit(vdev->dpy);
+ dpy_gfx_update_full(vdev->dpy->con);
+}
+
+static void vfio_display_region_update(void *opaque)
+{
+ VFIOPCIDevice *vdev = opaque;
+ VFIODisplay *dpy = vdev->dpy;
+ struct vfio_device_gfx_plane_info plane = {
+ .argsz = sizeof(plane),
+ .flags = VFIO_GFX_PLANE_TYPE_REGION
+ };
+ pixman_format_code_t format;
+ int ret;
+
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
+ if (ret < 0) {
+ error_report("ioctl VFIO_DEVICE_QUERY_GFX_PLANE: %s",
+ strerror(errno));
+ return;
+ }
+ if (!plane.drm_format || !plane.size) {
+ if (dpy->ramfb) {
+ ramfb_display_update(dpy->con, dpy->ramfb);
+ dpy->region.surface = NULL;
+ }
+ return;
+ }
+ format = qemu_drm_format_to_pixman(plane.drm_format);
+ if (!format) {
+ return;
+ }
+
+ if (dpy->region.buffer.size &&
+ dpy->region.buffer.nr != plane.region_index) {
+ /* region changed */
+ vfio_region_exit(&dpy->region.buffer);
+ vfio_region_finalize(&dpy->region.buffer);
+ dpy->region.surface = NULL;
+ }
+
+ if (dpy->region.surface &&
+ (surface_width(dpy->region.surface) != plane.width ||
+ surface_height(dpy->region.surface) != plane.height ||
+ surface_format(dpy->region.surface) != format)) {
+ /* size changed */
+ dpy->region.surface = NULL;
+ }
+
+ if (!dpy->region.buffer.size) {
+ /* mmap region */
+ ret = vfio_region_setup(OBJECT(vdev), &vdev->vbasedev,
+ &dpy->region.buffer,
+ plane.region_index,
+ "display");
+ if (ret != 0) {
+ error_report("%s: vfio_region_setup(%d): %s",
+ __func__, plane.region_index, strerror(-ret));
+ goto err;
+ }
+ ret = vfio_region_mmap(&dpy->region.buffer);
+ if (ret != 0) {
+ error_report("%s: vfio_region_mmap(%d): %s", __func__,
+ plane.region_index, strerror(-ret));
+ goto err;
+ }
+ assert(dpy->region.buffer.mmaps[0].mmap != NULL);
+ }
+
+ if (dpy->region.surface == NULL) {
+ /* create surface */
+ dpy->region.surface = qemu_create_displaysurface_from
+ (plane.width, plane.height, format,
+ plane.stride, dpy->region.buffer.mmaps[0].mmap);
+ dpy_gfx_replace_surface(dpy->con, dpy->region.surface);
+ }
+
+ /* full screen update */
+ dpy_gfx_update(dpy->con, 0, 0,
+ surface_width(dpy->region.surface),
+ surface_height(dpy->region.surface));
+ return;
+
+err:
+ vfio_region_exit(&dpy->region.buffer);
+ vfio_region_finalize(&dpy->region.buffer);
+}
+
+static const GraphicHwOps vfio_display_region_ops = {
+ .gfx_update = vfio_display_region_update,
+};
+
+static int vfio_display_region_init(VFIOPCIDevice *vdev, Error **errp)
+{
+ vdev->dpy = g_new0(VFIODisplay, 1);
+ vdev->dpy->con = graphic_console_init(DEVICE(vdev), 0,
+ &vfio_display_region_ops,
+ vdev);
+ if (vdev->enable_ramfb) {
+ vdev->dpy->ramfb = ramfb_setup(errp);
+ }
+ return 0;
+}
+
+static void vfio_display_region_exit(VFIODisplay *dpy)
+{
+ if (!dpy->region.buffer.size) {
+ return;
+ }
+
+ vfio_region_exit(&dpy->region.buffer);
+ vfio_region_finalize(&dpy->region.buffer);
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp)
+{
+ struct vfio_device_gfx_plane_info probe;
+ int ret;
+
+ memset(&probe, 0, sizeof(probe));
+ probe.argsz = sizeof(probe);
+ probe.flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_DMABUF;
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &probe);
+ if (ret == 0) {
+ return vfio_display_dmabuf_init(vdev, errp);
+ }
+
+ memset(&probe, 0, sizeof(probe));
+ probe.argsz = sizeof(probe);
+ probe.flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_REGION;
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &probe);
+ if (ret == 0) {
+ return vfio_display_region_init(vdev, errp);
+ }
+
+ if (vdev->display == ON_OFF_AUTO_AUTO) {
+ /* not an error in automatic mode */
+ return 0;
+ }
+
+ error_setg(errp, "vfio: device doesn't support any (known) display method");
+ return -1;
+}
+
+void vfio_display_finalize(VFIOPCIDevice *vdev)
+{
+ if (!vdev->dpy) {
+ return;
+ }
+
+ graphic_console_close(vdev->dpy->con);
+ vfio_display_dmabuf_exit(vdev->dpy);
+ vfio_display_region_exit(vdev->dpy);
+ vfio_display_edid_exit(vdev->dpy);
+ g_free(vdev->dpy);
+}
diff --git a/hw/vfio/igd.c b/hw/vfio/igd.c
new file mode 100644
index 000000000..d4685709a
--- /dev/null
+++ b/hw/vfio/igd.c
@@ -0,0 +1,616 @@
+/*
+ * IGD device quirks
+ *
+ * Copyright Red Hat, Inc. 2016
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "hw/hw.h"
+#include "hw/nvram/fw_cfg.h"
+#include "pci.h"
+#include "trace.h"
+
+/*
+ * Intel IGD support
+ *
+ * Obviously IGD is not a discrete device; this is evidenced not only by it
+ * being integrated into the CPU, but by the various chipset and BIOS
+ * dependencies that it brings along with it. Intel is trying to move away
+ * from this and Broadwell and newer devices can run in what Intel calls
+ * "Universal Pass-Through" mode, or UPT. Theoretically in UPT mode, nothing
+ * more is required beyond assigning the IGD device to a VM. There are
+ * however support limitations to this mode. It only supports IGD as a
+ * secondary graphics device in the VM and it doesn't officially support any
+ * physical outputs.
+ *
+ * The code here attempts to enable what we'll call legacy mode assignment,
+ * IGD retains most of the capabilities we expect for it to have on bare
+ * metal. To enable this mode, the IGD device must be assigned to the VM
+ * at PCI address 00:02.0, it must have a ROM, it very likely needs VGA
+ * support, we must have VM BIOS support for reserving and populating some
+ * of the required tables, and we need to tweak the chipset with revisions
+ * and IDs and an LPC/ISA bridge device. The intention is to make all of
+ * this happen automatically by installing the device at the correct VM PCI
+ * bus address. If any of the conditions are not met, we cross our fingers
+ * and hope the user knows better.
+ *
+ * NB - It is possible to enable physical outputs in UPT mode by supplying
+ * an OpRegion table. We don't do this by default because the guest driver
+ * behaves differently if an OpRegion is provided and no monitor is attached
+ * vs no OpRegion and a monitor being attached or not. Effectively, if a
+ * headless setup is desired, the OpRegion gets in the way of that.
+ */
+
+/*
+ * This presumes the device is already known to be an Intel VGA device, so we
+ * take liberties in which device ID bits match which generation. This should
+ * not be taken as an indication that all the devices are supported, or even
+ * supportable, some of them don't even support VT-d.
+ * See linux:include/drm/i915_pciids.h for IDs.
+ */
+static int igd_gen(VFIOPCIDevice *vdev)
+{
+ if ((vdev->device_id & 0xfff) == 0xa84) {
+ return 8; /* Broxton */
+ }
+
+ switch (vdev->device_id & 0xff00) {
+ /* Old, untested, unavailable, unknown */
+ case 0x0000:
+ case 0x2500:
+ case 0x2700:
+ case 0x2900:
+ case 0x2a00:
+ case 0x2e00:
+ case 0x3500:
+ case 0xa000:
+ return -1;
+ /* SandyBridge, IvyBridge, ValleyView, Haswell */
+ case 0x0100:
+ case 0x0400:
+ case 0x0a00:
+ case 0x0c00:
+ case 0x0d00:
+ case 0x0f00:
+ return 6;
+ /* BroadWell, CherryView, SkyLake, KabyLake */
+ case 0x1600:
+ case 0x1900:
+ case 0x2200:
+ case 0x5900:
+ return 8;
+ }
+
+ return 8; /* Assume newer is compatible */
+}
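+
+/*
+ * For illustration: a SkyLake GT2 device id such as 0x1912 masks to 0x1900
+ * and is reported as generation 8, while an IvyBridge id such as 0x0166
+ * masks to 0x0100 and is reported as generation 6.
+ */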
+
+typedef struct VFIOIGDQuirk {
+ struct VFIOPCIDevice *vdev;
+ uint32_t index;
+ uint32_t bdsm;
+} VFIOIGDQuirk;
+
+#define IGD_GMCH 0x50 /* Graphics Control Register */
+#define IGD_BDSM 0x5c /* Base Data of Stolen Memory */
+
+
+/*
+ * The rather short list of registers that we copy from the host devices.
+ * The LPC/ISA bridge values are definitely needed to support the vBIOS, the
+ * host bridge values may or may not be needed depending on the guest OS.
+ * Since we're only munging revision and subsystem values on the host bridge,
+ * we don't require our own device. The LPC/ISA bridge needs to be our very
+ * own though.
+ */
+typedef struct {
+ uint8_t offset;
+ uint8_t len;
+} IGDHostInfo;
+
+static const IGDHostInfo igd_host_bridge_infos[] = {
+ {PCI_REVISION_ID, 2},
+ {PCI_SUBSYSTEM_VENDOR_ID, 2},
+ {PCI_SUBSYSTEM_ID, 2},
+};
+
+static const IGDHostInfo igd_lpc_bridge_infos[] = {
+ {PCI_VENDOR_ID, 2},
+ {PCI_DEVICE_ID, 2},
+ {PCI_REVISION_ID, 2},
+ {PCI_SUBSYSTEM_VENDOR_ID, 2},
+ {PCI_SUBSYSTEM_ID, 2},
+};
+
+static int vfio_pci_igd_copy(VFIOPCIDevice *vdev, PCIDevice *pdev,
+ struct vfio_region_info *info,
+ const IGDHostInfo *list, int len)
+{
+ int i, ret;
+
+ for (i = 0; i < len; i++) {
+ ret = pread(vdev->vbasedev.fd, pdev->config + list[i].offset,
+ list[i].len, info->offset + list[i].offset);
+ if (ret != list[i].len) {
+ error_report("IGD copy failed: %m");
+ return -errno;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Stuff a few values into the host bridge.
+ */
+static int vfio_pci_igd_host_init(VFIOPCIDevice *vdev,
+ struct vfio_region_info *info)
+{
+ PCIBus *bus;
+ PCIDevice *host_bridge;
+ int ret;
+
+ bus = pci_device_root_bus(&vdev->pdev);
+ host_bridge = pci_find_device(bus, 0, PCI_DEVFN(0, 0));
+
+ if (!host_bridge) {
+ error_report("Can't find host bridge");
+ return -ENODEV;
+ }
+
+ ret = vfio_pci_igd_copy(vdev, host_bridge, info, igd_host_bridge_infos,
+ ARRAY_SIZE(igd_host_bridge_infos));
+ if (!ret) {
+ trace_vfio_pci_igd_host_bridge_enabled(vdev->vbasedev.name);
+ }
+
+ return ret;
+}
+
+/*
+ * IGD LPC/ISA bridge support code. The vBIOS needs this, but we can't write
+ * arbitrary values into just any bridge, so we must create our own. We try
+ * to handle the case where the user has created it for us, which they might
+ * want to do to enable multifunction so we don't occupy the whole PCI slot.
+ */
+static void vfio_pci_igd_lpc_bridge_realize(PCIDevice *pdev, Error **errp)
+{
+ if (pdev->devfn != PCI_DEVFN(0x1f, 0)) {
+ error_setg(errp, "VFIO dummy ISA/LPC bridge must have address 1f.0");
+ }
+}
+
+static void vfio_pci_igd_lpc_bridge_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+ dc->desc = "VFIO dummy ISA/LPC bridge for IGD assignment";
+ dc->hotpluggable = false;
+ k->realize = vfio_pci_igd_lpc_bridge_realize;
+ k->class_id = PCI_CLASS_BRIDGE_ISA;
+}
+
+static TypeInfo vfio_pci_igd_lpc_bridge_info = {
+ .name = "vfio-pci-igd-lpc-bridge",
+ .parent = TYPE_PCI_DEVICE,
+ .class_init = vfio_pci_igd_lpc_bridge_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+};
+
+static void vfio_pci_igd_register_types(void)
+{
+ type_register_static(&vfio_pci_igd_lpc_bridge_info);
+}
+
+type_init(vfio_pci_igd_register_types)
+
+static int vfio_pci_igd_lpc_init(VFIOPCIDevice *vdev,
+ struct vfio_region_info *info)
+{
+ PCIDevice *lpc_bridge;
+ int ret;
+
+ lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev),
+ 0, PCI_DEVFN(0x1f, 0));
+ if (!lpc_bridge) {
+ lpc_bridge = pci_create_simple(pci_device_root_bus(&vdev->pdev),
+ PCI_DEVFN(0x1f, 0), "vfio-pci-igd-lpc-bridge");
+ }
+
+ ret = vfio_pci_igd_copy(vdev, lpc_bridge, info, igd_lpc_bridge_infos,
+ ARRAY_SIZE(igd_lpc_bridge_infos));
+ if (!ret) {
+ trace_vfio_pci_igd_lpc_bridge_enabled(vdev->vbasedev.name);
+ }
+
+ return ret;
+}
+
+/*
+ * IGD Gen8 and newer support up to 8MB for the GTT and use a 64bit PTE
+ * entry, older IGDs use 2MB and 32bit. Each PTE maps a 4k page. Therefore
+ * we either have 2M/4k * 4 = 2k or 8M/4k * 8 = 16k as the maximum iobar index
+ * for programming the GTT.
+ *
+ * See linux:include/drm/i915_drm.h for shift and mask values.
+ */
+static int vfio_igd_gtt_max(VFIOPCIDevice *vdev)
+{
+    uint32_t gmch;
+    int ggms, gen = igd_gen(vdev);
+
+    gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch));
+ ggms = (gmch >> (gen < 8 ? 8 : 6)) & 0x3;
+ if (gen > 6) {
+ ggms = 1 << ggms;
+ }
+
+ ggms *= MiB;
+
+ return (ggms / (4 * KiB)) * (gen < 8 ? 4 : 8);
+}
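+
+/*
+ * Worked example: with gen >= 8 and a GGMS field of 0x3, the GTT is
+ * 1 << 3 = 8MB; 8M/4k = 2048 PTEs at 8 bytes each gives the 16k maximum
+ * iobar index described above.
+ */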
+
+/*
+ * The IGD ROM will make use of stolen memory (GGMS) for support of VESA modes.
+ * Somehow the host stolen memory range is used for this, but how the ROM gets
+ * it is a mystery, perhaps it's hardcoded into the ROM. Thankfully though, it
+ * reprograms the GTT through the IOBAR where we can trap it and transpose the
+ * programming to the VM allocated buffer. That buffer gets reserved by the VM
+ * firmware via the fw_cfg entry added below. Here we're just monitoring the
+ * IOBAR address and data registers to detect a write sequence targeting the
+ * GTTADR. This code was developed from observed behavior and unfortunately
+ * has no direct spec reference.
+ */
+static uint64_t vfio_igd_quirk_data_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOIGDQuirk *igd = opaque;
+ VFIOPCIDevice *vdev = igd->vdev;
+
+ igd->index = ~0;
+
+ return vfio_region_read(&vdev->bars[4].region, addr + 4, size);
+}
+
+static void vfio_igd_quirk_data_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOIGDQuirk *igd = opaque;
+ VFIOPCIDevice *vdev = igd->vdev;
+ uint64_t val = data;
+ int gen = igd_gen(vdev);
+
+ /*
+     * Programming the GTT starts at index 0x1 and uses every 4th index (ie.
+     * 0x1, 0x5, 0x9, 0xd,...). For pre-Gen8 each 4-byte write is a whole PTE
+     * entry, with the 0th (enable) bit set. For Gen8 and up, PTEs are 64bit, so
+ * entries 0x5 & 0xd are the high dword, in our case zero. Each PTE points
+ * to a 4k page, which we translate to a page from the VM allocated region,
+ * pointed to by the BDSM register. If this is not set, we fail.
+ *
+ * We trap writes to the full configured GTT size, but we typically only
+ * see the vBIOS writing up to (nearly) the 1MB barrier. In fact it often
+ * seems to miss the last entry for an even 1MB GTT. Doing a gratuitous
+ * write of that last entry does work, but is hopefully unnecessary since
+ * we clear the previous GTT on initialization.
+ */
+ if ((igd->index % 4 == 1) && igd->index < vfio_igd_gtt_max(vdev)) {
+ if (gen < 8 || (igd->index % 8 == 1)) {
+ uint32_t base;
+
+ base = pci_get_long(vdev->pdev.config + IGD_BDSM);
+ if (!base) {
+ hw_error("vfio-igd: Guest attempted to program IGD GTT before "
+ "BIOS reserved stolen memory. Unsupported BIOS?");
+ }
+
+ val = data - igd->bdsm + base;
+ } else {
+ val = 0; /* upper 32bits of pte, we only enable below 4G PTEs */
+ }
+
+ trace_vfio_pci_igd_bar4_write(vdev->vbasedev.name,
+ igd->index, data, val);
+ }
+
+ vfio_region_write(&vdev->bars[4].region, addr + 4, val, size);
+
+ igd->index = ~0;
+}
+
+static const MemoryRegionOps vfio_igd_data_quirk = {
+ .read = vfio_igd_quirk_data_read,
+ .write = vfio_igd_quirk_data_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t vfio_igd_quirk_index_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOIGDQuirk *igd = opaque;
+ VFIOPCIDevice *vdev = igd->vdev;
+
+ igd->index = ~0;
+
+ return vfio_region_read(&vdev->bars[4].region, addr, size);
+}
+
+static void vfio_igd_quirk_index_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOIGDQuirk *igd = opaque;
+ VFIOPCIDevice *vdev = igd->vdev;
+
+ igd->index = data;
+
+ vfio_region_write(&vdev->bars[4].region, addr, data, size);
+}
+
+static const MemoryRegionOps vfio_igd_index_quirk = {
+ .read = vfio_igd_quirk_index_read,
+ .write = vfio_igd_quirk_index_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
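+
+/*
+ * Together these two ops emulate the IOBAR's index/data register pair: the
+ * guest first writes a GTT index to BAR4 offset 0 (trapped by
+ * vfio_igd_index_quirk), then writes the PTE value to offset 4 (trapped by
+ * vfio_igd_data_quirk, which rewrites it against the VM's BDSM buffer).
+ * A sketch of the guest-side sequence, with illustrative names:
+ *
+ *     outl(gtt_index, iobar_base + 0);
+ *     outl(pte_value, iobar_base + 4);
+ */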
+
+void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
+{
+ struct vfio_region_info *rom = NULL, *opregion = NULL,
+ *host = NULL, *lpc = NULL;
+ VFIOQuirk *quirk;
+ VFIOIGDQuirk *igd;
+ PCIDevice *lpc_bridge;
+ int i, ret, ggms_mb, gms_mb = 0, gen;
+ uint64_t *bdsm_size;
+ uint32_t gmch;
+ uint16_t cmd_orig, cmd;
+ Error *err = NULL;
+
+ /*
+ * This must be an Intel VGA device at address 00:02.0 for us to even
+ * consider enabling legacy mode. The vBIOS has dependencies on the
+ * PCI bus address.
+ */
+ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) ||
+ !vfio_is_vga(vdev) || nr != 4 ||
+ &vdev->pdev != pci_find_device(pci_device_root_bus(&vdev->pdev),
+ 0, PCI_DEVFN(0x2, 0))) {
+ return;
+ }
+
+ /*
+ * We need to create an LPC/ISA bridge at PCI bus address 00:1f.0 that we
+ * can stuff host values into, so if there's already one there and it's not
+ * one we can hack on, legacy mode is no-go. Sorry Q35.
+ */
+ lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev),
+ 0, PCI_DEVFN(0x1f, 0));
+ if (lpc_bridge && !object_dynamic_cast(OBJECT(lpc_bridge),
+ "vfio-pci-igd-lpc-bridge")) {
+ error_report("IGD device %s cannot support legacy mode due to existing "
+ "devices at address 1f.0", vdev->vbasedev.name);
+ return;
+ }
+
+ /*
+ * IGD is not a standard, they like to change their specs often. We
+ * only attempt to support back to SandyBridge and we hope that newer
+ * devices maintain compatibility with generation 8.
+ */
+ gen = igd_gen(vdev);
+ if (gen != 6 && gen != 8) {
+ error_report("IGD device %s is unsupported in legacy mode, "
+ "try SandyBridge or newer", vdev->vbasedev.name);
+ return;
+ }
+
+ /*
+ * Most of what we're doing here is to enable the ROM to run, so if
+ * there's no ROM, there's no point in setting up this quirk.
+ * NB. We only seem to get BIOS ROMs, so a UEFI VM would need CSM support.
+ */
+ ret = vfio_get_region_info(&vdev->vbasedev,
+ VFIO_PCI_ROM_REGION_INDEX, &rom);
+ if ((ret || !rom->size) && !vdev->pdev.romfile) {
+ error_report("IGD device %s has no ROM, legacy mode disabled",
+ vdev->vbasedev.name);
+ goto out;
+ }
+
+ /*
+ * Ignore the hotplug corner case, mark the ROM failed, we can't
+ * create the devices we need for legacy mode in the hotplug scenario.
+ */
+ if (vdev->pdev.qdev.hotplugged) {
+ error_report("IGD device %s hotplugged, ROM disabled, "
+ "legacy mode disabled", vdev->vbasedev.name);
+ vdev->rom_read_failed = true;
+ goto out;
+ }
+
+ /*
+ * Check whether we have all the vfio device specific regions to
+ * support legacy mode (added in Linux v4.6). If not, bail.
+ */
+ ret = vfio_get_dev_region_info(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
+ if (ret) {
+ error_report("IGD device %s does not support OpRegion access,"
+ "legacy mode disabled", vdev->vbasedev.name);
+ goto out;
+ }
+
+ ret = vfio_get_dev_region_info(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, &host);
+ if (ret) {
+ error_report("IGD device %s does not support host bridge access,"
+ "legacy mode disabled", vdev->vbasedev.name);
+ goto out;
+ }
+
+ ret = vfio_get_dev_region_info(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, &lpc);
+ if (ret) {
+ error_report("IGD device %s does not support LPC bridge access,"
+ "legacy mode disabled", vdev->vbasedev.name);
+ goto out;
+ }
+
+ gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4);
+
+ /*
+ * If IGD VGA Disable is clear (expected) and VGA is not already enabled,
+ * try to enable it. Probably shouldn't be using legacy mode without VGA,
+ * but also no point in us enabling VGA if disabled in hardware.
+ */
+ if (!(gmch & 0x2) && !vdev->vga && vfio_populate_vga(vdev, &err)) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ error_report("IGD device %s failed to enable VGA access, "
+ "legacy mode disabled", vdev->vbasedev.name);
+ goto out;
+ }
+
+ /* Create our LPC/ISA bridge */
+ ret = vfio_pci_igd_lpc_init(vdev, lpc);
+ if (ret) {
+ error_report("IGD device %s failed to create LPC bridge, "
+ "legacy mode disabled", vdev->vbasedev.name);
+ goto out;
+ }
+
+ /* Stuff some host values into the VM PCI host bridge */
+ ret = vfio_pci_igd_host_init(vdev, host);
+ if (ret) {
+ error_report("IGD device %s failed to modify host bridge, "
+ "legacy mode disabled", vdev->vbasedev.name);
+ goto out;
+ }
+
+ /* Setup OpRegion access */
+ ret = vfio_pci_igd_opregion_init(vdev, opregion, &err);
+ if (ret) {
+ error_append_hint(&err, "IGD legacy mode disabled\n");
+ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ goto out;
+ }
+
+ /* Setup our quirk to munge GTT addresses to the VM allocated buffer */
+ quirk = vfio_quirk_alloc(2);
+ igd = quirk->data = g_malloc0(sizeof(*igd));
+ igd->vdev = vdev;
+ igd->index = ~0;
+ igd->bdsm = vfio_pci_read_config(&vdev->pdev, IGD_BDSM, 4);
+ igd->bdsm &= ~((1 * MiB) - 1); /* 1MB aligned */
+
+ memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_igd_index_quirk,
+ igd, "vfio-igd-index-quirk", 4);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ 0, &quirk->mem[0], 1);
+
+ memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_igd_data_quirk,
+ igd, "vfio-igd-data-quirk", 4);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ 4, &quirk->mem[1], 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+ /* Determine the size of stolen memory needed for GTT */
+ ggms_mb = (gmch >> (gen < 8 ? 8 : 6)) & 0x3;
+ if (gen > 6) {
+ ggms_mb = 1 << ggms_mb;
+ }
+
+ /*
+ * Assume we have no GMS memory, but allow it to be overridden by device
+ * option (experimental). The spec doesn't actually allow zero GMS when
+ * IVD (IGD VGA Disable) is clear, but the claim is that it's unused,
+ * so let's not waste VM memory for it.
+ */
+ gmch &= ~((gen < 8 ? 0x1f : 0xff) << (gen < 8 ? 3 : 8));
+
+ if (vdev->igd_gms) {
+ if (vdev->igd_gms <= 0x10) {
+ gms_mb = vdev->igd_gms * 32;
+ gmch |= vdev->igd_gms << (gen < 8 ? 3 : 8);
+ } else {
+ error_report("Unsupported IGD GMS value 0x%x", vdev->igd_gms);
+ vdev->igd_gms = 0;
+ }
+ }
+
+ /*
+ * Request reserved memory for stolen memory via fw_cfg. VM firmware
+ * must allocate a 1MB aligned reserved memory region below 4GB with
+ * the requested size (in bytes) for use by the Intel PCI class VGA
+ * device at VM address 00:02.0. The base address of this reserved
+ * memory region must be written to the device BDSM register at PCI
+ * config offset 0x5C.
+ */
+ bdsm_size = g_malloc(sizeof(*bdsm_size));
+ *bdsm_size = cpu_to_le64((ggms_mb + gms_mb) * MiB);
+ fw_cfg_add_file(fw_cfg_find(), "etc/igd-bdsm-size",
+ bdsm_size, sizeof(*bdsm_size));
+
+ /* GMCH is read-only, emulated */
+ pci_set_long(vdev->pdev.config + IGD_GMCH, gmch);
+ pci_set_long(vdev->pdev.wmask + IGD_GMCH, 0);
+ pci_set_long(vdev->emulated_config_bits + IGD_GMCH, ~0);
+
+ /* BDSM is read-write, emulated. The BIOS needs to be able to write it */
+ pci_set_long(vdev->pdev.config + IGD_BDSM, 0);
+ pci_set_long(vdev->pdev.wmask + IGD_BDSM, ~0);
+ pci_set_long(vdev->emulated_config_bits + IGD_BDSM, ~0);
+
+ /*
+ * This IOBAR gives us access to GTTADR, which allows us to write to
+ * the GTT itself. So let's go ahead and write zero to all the GTT
+ * entries to avoid spurious DMA faults. Be sure I/O access is enabled
+ * before talking to the device.
+ */
+ if (pread(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig),
+ vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) {
+ error_report("IGD device %s - failed to read PCI command register",
+ vdev->vbasedev.name);
+ }
+
+ cmd = cmd_orig | PCI_COMMAND_IO;
+
+ if (pwrite(vdev->vbasedev.fd, &cmd, sizeof(cmd),
+ vdev->config_offset + PCI_COMMAND) != sizeof(cmd)) {
+ error_report("IGD device %s - failed to write PCI command register",
+ vdev->vbasedev.name);
+ }
+
+ for (i = 1; i < vfio_igd_gtt_max(vdev); i += 4) {
+ vfio_region_write(&vdev->bars[4].region, 0, i, 4);
+ vfio_region_write(&vdev->bars[4].region, 4, 0, 4);
+ }
+
+ if (pwrite(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig),
+ vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) {
+ error_report("IGD device %s - failed to restore PCI command register",
+ vdev->vbasedev.name);
+ }
+
+ trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, ggms_mb + gms_mb);
+
+out:
+ g_free(rom);
+ g_free(opregion);
+ g_free(host);
+ g_free(lpc);
+}
diff --git a/hw/vfio/meson.build b/hw/vfio/meson.build
new file mode 100644
index 000000000..da9af297a
--- /dev/null
+++ b/hw/vfio/meson.build
@@ -0,0 +1,19 @@
+vfio_ss = ss.source_set()
+vfio_ss.add(files(
+ 'common.c',
+ 'spapr.c',
+ 'migration.c',
+))
+vfio_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files(
+ 'display.c',
+ 'pci-quirks.c',
+ 'pci.c',
+))
+vfio_ss.add(when: 'CONFIG_VFIO_CCW', if_true: files('ccw.c'))
+vfio_ss.add(when: 'CONFIG_VFIO_PLATFORM', if_true: files('platform.c'))
+vfio_ss.add(when: 'CONFIG_VFIO_XGMAC', if_true: files('calxeda-xgmac.c'))
+vfio_ss.add(when: 'CONFIG_VFIO_AMD_XGBE', if_true: files('amd-xgbe.c'))
+vfio_ss.add(when: 'CONFIG_VFIO_AP', if_true: files('ap.c'))
+vfio_ss.add(when: 'CONFIG_VFIO_IGD', if_true: files('igd.c'))
+
+specific_ss.add_all(when: 'CONFIG_VFIO', if_true: vfio_ss)
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
new file mode 100644
index 000000000..ff6b45de6
--- /dev/null
+++ b/hw/vfio/migration.c
@@ -0,0 +1,911 @@
+/*
+ * Migration support for VFIO devices
+ *
+ * Copyright NVIDIA, Inc. 2020
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qemu/cutils.h"
+#include <linux/vfio.h>
+#include <sys/ioctl.h>
+
+#include "sysemu/runstate.h"
+#include "hw/vfio/vfio-common.h"
+#include "migration/migration.h"
+#include "migration/vmstate.h"
+#include "migration/qemu-file.h"
+#include "migration/register.h"
+#include "migration/blocker.h"
+#include "migration/misc.h"
+#include "qapi/error.h"
+#include "exec/ramlist.h"
+#include "exec/ram_addr.h"
+#include "pci.h"
+#include "trace.h"
+#include "hw/hw.h"
+
+/*
+ * Flags to be used as unique delimiters for VFIO devices in the migration
+ * stream. These flags are composed as:
+ * 0xffffffff => MSB 32-bit all 1s
+ * 0xef10 => Magic ID, represents emulated (virtual) function IO
+ * 0x0000 => 16-bits reserved for flags
+ *
+ * The beginning of state information is marked by _DEV_CONFIG_STATE,
+ * _DEV_SETUP_STATE, or _DEV_DATA_STATE, respectively. The end of a
+ * certain state information is marked by _END_OF_STATE.
+ */
+#define VFIO_MIG_FLAG_END_OF_STATE (0xffffffffef100001ULL)
+#define VFIO_MIG_FLAG_DEV_CONFIG_STATE (0xffffffffef100002ULL)
+#define VFIO_MIG_FLAG_DEV_SETUP_STATE (0xffffffffef100003ULL)
+#define VFIO_MIG_FLAG_DEV_DATA_STATE (0xffffffffef100004ULL)
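+
+/*
+ * For example, VFIO_MIG_FLAG_DEV_CONFIG_STATE decomposes as 0xffffffff
+ * (marker) | 0xef10 (magic ID) | 0x0002 (flag), packed into the single
+ * 64-bit value 0xffffffffef100002.
+ */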
+
+static int64_t bytes_transferred;
+
+static inline int vfio_mig_access(VFIODevice *vbasedev, void *val, int count,
+ off_t off, bool iswrite)
+{
+ int ret;
+
+ ret = iswrite ? pwrite(vbasedev->fd, val, count, off) :
+ pread(vbasedev->fd, val, count, off);
+ if (ret < count) {
+ error_report("vfio_mig_%s %d byte %s: failed at offset 0x%"
+ HWADDR_PRIx", err: %s", iswrite ? "write" : "read", count,
+ vbasedev->name, off, strerror(errno));
+ return (ret < 0) ? ret : -EINVAL;
+ }
+ return 0;
+}
+
+static int vfio_mig_rw(VFIODevice *vbasedev, __u8 *buf, size_t count,
+ off_t off, bool iswrite)
+{
+ int ret, done = 0;
+ __u8 *tbuf = buf;
+
+ while (count) {
+ int bytes = 0;
+
+ if (count >= 8 && !(off % 8)) {
+ bytes = 8;
+ } else if (count >= 4 && !(off % 4)) {
+ bytes = 4;
+ } else if (count >= 2 && !(off % 2)) {
+ bytes = 2;
+ } else {
+ bytes = 1;
+ }
+
+ ret = vfio_mig_access(vbasedev, tbuf, bytes, off, iswrite);
+ if (ret) {
+ return ret;
+ }
+
+ count -= bytes;
+ done += bytes;
+ off += bytes;
+ tbuf += bytes;
+ }
+ return done;
+}
+
+#define vfio_mig_read(f, v, c, o) vfio_mig_rw(f, (__u8 *)v, c, o, false)
+#define vfio_mig_write(f, v, c, o) vfio_mig_rw(f, (__u8 *)v, c, o, true)
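+
+/*
+ * Chunking example: a 14-byte vfio_mig_rw() at offset 2 is split into a
+ * 2-byte access at offset 2, a 4-byte access at offset 4 and an 8-byte
+ * access at offset 8, so that every access stays naturally aligned.
+ */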
+
+#define VFIO_MIG_STRUCT_OFFSET(f) \
+ offsetof(struct vfio_device_migration_info, f)
+/*
+ * Change the device_state register for device @vbasedev. Bits set in @mask
+ * are preserved, bits set in @value are set, and bits not set in either @mask
+ * or @value are cleared in device_state. If the register cannot be accessed,
+ * the resulting state would be invalid, or the device enters an error state,
+ * an error is returned.
+ */
+static int vfio_migration_set_state(VFIODevice *vbasedev, uint32_t mask,
+ uint32_t value)
+{
+ VFIOMigration *migration = vbasedev->migration;
+ VFIORegion *region = &migration->region;
+ off_t dev_state_off = region->fd_offset +
+ VFIO_MIG_STRUCT_OFFSET(device_state);
+ uint32_t device_state;
+ int ret;
+
+ ret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state),
+ dev_state_off);
+ if (ret < 0) {
+ return ret;
+ }
+
+ device_state = (device_state & mask) | value;
+
+ if (!VFIO_DEVICE_STATE_VALID(device_state)) {
+ return -EINVAL;
+ }
+
+ ret = vfio_mig_write(vbasedev, &device_state, sizeof(device_state),
+ dev_state_off);
+ if (ret < 0) {
+ int rret;
+
+ rret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state),
+ dev_state_off);
+
+ if ((rret < 0) || (VFIO_DEVICE_STATE_IS_ERROR(device_state))) {
+ hw_error("%s: Device in error state 0x%x", vbasedev->name,
+ device_state);
+ return rret ? rret : -EIO;
+ }
+ return ret;
+ }
+
+ migration->device_state = device_state;
+ trace_vfio_migration_set_state(vbasedev->name, device_state);
+ return 0;
+}
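+
+/*
+ * For example, vfio_save_setup() below passes mask = VFIO_DEVICE_STATE_MASK
+ * and value = VFIO_DEVICE_STATE_SAVING to set _SAVING while preserving the
+ * other state bits, while vfio_save_complete_precopy() passes
+ * mask = ~VFIO_DEVICE_STATE_RUNNING with value = VFIO_DEVICE_STATE_SAVING
+ * to clear _RUNNING while keeping _SAVING set.
+ */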
+
+static void *get_data_section_size(VFIORegion *region, uint64_t data_offset,
+ uint64_t data_size, uint64_t *size)
+{
+ void *ptr = NULL;
+ uint64_t limit = 0;
+ int i;
+
+ if (!region->mmaps) {
+ if (size) {
+ *size = MIN(data_size, region->size - data_offset);
+ }
+ return ptr;
+ }
+
+ for (i = 0; i < region->nr_mmaps; i++) {
+ VFIOMmap *map = region->mmaps + i;
+
+ if ((data_offset >= map->offset) &&
+ (data_offset < map->offset + map->size)) {
+
+ /* check if data_offset is within sparse mmap areas */
+ ptr = map->mmap + data_offset - map->offset;
+ if (size) {
+ *size = MIN(data_size, map->offset + map->size - data_offset);
+ }
+ break;
+ } else if ((data_offset < map->offset) &&
+ (!limit || limit > map->offset)) {
+ /*
+             * data_offset is not within the sparse mmap areas; find the size
+             * of the non-mapped area. Check the whole list, since the
+             * region->mmaps list is not sorted.
+ */
+ limit = map->offset;
+ }
+ }
+
+ if (!ptr && size) {
+ *size = limit ? MIN(data_size, limit - data_offset) : data_size;
+ }
+ return ptr;
+}
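+
+/*
+ * Illustration: for a region with a single sparse mmap covering offsets
+ * [0x1000, 0x3000), a data_offset of 0x1800 returns a direct pointer into
+ * the mmap with *size clipped to the end of the mapping, while a
+ * data_offset of 0 returns NULL with *size clipped to 0x1000 (the start of
+ * the mapped area), so the caller falls back to read/write access.
+ */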
+
+static int vfio_save_buffer(QEMUFile *f, VFIODevice *vbasedev, uint64_t *size)
+{
+ VFIOMigration *migration = vbasedev->migration;
+ VFIORegion *region = &migration->region;
+ uint64_t data_offset = 0, data_size = 0, sz;
+ int ret;
+
+ ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset),
+ region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_offset));
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = vfio_mig_read(vbasedev, &data_size, sizeof(data_size),
+ region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_size));
+ if (ret < 0) {
+ return ret;
+ }
+
+ trace_vfio_save_buffer(vbasedev->name, data_offset, data_size,
+ migration->pending_bytes);
+
+ qemu_put_be64(f, data_size);
+ sz = data_size;
+
+ while (sz) {
+ void *buf;
+ uint64_t sec_size;
+ bool buf_allocated = false;
+
+ buf = get_data_section_size(region, data_offset, sz, &sec_size);
+
+ if (!buf) {
+ buf = g_try_malloc(sec_size);
+ if (!buf) {
+ error_report("%s: Error allocating buffer ", __func__);
+ return -ENOMEM;
+ }
+ buf_allocated = true;
+
+ ret = vfio_mig_read(vbasedev, buf, sec_size,
+ region->fd_offset + data_offset);
+ if (ret < 0) {
+ g_free(buf);
+ return ret;
+ }
+ }
+
+ qemu_put_buffer(f, buf, sec_size);
+
+ if (buf_allocated) {
+ g_free(buf);
+ }
+ sz -= sec_size;
+ data_offset += sec_size;
+ }
+
+ ret = qemu_file_get_error(f);
+
+ if (!ret && size) {
+ *size = data_size;
+ }
+
+ bytes_transferred += data_size;
+ return ret;
+}
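+
+/*
+ * On the wire, each device-data chunk thus looks like (see
+ * vfio_save_iterate() below): VFIO_MIG_FLAG_DEV_DATA_STATE, a be64
+ * data_size, data_size bytes of device data, VFIO_MIG_FLAG_END_OF_STATE.
+ */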
+
+static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev,
+ uint64_t data_size)
+{
+ VFIORegion *region = &vbasedev->migration->region;
+ uint64_t data_offset = 0, size, report_size;
+ int ret;
+
+ do {
+ ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset),
+ region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_offset));
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (data_offset + data_size > region->size) {
+ /*
+             * If data_size is greater than the data section of the migration
+             * region, iterate the write buffer operation. This can occur if
+             * the migration region at the destination is smaller than the
+             * migration region at the source.
+ */
+ report_size = size = region->size - data_offset;
+ data_size -= size;
+ } else {
+ report_size = size = data_size;
+ data_size = 0;
+ }
+
+ trace_vfio_load_state_device_data(vbasedev->name, data_offset, size);
+
+ while (size) {
+ void *buf;
+ uint64_t sec_size;
+ bool buf_alloc = false;
+
+ buf = get_data_section_size(region, data_offset, size, &sec_size);
+
+ if (!buf) {
+ buf = g_try_malloc(sec_size);
+ if (!buf) {
+ error_report("%s: Error allocating buffer ", __func__);
+ return -ENOMEM;
+ }
+ buf_alloc = true;
+ }
+
+ qemu_get_buffer(f, buf, sec_size);
+
+ if (buf_alloc) {
+ ret = vfio_mig_write(vbasedev, buf, sec_size,
+ region->fd_offset + data_offset);
+ g_free(buf);
+
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ size -= sec_size;
+ data_offset += sec_size;
+ }
+
+ ret = vfio_mig_write(vbasedev, &report_size, sizeof(report_size),
+ region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_size));
+ if (ret < 0) {
+ return ret;
+ }
+ } while (data_size);
+
+ return 0;
+}
+
+static int vfio_update_pending(VFIODevice *vbasedev)
+{
+ VFIOMigration *migration = vbasedev->migration;
+ VFIORegion *region = &migration->region;
+ uint64_t pending_bytes = 0;
+ int ret;
+
+ ret = vfio_mig_read(vbasedev, &pending_bytes, sizeof(pending_bytes),
+ region->fd_offset + VFIO_MIG_STRUCT_OFFSET(pending_bytes));
+ if (ret < 0) {
+ migration->pending_bytes = 0;
+ return ret;
+ }
+
+ migration->pending_bytes = pending_bytes;
+ trace_vfio_update_pending(vbasedev->name, pending_bytes);
+ return 0;
+}
+
+static int vfio_save_device_config_state(QEMUFile *f, void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+
+ qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE);
+
+ if (vbasedev->ops && vbasedev->ops->vfio_save_config) {
+ vbasedev->ops->vfio_save_config(vbasedev, f);
+ }
+
+ qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
+
+ trace_vfio_save_device_config_state(vbasedev->name);
+
+ return qemu_file_get_error(f);
+}
+
+static int vfio_load_device_config_state(QEMUFile *f, void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+ uint64_t data;
+
+ if (vbasedev->ops && vbasedev->ops->vfio_load_config) {
+ int ret;
+
+ ret = vbasedev->ops->vfio_load_config(vbasedev, f);
+ if (ret) {
+ error_report("%s: Failed to load device config space",
+ vbasedev->name);
+ return ret;
+ }
+ }
+
+ data = qemu_get_be64(f);
+ if (data != VFIO_MIG_FLAG_END_OF_STATE) {
+ error_report("%s: Failed loading device config space, "
+ "end flag incorrect 0x%"PRIx64, vbasedev->name, data);
+ return -EINVAL;
+ }
+
+ trace_vfio_load_device_config_state(vbasedev->name);
+ return qemu_file_get_error(f);
+}
+
+static void vfio_migration_cleanup(VFIODevice *vbasedev)
+{
+ VFIOMigration *migration = vbasedev->migration;
+
+ if (migration->region.mmaps) {
+ vfio_region_unmap(&migration->region);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int vfio_save_setup(QEMUFile *f, void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+ VFIOMigration *migration = vbasedev->migration;
+ int ret;
+
+ trace_vfio_save_setup(vbasedev->name);
+
+ qemu_put_be64(f, VFIO_MIG_FLAG_DEV_SETUP_STATE);
+
+ if (migration->region.mmaps) {
+ /*
+         * We are calling vfio_region_mmap() from the migration thread.
+         * Memory API calls made from this function require the iothread
+         * lock to be held when invoked outside the main loop thread.
+ */
+ qemu_mutex_lock_iothread();
+ ret = vfio_region_mmap(&migration->region);
+ qemu_mutex_unlock_iothread();
+ if (ret) {
+ error_report("%s: Failed to mmap VFIO migration region: %s",
+ vbasedev->name, strerror(-ret));
+ error_report("%s: Falling back to slow path", vbasedev->name);
+ }
+ }
+
+ ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_MASK,
+ VFIO_DEVICE_STATE_SAVING);
+ if (ret) {
+ error_report("%s: Failed to set state SAVING", vbasedev->name);
+ return ret;
+ }
+
+ qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
+
+ ret = qemu_file_get_error(f);
+ if (ret) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vfio_save_cleanup(void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+
+ vfio_migration_cleanup(vbasedev);
+ trace_vfio_save_cleanup(vbasedev->name);
+}
+
+static void vfio_save_pending(QEMUFile *f, void *opaque,
+ uint64_t threshold_size,
+ uint64_t *res_precopy_only,
+ uint64_t *res_compatible,
+ uint64_t *res_postcopy_only)
+{
+ VFIODevice *vbasedev = opaque;
+ VFIOMigration *migration = vbasedev->migration;
+ int ret;
+
+ ret = vfio_update_pending(vbasedev);
+ if (ret) {
+ return;
+ }
+
+ *res_precopy_only += migration->pending_bytes;
+
+ trace_vfio_save_pending(vbasedev->name, *res_precopy_only,
+ *res_postcopy_only, *res_compatible);
+}
+
+static int vfio_save_iterate(QEMUFile *f, void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+ VFIOMigration *migration = vbasedev->migration;
+ uint64_t data_size;
+ int ret;
+
+ qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);
+
+ if (migration->pending_bytes == 0) {
+ ret = vfio_update_pending(vbasedev);
+ if (ret) {
+ return ret;
+ }
+
+ if (migration->pending_bytes == 0) {
+ qemu_put_be64(f, 0);
+ qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
+ /* indicates data finished, goto complete phase */
+ return 1;
+ }
+ }
+
+ ret = vfio_save_buffer(f, vbasedev, &data_size);
+ if (ret) {
+ error_report("%s: vfio_save_buffer failed %s", vbasedev->name,
+ strerror(errno));
+ return ret;
+ }
+
+ qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
+
+ ret = qemu_file_get_error(f);
+ if (ret) {
+ return ret;
+ }
+
+ /*
+     * Reset pending_bytes, as .save_live_pending is not called during the
+     * savevm or snapshot case; there, vfio_update_pending() at the start of
+     * this function updates pending_bytes.
+ */
+ migration->pending_bytes = 0;
+ trace_vfio_save_iterate(vbasedev->name, data_size);
+ return 0;
+}
+
+static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+ VFIOMigration *migration = vbasedev->migration;
+ uint64_t data_size;
+ int ret;
+
+ ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_RUNNING,
+ VFIO_DEVICE_STATE_SAVING);
+ if (ret) {
+ error_report("%s: Failed to set state STOP and SAVING",
+ vbasedev->name);
+ return ret;
+ }
+
+ ret = vfio_update_pending(vbasedev);
+ if (ret) {
+ return ret;
+ }
+
+ while (migration->pending_bytes > 0) {
+ qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);
+ ret = vfio_save_buffer(f, vbasedev, &data_size);
+ if (ret < 0) {
+ error_report("%s: Failed to save buffer", vbasedev->name);
+ return ret;
+ }
+
+ if (data_size == 0) {
+ break;
+ }
+
+ ret = vfio_update_pending(vbasedev);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
+
+ ret = qemu_file_get_error(f);
+ if (ret) {
+ return ret;
+ }
+
+ ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_SAVING, 0);
+ if (ret) {
+ error_report("%s: Failed to set state STOPPED", vbasedev->name);
+ return ret;
+ }
+
+ trace_vfio_save_complete_precopy(vbasedev->name);
+ return ret;
+}
+
+static void vfio_save_state(QEMUFile *f, void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+ int ret;
+
+ ret = vfio_save_device_config_state(f, opaque);
+ if (ret) {
+ error_report("%s: Failed to save device config space",
+ vbasedev->name);
+ qemu_file_set_error(f, ret);
+ }
+}
+
+static int vfio_load_setup(QEMUFile *f, void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+ VFIOMigration *migration = vbasedev->migration;
+ int ret = 0;
+
+ if (migration->region.mmaps) {
+ ret = vfio_region_mmap(&migration->region);
+ if (ret) {
+ error_report("%s: Failed to mmap VFIO migration region %d: %s",
+ vbasedev->name, migration->region.nr,
+ strerror(-ret));
+ error_report("%s: Falling back to slow path", vbasedev->name);
+ }
+ }
+
+ ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_MASK,
+ VFIO_DEVICE_STATE_RESUMING);
+ if (ret) {
+ error_report("%s: Failed to set state RESUMING", vbasedev->name);
+ if (migration->region.mmaps) {
+ vfio_region_unmap(&migration->region);
+ }
+ }
+ return ret;
+}
+
+static int vfio_load_cleanup(void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+
+ vfio_migration_cleanup(vbasedev);
+ trace_vfio_load_cleanup(vbasedev->name);
+ return 0;
+}
+
+static int vfio_load_state(QEMUFile *f, void *opaque, int version_id)
+{
+ VFIODevice *vbasedev = opaque;
+ int ret = 0;
+ uint64_t data;
+
+ data = qemu_get_be64(f);
+ while (data != VFIO_MIG_FLAG_END_OF_STATE) {
+
+ trace_vfio_load_state(vbasedev->name, data);
+
+ switch (data) {
+ case VFIO_MIG_FLAG_DEV_CONFIG_STATE:
+ {
+ return vfio_load_device_config_state(f, opaque);
+ }
+ case VFIO_MIG_FLAG_DEV_SETUP_STATE:
+ {
+ data = qemu_get_be64(f);
+ if (data == VFIO_MIG_FLAG_END_OF_STATE) {
+ return ret;
+ } else {
+ error_report("%s: SETUP STATE: EOS not found 0x%"PRIx64,
+ vbasedev->name, data);
+ return -EINVAL;
+ }
+ break;
+ }
+ case VFIO_MIG_FLAG_DEV_DATA_STATE:
+ {
+ uint64_t data_size = qemu_get_be64(f);
+
+ if (data_size) {
+ ret = vfio_load_buffer(f, vbasedev, data_size);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ break;
+ }
+ default:
+ error_report("%s: Unknown tag 0x%"PRIx64, vbasedev->name, data);
+ return -EINVAL;
+ }
+
+ data = qemu_get_be64(f);
+ ret = qemu_file_get_error(f);
+ if (ret) {
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static SaveVMHandlers savevm_vfio_handlers = {
+ .save_setup = vfio_save_setup,
+ .save_cleanup = vfio_save_cleanup,
+ .save_live_pending = vfio_save_pending,
+ .save_live_iterate = vfio_save_iterate,
+ .save_live_complete_precopy = vfio_save_complete_precopy,
+ .save_state = vfio_save_state,
+ .load_setup = vfio_load_setup,
+ .load_cleanup = vfio_load_cleanup,
+ .load_state = vfio_load_state,
+};
+
+/* ---------------------------------------------------------------------- */
+
+static void vfio_vmstate_change(void *opaque, bool running, RunState state)
+{
+ VFIODevice *vbasedev = opaque;
+ VFIOMigration *migration = vbasedev->migration;
+ uint32_t value, mask;
+ int ret;
+
+ if (vbasedev->migration->vm_running == running) {
+ return;
+ }
+
+ if (running) {
+ /*
+         * Here the device state can have the _SAVING, _RESUMING or _STOP
+         * bit set. A transition from _SAVING to _RUNNING happens when
+         * migration fails; in that case clear the _SAVING bit. A transition
+         * from _RESUMING to _RUNNING occurs during the resuming phase; in
+         * that case clear the _RESUMING bit. In both cases, set the
+         * _RUNNING bit.
+ */
+ mask = ~VFIO_DEVICE_STATE_MASK;
+ value = VFIO_DEVICE_STATE_RUNNING;
+ } else {
+ /*
+         * Here the device state is either _RUNNING or _SAVING|_RUNNING.
+         * Clear the _RUNNING bit.
+ */
+ mask = ~VFIO_DEVICE_STATE_RUNNING;
+
+ /*
+         * When the VM transitions to stopped for the savevm command, the
+         * device should start saving data.
+ */
+ if (state == RUN_STATE_SAVE_VM) {
+ value = VFIO_DEVICE_STATE_SAVING;
+ } else {
+ value = 0;
+ }
+ }
+
+ ret = vfio_migration_set_state(vbasedev, mask, value);
+ if (ret) {
+ /*
+ * Migration should be aborted in this case, but vm_state_notify()
+ * currently does not support reporting failures.
+ */
+ error_report("%s: Failed to set device state 0x%x", vbasedev->name,
+ (migration->device_state & mask) | value);
+ qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
+ }
+ vbasedev->migration->vm_running = running;
+ trace_vfio_vmstate_change(vbasedev->name, running, RunState_str(state),
+ (migration->device_state & mask) | value);
+}
+
+static void vfio_migration_state_notifier(Notifier *notifier, void *data)
+{
+ MigrationState *s = data;
+ VFIOMigration *migration = container_of(notifier, VFIOMigration,
+ migration_state);
+ VFIODevice *vbasedev = migration->vbasedev;
+ int ret;
+
+ trace_vfio_migration_state_notifier(vbasedev->name,
+ MigrationStatus_str(s->state));
+
+ switch (s->state) {
+ case MIGRATION_STATUS_CANCELLING:
+ case MIGRATION_STATUS_CANCELLED:
+ case MIGRATION_STATUS_FAILED:
+ bytes_transferred = 0;
+ ret = vfio_migration_set_state(vbasedev,
+ ~(VFIO_DEVICE_STATE_SAVING | VFIO_DEVICE_STATE_RESUMING),
+ VFIO_DEVICE_STATE_RUNNING);
+ if (ret) {
+ error_report("%s: Failed to set state RUNNING", vbasedev->name);
+ }
+ }
+}
+
+static void vfio_migration_exit(VFIODevice *vbasedev)
+{
+ VFIOMigration *migration = vbasedev->migration;
+
+ vfio_region_exit(&migration->region);
+ vfio_region_finalize(&migration->region);
+ g_free(vbasedev->migration);
+ vbasedev->migration = NULL;
+}
+
+static int vfio_migration_init(VFIODevice *vbasedev,
+ struct vfio_region_info *info)
+{
+ int ret;
+ Object *obj;
+ VFIOMigration *migration;
+ char id[256] = "";
+ g_autofree char *path = NULL, *oid = NULL;
+
+ if (!vbasedev->ops->vfio_get_object) {
+ return -EINVAL;
+ }
+
+ obj = vbasedev->ops->vfio_get_object(vbasedev);
+ if (!obj) {
+ return -EINVAL;
+ }
+
+ vbasedev->migration = g_new0(VFIOMigration, 1);
+
+ ret = vfio_region_setup(obj, vbasedev, &vbasedev->migration->region,
+ info->index, "migration");
+ if (ret) {
+ error_report("%s: Failed to setup VFIO migration region %d: %s",
+ vbasedev->name, info->index, strerror(-ret));
+ goto err;
+ }
+
+ if (!vbasedev->migration->region.size) {
+ error_report("%s: Invalid zero-sized VFIO migration region %d",
+ vbasedev->name, info->index);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ migration = vbasedev->migration;
+ migration->vbasedev = vbasedev;
+
+ oid = vmstate_if_get_id(VMSTATE_IF(DEVICE(obj)));
+ if (oid) {
+ path = g_strdup_printf("%s/vfio", oid);
+ } else {
+ path = g_strdup("vfio");
+ }
+ strpadcpy(id, sizeof(id), path, '\0');
+
+ register_savevm_live(id, VMSTATE_INSTANCE_ID_ANY, 1, &savevm_vfio_handlers,
+ vbasedev);
+
+ migration->vm_state = qdev_add_vm_change_state_handler(vbasedev->dev,
+ vfio_vmstate_change,
+ vbasedev);
+ migration->migration_state.notify = vfio_migration_state_notifier;
+ add_migration_state_change_notifier(&migration->migration_state);
+ return 0;
+
+err:
+ vfio_migration_exit(vbasedev);
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int64_t vfio_mig_bytes_transferred(void)
+{
+ return bytes_transferred;
+}
+
+int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
+{
+ VFIOContainer *container = vbasedev->group->container;
+ struct vfio_region_info *info = NULL;
+ int ret = -ENOTSUP;
+
+ if (!vbasedev->enable_migration || !container->dirty_pages_supported) {
+ goto add_blocker;
+ }
+
+ ret = vfio_get_dev_region_info(vbasedev, VFIO_REGION_TYPE_MIGRATION,
+ VFIO_REGION_SUBTYPE_MIGRATION, &info);
+ if (ret) {
+ goto add_blocker;
+ }
+
+ ret = vfio_migration_init(vbasedev, info);
+ if (ret) {
+ goto add_blocker;
+ }
+
+ trace_vfio_migration_probe(vbasedev->name, info->index);
+ g_free(info);
+ return 0;
+
+add_blocker:
+ error_setg(&vbasedev->migration_blocker,
+ "VFIO device doesn't support migration");
+ g_free(info);
+
+ ret = migrate_add_blocker(vbasedev->migration_blocker, errp);
+ if (ret < 0) {
+ error_free(vbasedev->migration_blocker);
+ vbasedev->migration_blocker = NULL;
+ }
+ return ret;
+}
+
+void vfio_migration_finalize(VFIODevice *vbasedev)
+{
+ if (vbasedev->migration) {
+ VFIOMigration *migration = vbasedev->migration;
+
+ remove_migration_state_change_notifier(&migration->migration_state);
+ qemu_del_vm_change_state_handler(migration->vm_state);
+ unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
+ vfio_migration_exit(vbasedev);
+ }
+
+ if (vbasedev->migration_blocker) {
+ migrate_del_blocker(vbasedev->migration_blocker);
+ error_free(vbasedev->migration_blocker);
+ vbasedev->migration_blocker = NULL;
+ }
+}
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
new file mode 100644
index 000000000..0cf69a8c6
--- /dev/null
+++ b/hw/vfio/pci-quirks.c
@@ -0,0 +1,1769 @@
+/*
+ * device quirks for PCI devices
+ *
+ * Copyright Red Hat, Inc. 2012-2015
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include CONFIG_DEVICES
+#include "exec/memop.h"
+#include "qemu/units.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "qemu/range.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include <sys/ioctl.h>
+#include "hw/nvram/fw_cfg.h"
+#include "hw/qdev-properties.h"
+#include "pci.h"
+#include "trace.h"
+
+/*
+ * List of device ids/vendor ids for which to disable
+ * option rom loading. This avoids guest hangs during rom
+ * execution, as seen with the BCM 57810 card, for lack of a
+ * better way to handle such issues.
+ * The user can still override by specifying a romfile or
+ * rombar=1.
+ * Please see https://bugs.launchpad.net/qemu/+bug/1284874
+ * for an analysis of the 57810 card hang. When adding
+ * a new vendor id/device id combination below, please also add
+ * your card/environment details and information that could
+ * help in debugging to the bug tracking this issue
+ */
+static const struct {
+ uint32_t vendor;
+ uint32_t device;
+} rom_denylist[] = {
+ { 0x14e4, 0x168e }, /* Broadcom BCM 57810 */
+};
+
+bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev)
+{
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(rom_denylist); i++) {
+ if (vfio_pci_is(vdev, rom_denylist[i].vendor, rom_denylist[i].device)) {
+ trace_vfio_quirk_rom_in_denylist(vdev->vbasedev.name,
+ rom_denylist[i].vendor,
+ rom_denylist[i].device);
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Device specific region quirks (mostly backdoors to PCI config space)
+ */
+
+/*
+ * The generic window quirks operate on an address and data register,
+ * vfio_generic_window_address_quirk handles the address register and
+ * vfio_generic_window_data_quirk handles the data register. These ops
+ * pass reads and writes through to hardware until a value matching the
+ * stored address match/mask is written. When this occurs, the data
+ * register access emulated PCI config space for the device rather than
+ * passing through accesses. This enables devices where PCI config space
+ * is accessible behind a window register to maintain the virtualization
+ * provided through vfio.
+ */
+typedef struct VFIOConfigWindowMatch {
+ uint32_t match;
+ uint32_t mask;
+} VFIOConfigWindowMatch;
+
+typedef struct VFIOConfigWindowQuirk {
+ struct VFIOPCIDevice *vdev;
+
+ uint32_t address_val;
+
+ uint32_t address_offset;
+ uint32_t data_offset;
+
+ bool window_enabled;
+ uint8_t bar;
+
+ MemoryRegion *addr_mem;
+ MemoryRegion *data_mem;
+
+ uint32_t nr_matches;
+ VFIOConfigWindowMatch matches[];
+} VFIOConfigWindowQuirk;
+
+static uint64_t vfio_generic_window_quirk_address_read(void *opaque,
+ hwaddr addr,
+ unsigned size)
+{
+ VFIOConfigWindowQuirk *window = opaque;
+ VFIOPCIDevice *vdev = window->vdev;
+
+ return vfio_region_read(&vdev->bars[window->bar].region,
+ addr + window->address_offset, size);
+}
+
+static void vfio_generic_window_quirk_address_write(void *opaque, hwaddr addr,
+ uint64_t data,
+ unsigned size)
+{
+ VFIOConfigWindowQuirk *window = opaque;
+ VFIOPCIDevice *vdev = window->vdev;
+ int i;
+
+ window->window_enabled = false;
+
+ vfio_region_write(&vdev->bars[window->bar].region,
+ addr + window->address_offset, data, size);
+
+ for (i = 0; i < window->nr_matches; i++) {
+ if ((data & ~window->matches[i].mask) == window->matches[i].match) {
+ window->window_enabled = true;
+ window->address_val = data & window->matches[i].mask;
+ trace_vfio_quirk_generic_window_address_write(vdev->vbasedev.name,
+ memory_region_name(window->addr_mem), data);
+ break;
+ }
+ }
+}
+
+static const MemoryRegionOps vfio_generic_window_address_quirk = {
+ .read = vfio_generic_window_quirk_address_read,
+ .write = vfio_generic_window_quirk_address_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t vfio_generic_window_quirk_data_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOConfigWindowQuirk *window = opaque;
+ VFIOPCIDevice *vdev = window->vdev;
+ uint64_t data;
+
+ /* Always read data reg, discard if window enabled */
+ data = vfio_region_read(&vdev->bars[window->bar].region,
+ addr + window->data_offset, size);
+
+ if (window->window_enabled) {
+ data = vfio_pci_read_config(&vdev->pdev, window->address_val, size);
+ trace_vfio_quirk_generic_window_data_read(vdev->vbasedev.name,
+ memory_region_name(window->data_mem), data);
+ }
+
+ return data;
+}
+
+static void vfio_generic_window_quirk_data_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOConfigWindowQuirk *window = opaque;
+ VFIOPCIDevice *vdev = window->vdev;
+
+ if (window->window_enabled) {
+ vfio_pci_write_config(&vdev->pdev, window->address_val, data, size);
+ trace_vfio_quirk_generic_window_data_write(vdev->vbasedev.name,
+ memory_region_name(window->data_mem), data);
+ return;
+ }
+
+ vfio_region_write(&vdev->bars[window->bar].region,
+ addr + window->data_offset, data, size);
+}
+
+static const MemoryRegionOps vfio_generic_window_data_quirk = {
+ .read = vfio_generic_window_quirk_data_read,
+ .write = vfio_generic_window_quirk_data_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
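+
+/*
+ * Example flow, sketched for the ATI BAR4 window configured below: an
+ * address register write in the range 0x4000 .. 0x4000 + config_size - 1
+ * matches {match = 0x4000, mask = config_size - 1} and arms the window;
+ * the following data register access is then serviced from emulated PCI
+ * config space at offset (address & mask) instead of passing through to
+ * hardware.
+ */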
+
+/*
+ * The generic mirror quirk handles devices which expose PCI config space
+ * through a region within a BAR. When enabled, reads and writes are
+ * redirected through to emulated PCI config space. XXX if PCI config space
+ * used memory regions, this could just be an alias.
+ */
+typedef struct VFIOConfigMirrorQuirk {
+ struct VFIOPCIDevice *vdev;
+ uint32_t offset;
+ uint8_t bar;
+ MemoryRegion *mem;
+ uint8_t data[];
+} VFIOConfigMirrorQuirk;
+
+static uint64_t vfio_generic_quirk_mirror_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOConfigMirrorQuirk *mirror = opaque;
+ VFIOPCIDevice *vdev = mirror->vdev;
+ uint64_t data;
+
+ /* Read and discard in case the hardware cares */
+ (void)vfio_region_read(&vdev->bars[mirror->bar].region,
+ addr + mirror->offset, size);
+
+ data = vfio_pci_read_config(&vdev->pdev, addr, size);
+ trace_vfio_quirk_generic_mirror_read(vdev->vbasedev.name,
+ memory_region_name(mirror->mem),
+ addr, data);
+ return data;
+}
+
+static void vfio_generic_quirk_mirror_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOConfigMirrorQuirk *mirror = opaque;
+ VFIOPCIDevice *vdev = mirror->vdev;
+
+ vfio_pci_write_config(&vdev->pdev, addr, data, size);
+ trace_vfio_quirk_generic_mirror_write(vdev->vbasedev.name,
+ memory_region_name(mirror->mem),
+ addr, data);
+}
+
+static const MemoryRegionOps vfio_generic_mirror_quirk = {
+ .read = vfio_generic_quirk_mirror_read,
+ .write = vfio_generic_quirk_mirror_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+/* Is range1 fully contained within range2? */
+static bool vfio_range_contained(uint64_t first1, uint64_t len1,
+                                 uint64_t first2, uint64_t len2)
+{
+ return (first1 >= first2 && first1 + len1 <= first2 + len2);
+}
+
+#define PCI_VENDOR_ID_ATI 0x1002
+
+/*
+ * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
+ * through VGA register 0x3c3. On newer cards, the I/O port BAR is always
+ * BAR4 (older cards like the X550 used BAR1, but we don't care to support
+ * those). Note that on bare metal, a read of 0x3c3 doesn't always return the
+ * I/O port BAR address. Originally this was coded to return the virtual BAR
+ * address only if the physical register read returns the actual BAR address,
+ * but users have reported greater success if we return the virtual address
+ * unconditionally.
+ */
+static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOPCIDevice *vdev = opaque;
+ uint64_t data = vfio_pci_read_config(&vdev->pdev,
+ PCI_BASE_ADDRESS_4 + 1, size);
+
+ trace_vfio_quirk_ati_3c3_read(vdev->vbasedev.name, data);
+
+ return data;
+}
+
+static void vfio_ati_3c3_quirk_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__);
+}
+
+static const MemoryRegionOps vfio_ati_3c3_quirk = {
+ .read = vfio_ati_3c3_quirk_read,
+ .write = vfio_ati_3c3_quirk_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+VFIOQuirk *vfio_quirk_alloc(int nr_mem)
+{
+ VFIOQuirk *quirk = g_new0(VFIOQuirk, 1);
+ QLIST_INIT(&quirk->ioeventfds);
+ quirk->mem = g_new0(MemoryRegion, nr_mem);
+ quirk->nr_mem = nr_mem;
+
+ return quirk;
+}
+
+static void vfio_ioeventfd_exit(VFIOPCIDevice *vdev, VFIOIOEventFD *ioeventfd)
+{
+ QLIST_REMOVE(ioeventfd, next);
+ memory_region_del_eventfd(ioeventfd->mr, ioeventfd->addr, ioeventfd->size,
+ true, ioeventfd->data, &ioeventfd->e);
+
+ if (ioeventfd->vfio) {
+ struct vfio_device_ioeventfd vfio_ioeventfd;
+
+ vfio_ioeventfd.argsz = sizeof(vfio_ioeventfd);
+ vfio_ioeventfd.flags = ioeventfd->size;
+ vfio_ioeventfd.data = ioeventfd->data;
+ vfio_ioeventfd.offset = ioeventfd->region->fd_offset +
+ ioeventfd->region_addr;
+ vfio_ioeventfd.fd = -1;
+
+ if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd)) {
+ error_report("Failed to remove vfio ioeventfd for %s+0x%"
+ HWADDR_PRIx"[%d]:0x%"PRIx64" (%m)",
+ memory_region_name(ioeventfd->mr), ioeventfd->addr,
+ ioeventfd->size, ioeventfd->data);
+ }
+ } else {
+ qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e),
+ NULL, NULL, NULL);
+ }
+
+ event_notifier_cleanup(&ioeventfd->e);
+ trace_vfio_ioeventfd_exit(memory_region_name(ioeventfd->mr),
+ (uint64_t)ioeventfd->addr, ioeventfd->size,
+ ioeventfd->data);
+ g_free(ioeventfd);
+}
+
+static void vfio_drop_dynamic_eventfds(VFIOPCIDevice *vdev, VFIOQuirk *quirk)
+{
+ VFIOIOEventFD *ioeventfd, *tmp;
+
+ QLIST_FOREACH_SAFE(ioeventfd, &quirk->ioeventfds, next, tmp) {
+ if (ioeventfd->dynamic) {
+ vfio_ioeventfd_exit(vdev, ioeventfd);
+ }
+ }
+}
+
+static void vfio_ioeventfd_handler(void *opaque)
+{
+ VFIOIOEventFD *ioeventfd = opaque;
+
+ if (event_notifier_test_and_clear(&ioeventfd->e)) {
+ vfio_region_write(ioeventfd->region, ioeventfd->region_addr,
+ ioeventfd->data, ioeventfd->size);
+ trace_vfio_ioeventfd_handler(memory_region_name(ioeventfd->mr),
+ (uint64_t)ioeventfd->addr, ioeventfd->size,
+ ioeventfd->data);
+ }
+}
+
+static VFIOIOEventFD *vfio_ioeventfd_init(VFIOPCIDevice *vdev,
+ MemoryRegion *mr, hwaddr addr,
+ unsigned size, uint64_t data,
+ VFIORegion *region,
+ hwaddr region_addr, bool dynamic)
+{
+ VFIOIOEventFD *ioeventfd;
+
+ if (vdev->no_kvm_ioeventfd) {
+ return NULL;
+ }
+
+ ioeventfd = g_malloc0(sizeof(*ioeventfd));
+
+ if (event_notifier_init(&ioeventfd->e, 0)) {
+ g_free(ioeventfd);
+ return NULL;
+ }
+
+ /*
+ * MemoryRegion and relative offset, plus additional ioeventfd setup
+ * parameters for configuring and later tearing down KVM ioeventfd.
+ */
+ ioeventfd->mr = mr;
+ ioeventfd->addr = addr;
+ ioeventfd->size = size;
+ ioeventfd->data = data;
+ ioeventfd->dynamic = dynamic;
+ /*
+ * VFIORegion and relative offset for implementing the userspace
+ * handler. data & size fields shared for both uses.
+ */
+ ioeventfd->region = region;
+ ioeventfd->region_addr = region_addr;
+
+ if (!vdev->no_vfio_ioeventfd) {
+ struct vfio_device_ioeventfd vfio_ioeventfd;
+
+ vfio_ioeventfd.argsz = sizeof(vfio_ioeventfd);
+ vfio_ioeventfd.flags = ioeventfd->size;
+ vfio_ioeventfd.data = ioeventfd->data;
+ vfio_ioeventfd.offset = ioeventfd->region->fd_offset +
+ ioeventfd->region_addr;
+ vfio_ioeventfd.fd = event_notifier_get_fd(&ioeventfd->e);
+
+ ioeventfd->vfio = !ioctl(vdev->vbasedev.fd,
+ VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd);
+ }
+
+ if (!ioeventfd->vfio) {
+ qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e),
+ vfio_ioeventfd_handler, NULL, ioeventfd);
+ }
+
+ memory_region_add_eventfd(ioeventfd->mr, ioeventfd->addr, ioeventfd->size,
+ true, ioeventfd->data, &ioeventfd->e);
+ trace_vfio_ioeventfd_init(memory_region_name(mr), (uint64_t)addr,
+ size, data, ioeventfd->vfio);
+
+ return ioeventfd;
+}
+
+static void vfio_vga_probe_ati_3c3_quirk(VFIOPCIDevice *vdev)
+{
+ VFIOQuirk *quirk;
+
+ /*
+ * As long as the BAR is >= 256 bytes it will be aligned such that the
+ * lower byte is always zero. Filter out anything else, if it exists.
+ */
+ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
+ !vdev->bars[4].ioport || vdev->bars[4].region.size < 256) {
+ return;
+ }
+
+ quirk = vfio_quirk_alloc(1);
+
+ memory_region_init_io(quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, vdev,
+ "vfio-ati-3c3-quirk", 1);
+ memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
+ 3 /* offset 3 bytes from 0x3c0 */, quirk->mem);
+
+ QLIST_INSERT_HEAD(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks,
+ quirk, next);
+
+ trace_vfio_quirk_ati_3c3_probe(vdev->vbasedev.name);
+}
+
+/*
+ * Newer ATI/AMD devices, including HD5450 and HD7850, have a mirror to PCI
+ * config space through MMIO BAR2 at offset 0x4000. Nothing seems to access
+ * the MMIO space directly, but a window to this space is provided through
+ * I/O port BAR4. Offset 0x0 is the address register and offset 0x4 is the
+ * data register. When the address is programmed to a range of 0x4000-0x4fff,
+ * PCI configuration space is available. Experimentation seems to indicate
+ * that the hardware may provide only read-only access.
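+ *
+ * Hypothetical access sequence, matching the address port at BAR4+0x0
+ * and data port at BAR4+0x4 set up below:
+ *   outl(io_bar4 + 0x0, 0x4000 + reg);   select config offset 'reg'
+ *   inl(io_bar4 + 0x4);                  returns the config space dword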
+ */
+static void vfio_probe_ati_bar4_quirk(VFIOPCIDevice *vdev, int nr)
+{
+ VFIOQuirk *quirk;
+ VFIOConfigWindowQuirk *window;
+
+ /* This window doesn't seem to be used except by legacy VGA code */
+ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
+ !vdev->vga || nr != 4) {
+ return;
+ }
+
+ quirk = vfio_quirk_alloc(2);
+ window = quirk->data = g_malloc0(sizeof(*window) +
+ sizeof(VFIOConfigWindowMatch));
+ window->vdev = vdev;
+ window->address_offset = 0;
+ window->data_offset = 4;
+ window->nr_matches = 1;
+ window->matches[0].match = 0x4000;
+ window->matches[0].mask = vdev->config_size - 1;
+ window->bar = nr;
+ window->addr_mem = &quirk->mem[0];
+ window->data_mem = &quirk->mem[1];
+
+ memory_region_init_io(window->addr_mem, OBJECT(vdev),
+ &vfio_generic_window_address_quirk, window,
+ "vfio-ati-bar4-window-address-quirk", 4);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ window->address_offset,
+ window->addr_mem, 1);
+
+ memory_region_init_io(window->data_mem, OBJECT(vdev),
+ &vfio_generic_window_data_quirk, window,
+ "vfio-ati-bar4-window-data-quirk", 4);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ window->data_offset,
+ window->data_mem, 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+ trace_vfio_quirk_ati_bar4_probe(vdev->vbasedev.name);
+}
+
+/*
+ * Trap the BAR2 MMIO mirror to config space as well.
+ */
+static void vfio_probe_ati_bar2_quirk(VFIOPCIDevice *vdev, int nr)
+{
+ VFIOQuirk *quirk;
+ VFIOConfigMirrorQuirk *mirror;
+
+ /* Only enable on newer devices where BAR2 is 64bit */
+ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
+ !vdev->vga || nr != 2 || !vdev->bars[2].mem64) {
+ return;
+ }
+
+ quirk = vfio_quirk_alloc(1);
+ mirror = quirk->data = g_malloc0(sizeof(*mirror));
+ mirror->mem = quirk->mem;
+ mirror->vdev = vdev;
+ mirror->offset = 0x4000;
+ mirror->bar = nr;
+
+ memory_region_init_io(mirror->mem, OBJECT(vdev),
+ &vfio_generic_mirror_quirk, mirror,
+ "vfio-ati-bar2-4000-quirk", PCI_CONFIG_SPACE_SIZE);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ mirror->offset, mirror->mem, 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+ trace_vfio_quirk_ati_bar2_probe(vdev->vbasedev.name);
+}
+
+/*
+ * Older ATI/AMD cards like the X550 have a similar window to that above.
+ * I/O port BAR1 provides a window to a mirror of PCI config space located
+ * in BAR2 at offset 0xf00. We don't care to support such older cards, but
+ * note it for future reference.
+ */
+
+/*
+ * Nvidia has several different methods to get to config space; the
+ * nouveau project has several of these documented here:
+ * https://github.com/pathscale/envytools/tree/master/hwdocs
+ *
+ * The first quirk is actually not documented in envytools and is found
+ * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]). This is an
+ * NV46 chipset. The backdoor uses the legacy VGA I/O ports to access
+ * the mirror of PCI config space found at BAR0 offset 0x1800. The access
+ * sequence first writes 0x338 to I/O port 0x3d4. The target offset is
+ * then written to 0x3d0. Finally, 0x538 (for a read) or 0x738 (for a
+ * write) is written to 0x3d4. The BAR0 offset is then accessible
+ * through 0x3d0. This quirk doesn't seem to be necessary on newer cards
+ * that use the I/O port BAR5 window but it doesn't hurt to leave it.
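+ *
+ * Hypothetical read of config space offset 0x40 via this backdoor,
+ * matching the state machine implemented below:
+ *   outw(0x3d4, 0x338);    NONE -> SELECT
+ *   outl(0x3d0, 0x1840);   SELECT -> WINDOW (0x1800 + offset 0x40)
+ *   outw(0x3d4, 0x538);    WINDOW -> READ
+ *   inl(0x3d0);            returns the config space data at 0x40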
+ */
+typedef enum {NONE = 0, SELECT, WINDOW, READ, WRITE} VFIONvidia3d0State;
+static const char *nv3d0_states[] = { "NONE", "SELECT",
+ "WINDOW", "READ", "WRITE" };
+
+typedef struct VFIONvidia3d0Quirk {
+ VFIOPCIDevice *vdev;
+ VFIONvidia3d0State state;
+ uint32_t offset;
+} VFIONvidia3d0Quirk;
+
+static uint64_t vfio_nvidia_3d4_quirk_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIONvidia3d0Quirk *quirk = opaque;
+ VFIOPCIDevice *vdev = quirk->vdev;
+
+ quirk->state = NONE;
+
+ return vfio_vga_read(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
+ addr + 0x14, size);
+}
+
+static void vfio_nvidia_3d4_quirk_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIONvidia3d0Quirk *quirk = opaque;
+ VFIOPCIDevice *vdev = quirk->vdev;
+ VFIONvidia3d0State old_state = quirk->state;
+
+ quirk->state = NONE;
+
+ switch (data) {
+ case 0x338:
+ if (old_state == NONE) {
+ quirk->state = SELECT;
+ trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
+ nv3d0_states[quirk->state]);
+ }
+ break;
+ case 0x538:
+ if (old_state == WINDOW) {
+ quirk->state = READ;
+ trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
+ nv3d0_states[quirk->state]);
+ }
+ break;
+ case 0x738:
+ if (old_state == WINDOW) {
+ quirk->state = WRITE;
+ trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
+ nv3d0_states[quirk->state]);
+ }
+ break;
+ }
+
+ vfio_vga_write(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
+ addr + 0x14, data, size);
+}
+
+static const MemoryRegionOps vfio_nvidia_3d4_quirk = {
+ .read = vfio_nvidia_3d4_quirk_read,
+ .write = vfio_nvidia_3d4_quirk_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIONvidia3d0Quirk *quirk = opaque;
+ VFIOPCIDevice *vdev = quirk->vdev;
+ VFIONvidia3d0State old_state = quirk->state;
+ uint64_t data = vfio_vga_read(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
+ addr + 0x10, size);
+
+ quirk->state = NONE;
+
+ if (old_state == READ &&
+ (quirk->offset & ~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) {
+ uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1);
+
+ data = vfio_pci_read_config(&vdev->pdev, offset, size);
+ trace_vfio_quirk_nvidia_3d0_read(vdev->vbasedev.name,
+ offset, size, data);
+ }
+
+ return data;
+}
+
+static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIONvidia3d0Quirk *quirk = opaque;
+ VFIOPCIDevice *vdev = quirk->vdev;
+ VFIONvidia3d0State old_state = quirk->state;
+
+ quirk->state = NONE;
+
+ if (old_state == SELECT) {
+ quirk->offset = (uint32_t)data;
+ quirk->state = WINDOW;
+ trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name,
+ nv3d0_states[quirk->state]);
+ } else if (old_state == WRITE) {
+ if ((quirk->offset & ~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) {
+ uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1);
+
+ vfio_pci_write_config(&vdev->pdev, offset, data, size);
+ trace_vfio_quirk_nvidia_3d0_write(vdev->vbasedev.name,
+ offset, data, size);
+ return;
+ }
+ }
+
+ vfio_vga_write(&vdev->vga->region[QEMU_PCI_VGA_IO_HI],
+ addr + 0x10, data, size);
+}
+
+static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
+ .read = vfio_nvidia_3d0_quirk_read,
+ .write = vfio_nvidia_3d0_quirk_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_vga_probe_nvidia_3d0_quirk(VFIOPCIDevice *vdev)
+{
+ VFIOQuirk *quirk;
+ VFIONvidia3d0Quirk *data;
+
+ if (vdev->no_geforce_quirks ||
+ !vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
+ !vdev->bars[1].region.size) {
+ return;
+ }
+
+ quirk = vfio_quirk_alloc(2);
+ quirk->data = data = g_malloc0(sizeof(*data));
+ data->vdev = vdev;
+
+ memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_nvidia_3d4_quirk,
+ data, "vfio-nvidia-3d4-quirk", 2);
+ memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
+ 0x14 /* 0x3c0 + 0x14 */, &quirk->mem[0]);
+
+ memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_nvidia_3d0_quirk,
+ data, "vfio-nvidia-3d0-quirk", 2);
+ memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
+ 0x10 /* 0x3c0 + 0x10 */, &quirk->mem[1]);
+
+ QLIST_INSERT_HEAD(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks,
+ quirk, next);
+
+ trace_vfio_quirk_nvidia_3d0_probe(vdev->vbasedev.name);
+}
+
+/*
+ * The second quirk is documented in envytools. The I/O port BAR5 is just
+ * a set of address/data ports to the MMIO BARs. The BAR we care about is
+ * again BAR0. This backdoor is apparently a bit newer than the one above,
+ * so we need to trap not only the 256 bytes @0x1800, but all of PCI config
+ * space, including extended space, which is available in the 4k window
+ * @0x88000.
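+ *
+ * Register layout sketch, as wired up below: dword 0x0 (master) and
+ * dword 0x4 (enable) must each have bit 0 set for the window to be
+ * active; dword 0x8 is the address port and dword 0xc the data port.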
+ */
+typedef struct VFIONvidiaBAR5Quirk {
+ uint32_t master;
+ uint32_t enable;
+ MemoryRegion *addr_mem;
+ MemoryRegion *data_mem;
+ bool enabled;
+ VFIOConfigWindowQuirk window; /* last for match data */
+} VFIONvidiaBAR5Quirk;
+
+static void vfio_nvidia_bar5_enable(VFIONvidiaBAR5Quirk *bar5)
+{
+ VFIOPCIDevice *vdev = bar5->window.vdev;
+
+ if (((bar5->master & bar5->enable) & 0x1) == bar5->enabled) {
+ return;
+ }
+
+ bar5->enabled = !bar5->enabled;
+ trace_vfio_quirk_nvidia_bar5_state(vdev->vbasedev.name,
+ bar5->enabled ? "Enable" : "Disable");
+ memory_region_set_enabled(bar5->addr_mem, bar5->enabled);
+ memory_region_set_enabled(bar5->data_mem, bar5->enabled);
+}
+
+static uint64_t vfio_nvidia_bar5_quirk_master_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIONvidiaBAR5Quirk *bar5 = opaque;
+ VFIOPCIDevice *vdev = bar5->window.vdev;
+
+ return vfio_region_read(&vdev->bars[5].region, addr, size);
+}
+
+static void vfio_nvidia_bar5_quirk_master_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIONvidiaBAR5Quirk *bar5 = opaque;
+ VFIOPCIDevice *vdev = bar5->window.vdev;
+
+ vfio_region_write(&vdev->bars[5].region, addr, data, size);
+
+ bar5->master = data;
+ vfio_nvidia_bar5_enable(bar5);
+}
+
+static const MemoryRegionOps vfio_nvidia_bar5_quirk_master = {
+ .read = vfio_nvidia_bar5_quirk_master_read,
+ .write = vfio_nvidia_bar5_quirk_master_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t vfio_nvidia_bar5_quirk_enable_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIONvidiaBAR5Quirk *bar5 = opaque;
+ VFIOPCIDevice *vdev = bar5->window.vdev;
+
+ return vfio_region_read(&vdev->bars[5].region, addr + 4, size);
+}
+
+static void vfio_nvidia_bar5_quirk_enable_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIONvidiaBAR5Quirk *bar5 = opaque;
+ VFIOPCIDevice *vdev = bar5->window.vdev;
+
+ vfio_region_write(&vdev->bars[5].region, addr + 4, data, size);
+
+ bar5->enable = data;
+ vfio_nvidia_bar5_enable(bar5);
+}
+
+static const MemoryRegionOps vfio_nvidia_bar5_quirk_enable = {
+ .read = vfio_nvidia_bar5_quirk_enable_read,
+ .write = vfio_nvidia_bar5_quirk_enable_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_probe_nvidia_bar5_quirk(VFIOPCIDevice *vdev, int nr)
+{
+ VFIOQuirk *quirk;
+ VFIONvidiaBAR5Quirk *bar5;
+ VFIOConfigWindowQuirk *window;
+
+ if (vdev->no_geforce_quirks ||
+ !vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
+ !vdev->vga || nr != 5 || !vdev->bars[5].ioport) {
+ return;
+ }
+
+ quirk = vfio_quirk_alloc(4);
+ bar5 = quirk->data = g_malloc0(sizeof(*bar5) +
+ (sizeof(VFIOConfigWindowMatch) * 2));
+ window = &bar5->window;
+
+ window->vdev = vdev;
+ window->address_offset = 0x8;
+ window->data_offset = 0xc;
+ window->nr_matches = 2;
+ window->matches[0].match = 0x1800;
+ window->matches[0].mask = PCI_CONFIG_SPACE_SIZE - 1;
+ window->matches[1].match = 0x88000;
+ window->matches[1].mask = vdev->config_size - 1;
+ window->bar = nr;
+ window->addr_mem = bar5->addr_mem = &quirk->mem[0];
+ window->data_mem = bar5->data_mem = &quirk->mem[1];
+
+ memory_region_init_io(window->addr_mem, OBJECT(vdev),
+ &vfio_generic_window_address_quirk, window,
+ "vfio-nvidia-bar5-window-address-quirk", 4);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ window->address_offset,
+ window->addr_mem, 1);
+ memory_region_set_enabled(window->addr_mem, false);
+
+ memory_region_init_io(window->data_mem, OBJECT(vdev),
+ &vfio_generic_window_data_quirk, window,
+ "vfio-nvidia-bar5-window-data-quirk", 4);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ window->data_offset,
+ window->data_mem, 1);
+ memory_region_set_enabled(window->data_mem, false);
+
+ memory_region_init_io(&quirk->mem[2], OBJECT(vdev),
+ &vfio_nvidia_bar5_quirk_master, bar5,
+ "vfio-nvidia-bar5-master-quirk", 4);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ 0, &quirk->mem[2], 1);
+
+ memory_region_init_io(&quirk->mem[3], OBJECT(vdev),
+ &vfio_nvidia_bar5_quirk_enable, bar5,
+ "vfio-nvidia-bar5-enable-quirk", 4);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ 4, &quirk->mem[3], 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+ trace_vfio_quirk_nvidia_bar5_probe(vdev->vbasedev.name);
+}
+
+typedef struct LastDataSet {
+ VFIOQuirk *quirk;
+ hwaddr addr;
+ uint64_t data;
+ unsigned size;
+ int hits;
+ int added;
+} LastDataSet;
+
+#define MAX_DYN_IOEVENTFD 10
+#define HITS_FOR_IOEVENTFD 10
+
+/*
+ * Finally, BAR0 itself. We want to redirect any accesses to either
+ * 0x1800 or 0x88000 through the PCI config space access functions.
+ */
+static void vfio_nvidia_quirk_mirror_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOConfigMirrorQuirk *mirror = opaque;
+ VFIOPCIDevice *vdev = mirror->vdev;
+ PCIDevice *pdev = &vdev->pdev;
+ LastDataSet *last = (LastDataSet *)&mirror->data;
+
+ vfio_generic_quirk_mirror_write(opaque, addr, data, size);
+
+ /*
+ * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the
+ * MSI capability ID register. Both the ID and next register are
+ * read-only, so we allow writes covering either of those to real hw.
+ */
+ if ((pdev->cap_present & QEMU_PCI_CAP_MSI) &&
+ vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) {
+ vfio_region_write(&vdev->bars[mirror->bar].region,
+ addr + mirror->offset, data, size);
+ trace_vfio_quirk_nvidia_bar0_msi_ack(vdev->vbasedev.name);
+ }
+
+ /*
+ * Automatically add an ioeventfd to handle any repeated write with the
+ * same data and size above the standard PCI config space header. This is
+ * primarily expected to accelerate the MSI-ACK behavior, as noted
+ * above. Current hardware/drivers should trigger an ioeventfd at config
+ * offset 0x704 (region offset 0x88704), with data 0x0, size 4.
+ *
+ * The criterion of 10 successive hits is arbitrary but reliably adds the
+ * MSI-ACK region. Note that as some writes are bypassed via the ioeventfd,
+ * the remaining ones have a greater chance of being seen successively.
+ * To avoid the pathological case of burning up all of QEMU's open file
+ * handles, arbitrarily limit this algorithm to adding no more than 10
+ * ioeventfds, print an error if we would have added an 11th, and then
+ * stop counting.
+ */
+ if (!vdev->no_kvm_ioeventfd &&
+ addr >= PCI_STD_HEADER_SIZEOF && last->added <= MAX_DYN_IOEVENTFD) {
+ if (addr != last->addr || data != last->data || size != last->size) {
+ last->addr = addr;
+ last->data = data;
+ last->size = size;
+ last->hits = 1;
+ } else if (++last->hits >= HITS_FOR_IOEVENTFD) {
+ if (last->added < MAX_DYN_IOEVENTFD) {
+ VFIOIOEventFD *ioeventfd;
+ ioeventfd = vfio_ioeventfd_init(vdev, mirror->mem, addr, size,
+ data, &vdev->bars[mirror->bar].region,
+ mirror->offset + addr, true);
+ if (ioeventfd) {
+ VFIOQuirk *quirk = last->quirk;
+
+ QLIST_INSERT_HEAD(&quirk->ioeventfds, ioeventfd, next);
+ last->added++;
+ }
+ } else {
+ last->added++;
+ warn_report("NVIDIA ioeventfd queue full for %s, unable to "
+ "accelerate 0x%"HWADDR_PRIx", data 0x%"PRIx64", "
+ "size %u", vdev->vbasedev.name, addr, data, size);
+ }
+ }
+ }
+}
+
+static const MemoryRegionOps vfio_nvidia_mirror_quirk = {
+ .read = vfio_generic_quirk_mirror_read,
+ .write = vfio_nvidia_quirk_mirror_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_nvidia_bar0_quirk_reset(VFIOPCIDevice *vdev, VFIOQuirk *quirk)
+{
+ VFIOConfigMirrorQuirk *mirror = quirk->data;
+ LastDataSet *last = (LastDataSet *)&mirror->data;
+
+ last->addr = last->data = last->size = last->hits = last->added = 0;
+
+ vfio_drop_dynamic_eventfds(vdev, quirk);
+}
+
+static void vfio_probe_nvidia_bar0_quirk(VFIOPCIDevice *vdev, int nr)
+{
+ VFIOQuirk *quirk;
+ VFIOConfigMirrorQuirk *mirror;
+ LastDataSet *last;
+
+ if (vdev->no_geforce_quirks ||
+ !vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
+ !vfio_is_vga(vdev) || nr != 0) {
+ return;
+ }
+
+ quirk = vfio_quirk_alloc(1);
+ quirk->reset = vfio_nvidia_bar0_quirk_reset;
+ mirror = quirk->data = g_malloc0(sizeof(*mirror) + sizeof(LastDataSet));
+ mirror->mem = quirk->mem;
+ mirror->vdev = vdev;
+ mirror->offset = 0x88000;
+ mirror->bar = nr;
+ last = (LastDataSet *)&mirror->data;
+ last->quirk = quirk;
+
+ memory_region_init_io(mirror->mem, OBJECT(vdev),
+ &vfio_nvidia_mirror_quirk, mirror,
+ "vfio-nvidia-bar0-88000-mirror-quirk",
+ vdev->config_size);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ mirror->offset, mirror->mem, 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+ /* The 0x1800 offset mirror only seems to get used by legacy VGA */
+ if (vdev->vga) {
+ quirk = vfio_quirk_alloc(1);
+ quirk->reset = vfio_nvidia_bar0_quirk_reset;
+ mirror = quirk->data = g_malloc0(sizeof(*mirror) + sizeof(LastDataSet));
+ mirror->mem = quirk->mem;
+ mirror->vdev = vdev;
+ mirror->offset = 0x1800;
+ mirror->bar = nr;
+ last = (LastDataSet *)&mirror->data;
+ last->quirk = quirk;
+
+ memory_region_init_io(mirror->mem, OBJECT(vdev),
+ &vfio_nvidia_mirror_quirk, mirror,
+ "vfio-nvidia-bar0-1800-mirror-quirk",
+ PCI_CONFIG_SPACE_SIZE);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ mirror->offset, mirror->mem, 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+ }
+
+ trace_vfio_quirk_nvidia_bar0_probe(vdev->vbasedev.name);
+}
+
+/*
+ * TODO - Some Nvidia devices provide config access to their companion HDA
+ * device and even to their parent bridge via these config space mirrors.
+ * Add quirks for those regions.
+ */
+
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+
+/*
+ * RTL8168 devices have a backdoor that can access the MSI-X table. At BAR2
+ * offset 0x70 there is a dword data register, offset 0x74 is a dword address
+ * register. According to the Linux r8169 driver, the MSI-X table is addressed
+ * when the "type" portion of the address register is set to 0x1. This appears
+ * to be bits 16:30. Bit 31 is both a write indicator and some sort of
+ * "address latched" indicator. Bits 12:15 are a mask field, which we can
+ * ignore because the MSI-X table should always be accessed as a dword (full
+ * mask). Bits 0:11 are the offset within the type.
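+ *
+ * In the code below this decodes as: (addr & 0x7fff0000) == 0x10000
+ * selects the MSI-X table, bit 31 requests a write, and (addr & 0xfff)
+ * is the offset into the table.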
+ *
+ * Example trace:
+ *
+ * Read from MSI-X table offset 0
+ * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x1f000, 4) // store read addr
+ * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x8001f000 // latch
+ * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x70, 4) = 0xfee00398 // read data
+ *
+ * Write 0xfee00000 to MSI-X table offset 0
+ * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x70, 0xfee00000, 4) // write data
+ * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x8001f000, 4) // do write
+ * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x1f000 // complete
+ */
+typedef struct VFIOrtl8168Quirk {
+ VFIOPCIDevice *vdev;
+ uint32_t addr;
+ uint32_t data;
+ bool enabled;
+} VFIOrtl8168Quirk;
+
+static uint64_t vfio_rtl8168_quirk_address_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOrtl8168Quirk *rtl = opaque;
+ VFIOPCIDevice *vdev = rtl->vdev;
+ uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x74, size);
+
+ if (rtl->enabled) {
+ data = rtl->addr ^ 0x80000000U; /* latch/complete */
+ trace_vfio_quirk_rtl8168_fake_latch(vdev->vbasedev.name, data);
+ }
+
+ return data;
+}
+
+static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOrtl8168Quirk *rtl = opaque;
+ VFIOPCIDevice *vdev = rtl->vdev;
+
+ rtl->enabled = false;
+
+ if ((data & 0x7fff0000) == 0x10000) { /* MSI-X table */
+ rtl->enabled = true;
+ rtl->addr = (uint32_t)data;
+
+ if (data & 0x80000000U) { /* Do write */
+ if (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) {
+ hwaddr offset = data & 0xfff;
+ uint64_t val = rtl->data;
+
+ trace_vfio_quirk_rtl8168_msix_write(vdev->vbasedev.name,
+ (uint16_t)offset, val);
+
+ /* Write to the proper guest MSI-X table instead */
+ memory_region_dispatch_write(&vdev->pdev.msix_table_mmio,
+ offset, val,
+ size_memop(size) | MO_LE,
+ MEMTXATTRS_UNSPECIFIED);
+ }
+ return; /* Do not write guest MSI-X data to hardware */
+ }
+ }
+
+ vfio_region_write(&vdev->bars[2].region, addr + 0x74, data, size);
+}
+
+static const MemoryRegionOps vfio_rtl_address_quirk = {
+ .read = vfio_rtl8168_quirk_address_read,
+ .write = vfio_rtl8168_quirk_address_write,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ .unaligned = false,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t vfio_rtl8168_quirk_data_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOrtl8168Quirk *rtl = opaque;
+ VFIOPCIDevice *vdev = rtl->vdev;
+ uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x70, size);
+
+ if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
+ hwaddr offset = rtl->addr & 0xfff;
+ memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset,
+ &data, size_memop(size) | MO_LE,
+ MEMTXATTRS_UNSPECIFIED);
+ trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data);
+ }
+
+ return data;
+}
+
+static void vfio_rtl8168_quirk_data_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOrtl8168Quirk *rtl = opaque;
+ VFIOPCIDevice *vdev = rtl->vdev;
+
+ rtl->data = (uint32_t)data;
+
+ vfio_region_write(&vdev->bars[2].region, addr + 0x70, data, size);
+}
+
+static const MemoryRegionOps vfio_rtl_data_quirk = {
+ .read = vfio_rtl8168_quirk_data_read,
+ .write = vfio_rtl8168_quirk_data_write,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ .unaligned = false,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_probe_rtl8168_bar2_quirk(VFIOPCIDevice *vdev, int nr)
+{
+ VFIOQuirk *quirk;
+ VFIOrtl8168Quirk *rtl;
+
+ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_REALTEK, 0x8168) || nr != 2) {
+ return;
+ }
+
+ quirk = vfio_quirk_alloc(2);
+ quirk->data = rtl = g_malloc0(sizeof(*rtl));
+ rtl->vdev = vdev;
+
+ memory_region_init_io(&quirk->mem[0], OBJECT(vdev),
+ &vfio_rtl_address_quirk, rtl,
+ "vfio-rtl8168-window-address-quirk", 4);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ 0x74, &quirk->mem[0], 1);
+
+ memory_region_init_io(&quirk->mem[1], OBJECT(vdev),
+ &vfio_rtl_data_quirk, rtl,
+ "vfio-rtl8168-window-data-quirk", 4);
+ memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+ 0x70, &quirk->mem[1], 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+ trace_vfio_quirk_rtl8168_probe(vdev->vbasedev.name);
+}
+
+#define IGD_ASLS 0xfc /* ASL Storage Register */
+
+/*
+ * The OpRegion includes the Video BIOS Table, which seems important for
+ * telling the driver what sort of outputs it has. Without this, the device
+ * may work in the guest, but we may not get output. This also requires BIOS
+ * support to reserve and populate a section of guest memory sufficient for
+ * the table and to write the base address of that memory to the ASLS register
+ * of the IGD device.
+ */
+int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
+ struct vfio_region_info *info, Error **errp)
+{
+ int ret;
+
+ vdev->igd_opregion = g_malloc0(info->size);
+ ret = pread(vdev->vbasedev.fd, vdev->igd_opregion,
+ info->size, info->offset);
+ if (ret != info->size) {
+ error_setg(errp, "failed to read IGD OpRegion");
+ g_free(vdev->igd_opregion);
+ vdev->igd_opregion = NULL;
+ return -EINVAL;
+ }
+
+ /*
+ * Provide fw_cfg with a copy of the OpRegion which the VM firmware is to
+ * allocate 32bit reserved memory for, copy these contents into, and write
+ * the reserved memory base address to the device ASLS register at 0xFC.
+ * Alignment of this reserved region seems flexible, but using a 4k page
+ * alignment seems to work well. This interface assumes a single IGD
+ * device, which may be at VM address 00:02.0 in legacy mode or another
+ * address in UPT mode.
+ *
+ * NB, there may be future use cases discovered where the VM should have
+ * direct interaction with the host OpRegion, in which case the write to
+ * the ASLS register would trigger MemoryRegion setup to enable that.
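+ *
+ * For illustration only, guest firmware consuming this fw_cfg file is
+ * expected to do roughly the following (hypothetical helper names):
+ *   base = reserve_32bit_memory(opregion_size, 4096);
+ *   memcpy(base, fw_cfg_read("etc/igd-opregion"), opregion_size);
+ *   pci_config_writel(igd_bdf, 0xFC, base);   (the ASLS register)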
+ */
+ fw_cfg_add_file(fw_cfg_find(), "etc/igd-opregion",
+ vdev->igd_opregion, info->size);
+
+ trace_vfio_pci_igd_opregion_enabled(vdev->vbasedev.name);
+
+ pci_set_long(vdev->pdev.config + IGD_ASLS, 0);
+ pci_set_long(vdev->pdev.wmask + IGD_ASLS, ~0);
+ pci_set_long(vdev->emulated_config_bits + IGD_ASLS, ~0);
+
+ return 0;
+}
+
+/*
+ * Common quirk probe entry points.
+ */
+void vfio_vga_quirk_setup(VFIOPCIDevice *vdev)
+{
+ vfio_vga_probe_ati_3c3_quirk(vdev);
+ vfio_vga_probe_nvidia_3d0_quirk(vdev);
+}
+
+void vfio_vga_quirk_exit(VFIOPCIDevice *vdev)
+{
+ VFIOQuirk *quirk;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
+ QLIST_FOREACH(quirk, &vdev->vga->region[i].quirks, next) {
+ for (j = 0; j < quirk->nr_mem; j++) {
+ memory_region_del_subregion(&vdev->vga->region[i].mem,
+ &quirk->mem[j]);
+ }
+ }
+ }
+}
+
+void vfio_vga_quirk_finalize(VFIOPCIDevice *vdev)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
+ while (!QLIST_EMPTY(&vdev->vga->region[i].quirks)) {
+ VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga->region[i].quirks);
+ QLIST_REMOVE(quirk, next);
+ for (j = 0; j < quirk->nr_mem; j++) {
+ object_unparent(OBJECT(&quirk->mem[j]));
+ }
+ g_free(quirk->mem);
+ g_free(quirk->data);
+ g_free(quirk);
+ }
+ }
+}
+
+void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr)
+{
+ vfio_probe_ati_bar4_quirk(vdev, nr);
+ vfio_probe_ati_bar2_quirk(vdev, nr);
+ vfio_probe_nvidia_bar5_quirk(vdev, nr);
+ vfio_probe_nvidia_bar0_quirk(vdev, nr);
+ vfio_probe_rtl8168_bar2_quirk(vdev, nr);
+#ifdef CONFIG_VFIO_IGD
+ vfio_probe_igd_bar4_quirk(vdev, nr);
+#endif
+}
+
+void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr)
+{
+ VFIOBAR *bar = &vdev->bars[nr];
+ VFIOQuirk *quirk;
+ int i;
+
+ QLIST_FOREACH(quirk, &bar->quirks, next) {
+ while (!QLIST_EMPTY(&quirk->ioeventfds)) {
+ vfio_ioeventfd_exit(vdev, QLIST_FIRST(&quirk->ioeventfds));
+ }
+
+ for (i = 0; i < quirk->nr_mem; i++) {
+ memory_region_del_subregion(bar->region.mem, &quirk->mem[i]);
+ }
+ }
+}
+
+void vfio_bar_quirk_finalize(VFIOPCIDevice *vdev, int nr)
+{
+ VFIOBAR *bar = &vdev->bars[nr];
+ int i;
+
+ while (!QLIST_EMPTY(&bar->quirks)) {
+ VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
+ QLIST_REMOVE(quirk, next);
+ for (i = 0; i < quirk->nr_mem; i++) {
+ object_unparent(OBJECT(&quirk->mem[i]));
+ }
+ g_free(quirk->mem);
+ g_free(quirk->data);
+ g_free(quirk);
+ }
+}
+
+/*
+ * Reset quirks
+ */
+void vfio_quirk_reset(VFIOPCIDevice *vdev)
+{
+ int i;
+
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ VFIOQuirk *quirk;
+ VFIOBAR *bar = &vdev->bars[i];
+
+ QLIST_FOREACH(quirk, &bar->quirks, next) {
+ if (quirk->reset) {
+ quirk->reset(vdev, quirk);
+ }
+ }
+ }
+}
+
+/*
+ * AMD Radeon PCI config reset, based on Linux:
+ * drivers/gpu/drm/radeon/ci_smc.c:ci_is_smc_running()
+ * drivers/gpu/drm/radeon/radeon_device.c:radeon_pci_config_reset
+ * drivers/gpu/drm/radeon/ci_smc.c:ci_reset_smc()
+ * drivers/gpu/drm/radeon/ci_smc.c:ci_stop_smc_clock()
+ * IDs: include/drm/drm_pciids.h
+ * Registers: http://cgit.freedesktop.org/~agd5f/linux/commit/?id=4e2aa447f6f0
+ *
+ * Bonaire and Hawaii GPUs do not respond to a bus reset. This is a bug in the
+ * hardware that should be fixed on future ASICs. The symptom of this is that
+ * once the accelerated driver loads, Windows guests will BSOD on subsequent
+ * attempts to load the driver, such as after VM reset or shutdown/restart. To
+ * work around this, we do an AMD specific PCI config reset, followed by an SMC
+ * reset. The PCI config reset only works if SMC firmware is running, so we
+ * have a dependency on the state of the device as to whether this reset will
+ * be effective. There are still cases where we won't be able to kick the
+ * device into working, but this greatly improves the usability overall. The
+ * config reset magic is relatively common on AMD GPUs, but the setup and SMC
+ * poking is largely ASIC specific.
+ */
+static bool vfio_radeon_smc_is_running(VFIOPCIDevice *vdev)
+{
+ uint32_t clk, pc_c;
+
+ /*
+ * Registers 200h and 204h are index and data registers for accessing
+ * indirect configuration registers within the device.
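+ *
+ * e.g. writing index 0x80000004 to 0x200 and reading 0x204, as done
+ * below, returns the SMC clock state (clk); index 0x80000370 likewise
+ * yields the value checked as pc_c.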
+ */
+ vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4);
+ clk = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
+ vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000370, 4);
+ pc_c = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
+
+ return (!(clk & 1) && (0x20100 <= pc_c));
+}
+
+/*
+ * The scope of a config reset is controlled by a mode bit in the misc register
+ * and a fuse, exposed as a bit in another register. The fuse is the default
+ * (0 = GFX, 1 = whole GPU), the misc bit is a toggle, with the formula
+ * scope = !(misc ^ fuse), where the resulting scope is defined the same as
+ * the fuse. A truth table therefore tells us that if misc == fuse, we need
+ * to flip the value of the bit in the misc register.
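+ *
+ * Truth table for illustration (scope: 0 = GFX only, 1 = whole GPU):
+ *   misc  fuse  !(misc ^ fuse)  action
+ *    0     0          1         flip misc
+ *    0     1          0         leave as-is
+ *    1     0          0         leave as-is
+ *    1     1          1         flip misc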
+ */
+static void vfio_radeon_set_gfx_only_reset(VFIOPCIDevice *vdev)
+{
+ uint32_t misc, fuse;
+ bool a, b;
+
+ vfio_region_write(&vdev->bars[5].region, 0x200, 0xc00c0000, 4);
+ fuse = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
+ b = fuse & 64;
+
+ vfio_region_write(&vdev->bars[5].region, 0x200, 0xc0000010, 4);
+ misc = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
+ a = misc & 2;
+
+ if (a == b) {
+ vfio_region_write(&vdev->bars[5].region, 0x204, misc ^ 2, 4);
+ vfio_region_read(&vdev->bars[5].region, 0x204, 4); /* flush */
+ }
+}
+
+static int vfio_radeon_reset(VFIOPCIDevice *vdev)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ int i, ret = 0;
+ uint32_t data;
+
+ /* Defer to a kernel implemented reset */
+ if (vdev->vbasedev.reset_works) {
+ trace_vfio_quirk_ati_bonaire_reset_skipped(vdev->vbasedev.name);
+ return -ENODEV;
+ }
+
+ /* Enable only memory BAR access */
+ vfio_pci_write_config(pdev, PCI_COMMAND, PCI_COMMAND_MEMORY, 2);
+
+ /* Reset only works if SMC firmware is loaded and running */
+ if (!vfio_radeon_smc_is_running(vdev)) {
+ ret = -EINVAL;
+ trace_vfio_quirk_ati_bonaire_reset_no_smc(vdev->vbasedev.name);
+ goto out;
+ }
+
+ /* Make sure only the GFX function is reset */
+ vfio_radeon_set_gfx_only_reset(vdev);
+
+ /* AMD PCI config reset */
+ vfio_pci_write_config(pdev, 0x7c, 0x39d5e86b, 4);
+ usleep(100);
+
+ /* Read back the memory size to make sure we're out of reset */
+ for (i = 0; i < 100000; i++) {
+ if (vfio_region_read(&vdev->bars[5].region, 0x5428, 4) != 0xffffffff) {
+ goto reset_smc;
+ }
+ usleep(1);
+ }
+
+ trace_vfio_quirk_ati_bonaire_reset_timeout(vdev->vbasedev.name);
+
+reset_smc:
+ /* Reset SMC */
+ vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000000, 4);
+ data = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
+ data |= 1;
+ vfio_region_write(&vdev->bars[5].region, 0x204, data, 4);
+
+ /* Disable SMC clock */
+ vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4);
+ data = vfio_region_read(&vdev->bars[5].region, 0x204, 4);
+ data |= 1;
+ vfio_region_write(&vdev->bars[5].region, 0x204, data, 4);
+
+ trace_vfio_quirk_ati_bonaire_reset_done(vdev->vbasedev.name);
+
+out:
+ /* Restore PCI command register */
+ vfio_pci_write_config(pdev, PCI_COMMAND, 0, 2);
+
+ return ret;
+}
+
+void vfio_setup_resetfn_quirk(VFIOPCIDevice *vdev)
+{
+ switch (vdev->vendor_id) {
+ case 0x1002:
+ switch (vdev->device_id) {
+ /* Bonaire */
+ case 0x6649: /* Bonaire [FirePro W5100] */
+ case 0x6650:
+ case 0x6651:
+ case 0x6658: /* Bonaire XTX [Radeon R7 260X] */
+ case 0x665c: /* Bonaire XT [Radeon HD 7790/8770 / R9 260 OEM] */
+ case 0x665d: /* Bonaire [Radeon R7 200 Series] */
+ /* Hawaii */
+ case 0x67A0: /* Hawaii XT GL [FirePro W9100] */
+ case 0x67A1: /* Hawaii PRO GL [FirePro W8100] */
+ case 0x67A2:
+ case 0x67A8:
+ case 0x67A9:
+ case 0x67AA:
+ case 0x67B0: /* Hawaii XT [Radeon R9 290X] */
+ case 0x67B1: /* Hawaii PRO [Radeon R9 290] */
+ case 0x67B8:
+ case 0x67B9:
+ case 0x67BA:
+ case 0x67BE:
+ vdev->resetfn = vfio_radeon_reset;
+ trace_vfio_quirk_ati_bonaire_reset(vdev->vbasedev.name);
+ break;
+ }
+ break;
+ }
+}
+
+/*
+ * The NVIDIA GPUDirect P2P Vendor capability allows the user to specify
+ * devices as a member of a clique. Devices within the same clique ID
+ * are capable of direct P2P. It's the user's responsibility to ensure that
+ * this is correct. The spec says that this may reside at any unused config
+ * offset, but reserves and recommends hypervisors place this at C8h.
+ * The spec also states that the hypervisor should place this capability
+ * at the end of the capability list, thus next is defined as 0h.
+ *
+ * +----------------+----------------+----------------+----------------+
+ * | sig 7:0 ('P') | vndr len (8h) | next (0h) | cap id (9h) |
+ * +----------------+----------------+----------------+----------------+
+ * | rsvd 15:7(0h),id 6:3,ver 2:0(0h)| sig 23:8 ('P2') |
+ * +---------------------------------+---------------------------------+
+ *
+ * https://lists.gnu.org/archive/html/qemu-devel/2017-08/pdfUda5iEpgOS.pdf
+ */
+static void get_nv_gpudirect_clique_id(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ Property *prop = opaque;
+ uint8_t *ptr = object_field_prop_ptr(obj, prop);
+
+ visit_type_uint8(v, name, ptr, errp);
+}
+
+static void set_nv_gpudirect_clique_id(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ Property *prop = opaque;
+ uint8_t value, *ptr = object_field_prop_ptr(obj, prop);
+
+ if (!visit_type_uint8(v, name, &value, errp)) {
+ return;
+ }
+
+ if (value & ~0xF) {
+ error_setg(errp, "Property %s: valid range 0-15", name);
+ return;
+ }
+
+ *ptr = value;
+}
+
+const PropertyInfo qdev_prop_nv_gpudirect_clique = {
+ .name = "uint4",
+ .description = "NVIDIA GPUDirect Clique ID (0 - 15)",
+ .get = get_nv_gpudirect_clique_id,
+ .set = set_nv_gpudirect_clique_id,
+};
+
+static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ int ret, pos = 0xC8;
+
+ if (vdev->nv_gpudirect_clique == 0xFF) {
+ return 0;
+ }
+
+ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID)) {
+ error_setg(errp, "NVIDIA GPUDirect Clique ID: invalid device vendor");
+ return -EINVAL;
+ }
+
+ if (pci_get_byte(pdev->config + PCI_CLASS_DEVICE + 1) !=
+ PCI_BASE_CLASS_DISPLAY) {
+ error_setg(errp, "NVIDIA GPUDirect Clique ID: unsupported PCI class");
+ return -EINVAL;
+ }
+
+ ret = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, 8, errp);
+ if (ret < 0) {
+ error_prepend(errp, "Failed to add NVIDIA GPUDirect cap: ");
+ return ret;
+ }
+
+ memset(vdev->emulated_config_bits + pos, 0xFF, 8);
+ pos += PCI_CAP_FLAGS;
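+ /*
+ * Body per the layout above: vendor length (8h), the signature bytes
+ * ('P', '2', 'P'), the clique ID in bits 6:3 with ver/rsvd left 0h,
+ * and a trailing 0h byte.
+ */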
+ pci_set_byte(pdev->config + pos++, 8);
+ pci_set_byte(pdev->config + pos++, 'P');
+ pci_set_byte(pdev->config + pos++, '2');
+ pci_set_byte(pdev->config + pos++, 'P');
+ pci_set_byte(pdev->config + pos++, vdev->nv_gpudirect_clique << 3);
+ pci_set_byte(pdev->config + pos, 0);
+
+ return 0;
+}
+
+static void vfio_pci_nvlink2_get_tgt(Object *obj, Visitor *v,
+ const char *name,
+ void *opaque, Error **errp)
+{
+ uint64_t tgt = (uintptr_t) opaque;
+ visit_type_uint64(v, name, &tgt, errp);
+}
+
+static void vfio_pci_nvlink2_get_link_speed(Object *obj, Visitor *v,
+ const char *name,
+ void *opaque, Error **errp)
+{
+ uint32_t link_speed = (uint32_t)(uintptr_t) opaque;
+ visit_type_uint32(v, name, &link_speed, errp);
+}
+
+int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp)
+{
+ int ret;
+ void *p;
+ struct vfio_region_info *nv2reg = NULL;
+ struct vfio_info_cap_header *hdr;
+ struct vfio_region_info_cap_nvlink2_ssatgt *cap;
+ VFIOQuirk *quirk;
+
+ ret = vfio_get_dev_region_info(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
+ PCI_VENDOR_ID_NVIDIA,
+ VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
+ &nv2reg);
+ if (ret) {
+ return ret;
+ }
+
+ hdr = vfio_get_region_info_cap(nv2reg, VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
+ if (!hdr) {
+ ret = -ENODEV;
+ goto free_exit;
+ }
+ cap = (void *) hdr;
+
+ p = mmap(NULL, nv2reg->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, vdev->vbasedev.fd, nv2reg->offset);
+ if (p == MAP_FAILED) {
+ ret = -errno;
+ goto free_exit;
+ }
+
+ quirk = vfio_quirk_alloc(1);
+ memory_region_init_ram_ptr(&quirk->mem[0], OBJECT(vdev), "nvlink2-mr",
+ nv2reg->size, p);
+ QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);
+
+ object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64",
+ vfio_pci_nvlink2_get_tgt, NULL, NULL,
+ (void *) (uintptr_t) cap->tgt);
+ trace_vfio_pci_nvidia_gpu_setup_quirk(vdev->vbasedev.name, cap->tgt,
+ nv2reg->size);
+free_exit:
+ g_free(nv2reg);
+
+ return ret;
+}
+
+int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp)
+{
+ int ret;
+ void *p;
+ struct vfio_region_info *atsdreg = NULL;
+ struct vfio_info_cap_header *hdr;
+ struct vfio_region_info_cap_nvlink2_ssatgt *captgt;
+ struct vfio_region_info_cap_nvlink2_lnkspd *capspeed;
+ VFIOQuirk *quirk;
+
+ ret = vfio_get_dev_region_info(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
+ PCI_VENDOR_ID_IBM,
+ VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
+ &atsdreg);
+ if (ret) {
+ return ret;
+ }
+
+ hdr = vfio_get_region_info_cap(atsdreg,
+ VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
+ if (!hdr) {
+ ret = -ENODEV;
+ goto free_exit;
+ }
+ captgt = (void *) hdr;
+
+ hdr = vfio_get_region_info_cap(atsdreg,
+ VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD);
+ if (!hdr) {
+ ret = -ENODEV;
+ goto free_exit;
+ }
+ capspeed = (void *) hdr;
+
+ /* Some NVLink bridges may not have an assigned ATSD */
+ if (atsdreg->size) {
+ p = mmap(NULL, atsdreg->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, vdev->vbasedev.fd, atsdreg->offset);
+ if (p == MAP_FAILED) {
+ ret = -errno;
+ goto free_exit;
+ }
+
+ quirk = vfio_quirk_alloc(1);
+ memory_region_init_ram_device_ptr(&quirk->mem[0], OBJECT(vdev),
+ "nvlink2-atsd-mr", atsdreg->size, p);
+ QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);
+ }
+
+ object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64",
+ vfio_pci_nvlink2_get_tgt, NULL, NULL,
+ (void *) (uintptr_t) captgt->tgt);
+ trace_vfio_pci_nvlink2_setup_quirk_ssatgt(vdev->vbasedev.name, captgt->tgt,
+ atsdreg->size);
+
+ object_property_add(OBJECT(vdev), "nvlink2-link-speed", "uint32",
+ vfio_pci_nvlink2_get_link_speed, NULL, NULL,
+ (void *) (uintptr_t) capspeed->link_speed);
+ trace_vfio_pci_nvlink2_setup_quirk_lnkspd(vdev->vbasedev.name,
+ capspeed->link_speed);
+free_exit:
+ g_free(atsdreg);
+
+ return ret;
+}
+
+/*
+ * The VMD endpoint provides a real PCIe domain to the guest and the guest
+ * kernel performs enumeration of the VMD sub-device domain. Guest transactions
+ * to VMD sub-devices go through MMU translation from guest addresses to
+ * physical addresses. When MMIO goes to an endpoint after being translated to
+ * physical addresses, the bridge rejects the transaction because the window
+ * has been programmed with guest addresses.
+ *
+ * VMD can use the Host Physical Address in order to correctly program the
+ * bridge windows in its PCIe domain. VMD device 28C0 has HPA shadow registers
+ * located at offset 0x2000 in MEMBAR2 (BAR 4). This quirk provides the HPA
+ * shadow registers in a vendor-specific capability register for devices
+ * without native support. The 0xE8-0xFF range lies within the reserved
+ * portion of the VMD device capability space, following the Power Management
+ * Capability.
+ */
+#define VMD_SHADOW_CAP_VER 1
+#define VMD_SHADOW_CAP_LEN 24
+static int vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp)
+{
+ uint8_t membar_phys[16];
+ int ret, pos = 0xE8;
+
+ if (!(vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x201D) ||
+ vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x467F) ||
+ vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x4C3D) ||
+ vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x9A0B))) {
+ return 0;
+ }
+
+ ret = pread(vdev->vbasedev.fd, membar_phys, 16,
+ vdev->config_offset + PCI_BASE_ADDRESS_2);
+ if (ret != 16) {
+ error_report("VMD %s cannot read MEMBARs (%d)",
+ vdev->vbasedev.name, ret);
+ return -EFAULT;
+ }
+
+ ret = pci_add_capability(&vdev->pdev, PCI_CAP_ID_VNDR, pos,
+ VMD_SHADOW_CAP_LEN, errp);
+ if (ret < 0) {
+ error_prepend(errp, "Failed to add VMD MEMBAR Shadow cap: ");
+ return ret;
+ }
+
+ memset(vdev->emulated_config_bits + pos, 0xFF, VMD_SHADOW_CAP_LEN);
+ pos += PCI_CAP_FLAGS;
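+ /*
+ * Cap body: length, version, the "SHDW" signature, then the 16 bytes
+ * of physical BAR values read from PCI_BASE_ADDRESS_2 above.
+ */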
+ pci_set_byte(vdev->pdev.config + pos++, VMD_SHADOW_CAP_LEN);
+ pci_set_byte(vdev->pdev.config + pos++, VMD_SHADOW_CAP_VER);
+ pci_set_long(vdev->pdev.config + pos, 0x53484457); /* SHDW */
+ memcpy(vdev->pdev.config + pos + 4, membar_phys, 16);
+
+ return 0;
+}
+
+int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp)
+{
+ int ret;
+
+ ret = vfio_add_nv_gpudirect_cap(vdev, errp);
+ if (ret) {
+ return ret;
+ }
+
+ ret = vfio_add_vmd_shadow_cap(vdev, errp);
+ if (ret) {
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
new file mode 100644
index 000000000..7b45353ce
--- /dev/null
+++ b/hw/vfio/pci.c
@@ -0,0 +1,3328 @@
+/*
+ * vfio based device assignment support
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Based on qemu-kvm device-assignment:
+ * Adapted for KVM by Qumranet.
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
+ * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
+ * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
+ * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
+ * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
+ */
+
+#include "qemu/osdep.h"
+#include <linux/vfio.h>
+#include <sys/ioctl.h>
+
+#include "hw/hw.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "hw/pci/pci_bridge.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "migration/vmstate.h"
+#include "qapi/qmp/qdict.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "qemu/range.h"
+#include "qemu/units.h"
+#include "sysemu/kvm.h"
+#include "sysemu/runstate.h"
+#include "pci.h"
+#include "trace.h"
+#include "qapi/error.h"
+#include "migration/blocker.h"
+#include "migration/qemu-file.h"
+
+#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"
+
+static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
+static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
+
+/*
+ * Disabling BAR mmapping can be slow, but toggling it around INTx can
+ * also be a huge overhead. We try to get the best of both worlds by
+ * waiting until an interrupt to disable mmaps (subsequent transitions
+ * to the same state are effectively no overhead). If the interrupt has
+ * been serviced and the time gap is long enough, we re-enable mmaps for
+ * performance. This works well for things like graphics cards, which
+ * may not use their interrupt at all and are penalized to an unusable
+ * level by read/write BAR traps. Other devices, like NICs, have more
+ * regular interrupts and see much better latency by staying in non-mmap
+ * mode. We therefore set the default mmap_timeout such that a ping
+ * is just enough to keep the mmap disabled. Users can experiment with
+ * other options with the x-intx-mmap-timeout-ms parameter (a value of
+ * zero disables the timer).
+ */
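+/*
+ * For example (hypothetical host address):
+ *   -device vfio-pci,host=02:00.0,x-intx-mmap-timeout-ms=0
+ * keeps the timer from ever being armed, so mmaps stay disabled once an
+ * INTx interrupt fires.
+ */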
+static void vfio_intx_mmap_enable(void *opaque)
+{
+ VFIOPCIDevice *vdev = opaque;
+
+ if (vdev->intx.pending) {
+ timer_mod(vdev->intx.mmap_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
+ return;
+ }
+
+ vfio_mmap_set_enabled(vdev, true);
+}
+
+static void vfio_intx_interrupt(void *opaque)
+{
+ VFIOPCIDevice *vdev = opaque;
+
+ if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
+ return;
+ }
+
+ trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);
+
+ vdev->intx.pending = true;
+ pci_irq_assert(&vdev->pdev);
+ vfio_mmap_set_enabled(vdev, false);
+ if (vdev->intx.mmap_timeout) {
+ timer_mod(vdev->intx.mmap_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
+ }
+}
+
+static void vfio_intx_eoi(VFIODevice *vbasedev)
+{
+ VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+
+ if (!vdev->intx.pending) {
+ return;
+ }
+
+ trace_vfio_intx_eoi(vbasedev->name);
+
+ vdev->intx.pending = false;
+ pci_irq_deassert(&vdev->pdev);
+ vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+}
+
+static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
+{
+#ifdef CONFIG_KVM
+ int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt);
+
+ if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
+ vdev->intx.route.mode != PCI_INTX_ENABLED ||
+ !kvm_resamplefds_enabled()) {
+ return;
+ }
+
+ /* Get to a known interrupt state */
+ qemu_set_fd_handler(irq_fd, NULL, NULL, vdev);
+ vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+ vdev->intx.pending = false;
+ pci_irq_deassert(&vdev->pdev);
+
+ /* Get an eventfd for resample/unmask */
+ if (event_notifier_init(&vdev->intx.unmask, 0)) {
+ error_setg(errp, "event_notifier_init failed eoi");
+ goto fail;
+ }
+
+ if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
+ &vdev->intx.interrupt,
+ &vdev->intx.unmask,
+ vdev->intx.route.irq)) {
+ error_setg_errno(errp, errno, "failed to setup resample irqfd");
+ goto fail_irqfd;
+ }
+
+ if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
+ VFIO_IRQ_SET_ACTION_UNMASK,
+ event_notifier_get_fd(&vdev->intx.unmask),
+ errp)) {
+ goto fail_vfio;
+ }
+
+ /* Let'em rip */
+ vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+
+ vdev->intx.kvm_accel = true;
+
+ trace_vfio_intx_enable_kvm(vdev->vbasedev.name);
+
+ return;
+
+fail_vfio:
+ kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
+ vdev->intx.route.irq);
+fail_irqfd:
+ event_notifier_cleanup(&vdev->intx.unmask);
+fail:
+ qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
+ vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+#endif
+}
+
+static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
+{
+#ifdef CONFIG_KVM
+ if (!vdev->intx.kvm_accel) {
+ return;
+ }
+
+ /*
+ * Get to a known state, hardware masked, QEMU ready to accept new
+ * interrupts, QEMU IRQ de-asserted.
+ */
+ vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+ vdev->intx.pending = false;
+ pci_irq_deassert(&vdev->pdev);
+
+ /* Tell KVM to stop listening for an INTx irqfd */
+ if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
+ vdev->intx.route.irq)) {
+ error_report("vfio: Error: Failed to disable INTx irqfd: %m");
+ }
+
+ /* We only need to close the eventfd for VFIO to cleanup the kernel side */
+ event_notifier_cleanup(&vdev->intx.unmask);
+
+ /* QEMU starts listening for interrupt events. */
+ qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
+ vfio_intx_interrupt, NULL, vdev);
+
+ vdev->intx.kvm_accel = false;
+
+ /* If we've missed an event, let it re-fire through QEMU */
+ vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+
+ trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
+#endif
+}
+
+static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
+{
+ Error *err = NULL;
+
+ trace_vfio_intx_update(vdev->vbasedev.name,
+ vdev->intx.route.irq, route->irq);
+
+ vfio_intx_disable_kvm(vdev);
+
+ vdev->intx.route = *route;
+
+ if (route->mode != PCI_INTX_ENABLED) {
+ return;
+ }
+
+ vfio_intx_enable_kvm(vdev, &err);
+ if (err) {
+ warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ }
+
+ /* Re-enable the interrupt in case we missed an EOI */
+ vfio_intx_eoi(&vdev->vbasedev);
+}
+
+static void vfio_intx_routing_notifier(PCIDevice *pdev)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ PCIINTxRoute route;
+
+ if (vdev->interrupt != VFIO_INT_INTx) {
+ return;
+ }
+
+ route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
+
+ if (pci_intx_route_changed(&vdev->intx.route, &route)) {
+ vfio_intx_update(vdev, &route);
+ }
+}
+
+static void vfio_irqchip_change(Notifier *notify, void *data)
+{
+ VFIOPCIDevice *vdev = container_of(notify, VFIOPCIDevice,
+ irqchip_change_notifier);
+
+ vfio_intx_update(vdev, &vdev->intx.route);
+}
+
+static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
+{
+ uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
+ Error *err = NULL;
+ int32_t fd;
+ int ret;
+
+ if (!pin) {
+ return 0;
+ }
+
+ vfio_disable_interrupts(vdev);
+
+ vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
+ pci_config_set_interrupt_pin(vdev->pdev.config, pin);
+
+#ifdef CONFIG_KVM
+ /*
+ * Only conditional to avoid generating error messages on platforms
+ * where we won't actually use the result anyway.
+ */
+ if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
+ vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
+ vdev->intx.pin);
+ }
+#endif
+
+ ret = event_notifier_init(&vdev->intx.interrupt, 0);
+ if (ret) {
+ error_setg_errno(errp, -ret, "event_notifier_init failed");
+ return ret;
+ }
+ fd = event_notifier_get_fd(&vdev->intx.interrupt);
+ qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);
+
+ if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
+ qemu_set_fd_handler(fd, NULL, NULL, vdev);
+ event_notifier_cleanup(&vdev->intx.interrupt);
+ return -errno;
+ }
+
+ vfio_intx_enable_kvm(vdev, &err);
+ if (err) {
+ warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ }
+
+ vdev->interrupt = VFIO_INT_INTx;
+
+ trace_vfio_intx_enable(vdev->vbasedev.name);
+ return 0;
+}
+
+static void vfio_intx_disable(VFIOPCIDevice *vdev)
+{
+ int fd;
+
+ timer_del(vdev->intx.mmap_timer);
+ vfio_intx_disable_kvm(vdev);
+ vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+ vdev->intx.pending = false;
+ pci_irq_deassert(&vdev->pdev);
+ vfio_mmap_set_enabled(vdev, true);
+
+ fd = event_notifier_get_fd(&vdev->intx.interrupt);
+ qemu_set_fd_handler(fd, NULL, NULL, vdev);
+ event_notifier_cleanup(&vdev->intx.interrupt);
+
+ vdev->interrupt = VFIO_INT_NONE;
+
+ trace_vfio_intx_disable(vdev->vbasedev.name);
+}
+
+/*
+ * MSI/X
+ */
+static void vfio_msi_interrupt(void *opaque)
+{
+ VFIOMSIVector *vector = opaque;
+ VFIOPCIDevice *vdev = vector->vdev;
+ MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
+ void (*notify)(PCIDevice *dev, unsigned vector);
+ MSIMessage msg;
+ int nr = vector - vdev->msi_vectors;
+
+ if (!event_notifier_test_and_clear(&vector->interrupt)) {
+ return;
+ }
+
+ if (vdev->interrupt == VFIO_INT_MSIX) {
+ get_msg = msix_get_message;
+ notify = msix_notify;
+
+ /* A masked vector firing needs to use the PBA, enable it */
+ if (msix_is_masked(&vdev->pdev, nr)) {
+ set_bit(nr, vdev->msix->pending);
+ memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
+ trace_vfio_msix_pba_enable(vdev->vbasedev.name);
+ }
+ } else if (vdev->interrupt == VFIO_INT_MSI) {
+ get_msg = msi_get_message;
+ notify = msi_notify;
+ } else {
+ abort();
+ }
+
+ msg = get_msg(&vdev->pdev, nr);
+ trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
+ notify(&vdev->pdev, nr);
+}
+
+static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
+{
+ struct vfio_irq_set *irq_set;
+ int ret = 0, i, argsz;
+ int32_t *fds;
+
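+ /* Variable-length payload: vfio_irq_set header plus one fd per vector */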
+ argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));
+
+ irq_set = g_malloc0(argsz);
+ irq_set->argsz = argsz;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
+ irq_set->start = 0;
+ irq_set->count = vdev->nr_vectors;
+ fds = (int32_t *)&irq_set->data;
+
+ for (i = 0; i < vdev->nr_vectors; i++) {
+ int fd = -1;
+
+ /*
+ * MSI vs MSI-X - The guest has direct access to MSI mask and pending
+ * bits, therefore we always use the KVM signaling path when set up.
+ * MSI-X mask and pending bits are emulated, so we want to use the
+ * KVM signaling path only when configured and unmasked.
+ */
+ if (vdev->msi_vectors[i].use) {
+ if (vdev->msi_vectors[i].virq < 0 ||
+ (msix && msix_is_masked(&vdev->pdev, i))) {
+ fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
+ } else {
+ fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
+ }
+ }
+
+ fds[i] = fd;
+ }
+
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ g_free(irq_set);
+
+ return ret;
+}
+
+static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
+ int vector_n, bool msix)
+{
+ int virq;
+
+ if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
+ return;
+ }
+
+ if (event_notifier_init(&vector->kvm_interrupt, 0)) {
+ return;
+ }
+
+ virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
+ if (virq < 0) {
+ event_notifier_cleanup(&vector->kvm_interrupt);
+ return;
+ }
+
+ if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
+ NULL, virq) < 0) {
+ kvm_irqchip_release_virq(kvm_state, virq);
+ event_notifier_cleanup(&vector->kvm_interrupt);
+ return;
+ }
+
+ vector->virq = virq;
+}
+
+static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
+{
+ kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
+ vector->virq);
+ kvm_irqchip_release_virq(kvm_state, vector->virq);
+ vector->virq = -1;
+ event_notifier_cleanup(&vector->kvm_interrupt);
+}
+
+static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
+ PCIDevice *pdev)
+{
+ kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
+ kvm_irqchip_commit_routes(kvm_state);
+}
+
+static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
+ MSIMessage *msg, IOHandler *handler)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ VFIOMSIVector *vector;
+ int ret;
+
+ trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);
+
+ vector = &vdev->msi_vectors[nr];
+
+ if (!vector->use) {
+ vector->vdev = vdev;
+ vector->virq = -1;
+ if (event_notifier_init(&vector->interrupt, 0)) {
+ error_report("vfio: Error: event_notifier_init failed");
+ }
+ vector->use = true;
+ msix_vector_use(pdev, nr);
+ }
+
+ qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
+ handler, NULL, vector);
+
+ /*
+ * Attempt to enable route through KVM irqchip,
+ * default to userspace handling if unavailable.
+ */
+ if (vector->virq >= 0) {
+ if (!msg) {
+ vfio_remove_kvm_msi_virq(vector);
+ } else {
+ vfio_update_kvm_msi_virq(vector, *msg, pdev);
+ }
+ } else {
+ if (msg) {
+ vfio_add_kvm_msi_virq(vdev, vector, nr, true);
+ }
+ }
+
+ /*
+     * We don't want the host to allocate all possible MSI vectors for a
+     * device if they're not in use, so we shut them down and incrementally
+     * increase them as needed.
+ */
+ if (vdev->nr_vectors < nr + 1) {
+ vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
+ vdev->nr_vectors = nr + 1;
+ ret = vfio_enable_vectors(vdev, true);
+ if (ret) {
+ error_report("vfio: failed to enable vectors, %d", ret);
+ }
+ } else {
+ Error *err = NULL;
+ int32_t fd;
+
+ if (vector->virq >= 0) {
+ fd = event_notifier_get_fd(&vector->kvm_interrupt);
+ } else {
+ fd = event_notifier_get_fd(&vector->interrupt);
+ }
+
+ if (vfio_set_irq_signaling(&vdev->vbasedev,
+ VFIO_PCI_MSIX_IRQ_INDEX, nr,
+ VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ }
+ }
+
+    /*
+     * Disable PBA emulation when nothing more is pending; find_first_bit()
+     * returns the bitmap size when no bits are set, hence the comparison.
+     */
+ clear_bit(nr, vdev->msix->pending);
+ if (find_first_bit(vdev->msix->pending,
+ vdev->nr_vectors) == vdev->nr_vectors) {
+ memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
+ trace_vfio_msix_pba_disable(vdev->vbasedev.name);
+ }
+
+ return 0;
+}
+
+static int vfio_msix_vector_use(PCIDevice *pdev,
+ unsigned int nr, MSIMessage msg)
+{
+ return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
+}
+
+static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ VFIOMSIVector *vector = &vdev->msi_vectors[nr];
+
+ trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);
+
+ /*
+ * There are still old guests that mask and unmask vectors on every
+ * interrupt. If we're using QEMU bypass with a KVM irqfd, leave all of
+ * the KVM setup in place, simply switch VFIO to use the non-bypass
+ * eventfd. We'll then fire the interrupt through QEMU and the MSI-X
+ * core will mask the interrupt and set pending bits, allowing it to
+ * be re-asserted on unmask. Nothing to do if already using QEMU mode.
+ */
+ if (vector->virq >= 0) {
+ int32_t fd = event_notifier_get_fd(&vector->interrupt);
+ Error *err = NULL;
+
+ if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
+ VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ }
+ }
+}
+
+static void vfio_msix_enable(VFIOPCIDevice *vdev)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ unsigned int nr, max_vec = 0;
+
+ vfio_disable_interrupts(vdev);
+
+ vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);
+
+ vdev->interrupt = VFIO_INT_MSIX;
+
+ /*
+ * Some communication channels between VF & PF or PF & fw rely on the
+ * physical state of the device and expect that enabling MSI-X from the
+ * guest enables the same on the host. When our guest is Linux, the
+ * guest driver call to pci_enable_msix() sets the enabling bit in the
+ * MSI-X capability, but leaves the vector table masked. We therefore
+ * can't rely on a vector_use callback (from request_irq() in the guest)
+ * to switch the physical device into MSI-X mode because that may come a
+     * long time after pci_enable_msix(). This code enables vector 0 with
+     * triggering to userspace, then immediately releases the vector, leaving
+     * the physical device with no vectors enabled, but with MSI-X enabled,
+     * just like the guest view.
+ * If there are already unmasked vectors (in migration resume phase and
+ * some guest startups) which will be enabled soon, we can allocate all
+ * of them here to avoid inefficiently disabling and enabling vectors
+ * repeatedly later.
+ */
+ if (!pdev->msix_function_masked) {
+ for (nr = 0; nr < msix_nr_vectors_allocated(pdev); nr++) {
+ if (!msix_is_masked(pdev, nr)) {
+ max_vec = nr;
+ }
+ }
+ }
+ vfio_msix_vector_do_use(pdev, max_vec, NULL, NULL);
+ vfio_msix_vector_release(pdev, max_vec);
+
+ if (msix_set_vector_notifiers(pdev, vfio_msix_vector_use,
+ vfio_msix_vector_release, NULL)) {
+ error_report("vfio: msix_set_vector_notifiers failed");
+ }
+
+ trace_vfio_msix_enable(vdev->vbasedev.name);
+}
+
+static void vfio_msi_enable(VFIOPCIDevice *vdev)
+{
+ int ret, i;
+
+ vfio_disable_interrupts(vdev);
+
+ vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
+retry:
+ vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);
+
+ for (i = 0; i < vdev->nr_vectors; i++) {
+ VFIOMSIVector *vector = &vdev->msi_vectors[i];
+
+ vector->vdev = vdev;
+ vector->virq = -1;
+ vector->use = true;
+
+ if (event_notifier_init(&vector->interrupt, 0)) {
+ error_report("vfio: Error: event_notifier_init failed");
+ }
+
+ qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
+ vfio_msi_interrupt, NULL, vector);
+
+ /*
+ * Attempt to enable route through KVM irqchip,
+ * default to userspace handling if unavailable.
+ */
+ vfio_add_kvm_msi_virq(vdev, vector, i, false);
+ }
+
+ /* Set interrupt type prior to possible interrupts */
+ vdev->interrupt = VFIO_INT_MSI;
+
+ ret = vfio_enable_vectors(vdev, false);
+ if (ret) {
+ if (ret < 0) {
+ error_report("vfio: Error: Failed to setup MSI fds: %m");
+ } else if (ret != vdev->nr_vectors) {
+ error_report("vfio: Error: Failed to enable %d "
+ "MSI vectors, retry with %d", vdev->nr_vectors, ret);
+ }
+
+ for (i = 0; i < vdev->nr_vectors; i++) {
+ VFIOMSIVector *vector = &vdev->msi_vectors[i];
+ if (vector->virq >= 0) {
+ vfio_remove_kvm_msi_virq(vector);
+ }
+ qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
+ NULL, NULL, NULL);
+ event_notifier_cleanup(&vector->interrupt);
+ }
+
+ g_free(vdev->msi_vectors);
+ vdev->msi_vectors = NULL;
+
+ if (ret > 0 && ret != vdev->nr_vectors) {
+ vdev->nr_vectors = ret;
+ goto retry;
+ }
+ vdev->nr_vectors = 0;
+
+ /*
+         * Failing to set up MSI doesn't really fall within any specification.
+         * Let's try leaving interrupts disabled and hope the guest figures
+         * out how to fall back to INTx for this device.
+ */
+ error_report("vfio: Error: Failed to enable MSI");
+ vdev->interrupt = VFIO_INT_NONE;
+
+ return;
+ }
+
+ trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
+}
+
+static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
+{
+ Error *err = NULL;
+ int i;
+
+ for (i = 0; i < vdev->nr_vectors; i++) {
+ VFIOMSIVector *vector = &vdev->msi_vectors[i];
+ if (vdev->msi_vectors[i].use) {
+ if (vector->virq >= 0) {
+ vfio_remove_kvm_msi_virq(vector);
+ }
+ qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
+ NULL, NULL, NULL);
+ event_notifier_cleanup(&vector->interrupt);
+ }
+ }
+
+ g_free(vdev->msi_vectors);
+ vdev->msi_vectors = NULL;
+ vdev->nr_vectors = 0;
+ vdev->interrupt = VFIO_INT_NONE;
+
+ vfio_intx_enable(vdev, &err);
+ if (err) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ }
+}
+
+static void vfio_msix_disable(VFIOPCIDevice *vdev)
+{
+ int i;
+
+ msix_unset_vector_notifiers(&vdev->pdev);
+
+ /*
+ * MSI-X will only release vectors if MSI-X is still enabled on the
+     * device, so check through the rest and release them ourselves if
+     * necessary.
+ */
+ for (i = 0; i < vdev->nr_vectors; i++) {
+ if (vdev->msi_vectors[i].use) {
+ vfio_msix_vector_release(&vdev->pdev, i);
+ msix_vector_unuse(&vdev->pdev, i);
+ }
+ }
+
+ if (vdev->nr_vectors) {
+ vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
+ }
+
+ vfio_msi_disable_common(vdev);
+
+ memset(vdev->msix->pending, 0,
+ BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));
+
+ trace_vfio_msix_disable(vdev->vbasedev.name);
+}
+
+static void vfio_msi_disable(VFIOPCIDevice *vdev)
+{
+ vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
+ vfio_msi_disable_common(vdev);
+
+ trace_vfio_msi_disable(vdev->vbasedev.name);
+}
+
+static void vfio_update_msi(VFIOPCIDevice *vdev)
+{
+ int i;
+
+ for (i = 0; i < vdev->nr_vectors; i++) {
+ VFIOMSIVector *vector = &vdev->msi_vectors[i];
+ MSIMessage msg;
+
+ if (!vector->use || vector->virq < 0) {
+ continue;
+ }
+
+ msg = msi_get_message(&vdev->pdev, i);
+ vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
+ }
+}
+
+static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
+{
+ struct vfio_region_info *reg_info;
+ uint64_t size;
+ off_t off = 0;
+ ssize_t bytes;
+
+ if (vfio_get_region_info(&vdev->vbasedev,
+ VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
+ error_report("vfio: Error getting ROM info: %m");
+ return;
+ }
+
+ trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
+ (unsigned long)reg_info->offset,
+ (unsigned long)reg_info->flags);
+
+ vdev->rom_size = size = reg_info->size;
+ vdev->rom_offset = reg_info->offset;
+
+ g_free(reg_info);
+
+ if (!vdev->rom_size) {
+ vdev->rom_read_failed = true;
+ error_report("vfio-pci: Cannot read device rom at "
+ "%s", vdev->vbasedev.name);
+ error_printf("Device option ROM contents are probably invalid "
+ "(check dmesg).\nSkip option ROM probe with rombar=0, "
+ "or load from file with romfile=\n");
+ return;
+ }
+
+ vdev->rom = g_malloc(size);
+ memset(vdev->rom, 0xff, size);
+
+ while (size) {
+ bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
+ size, vdev->rom_offset + off);
+ if (bytes == 0) {
+ break;
+ } else if (bytes > 0) {
+ off += bytes;
+ size -= bytes;
+ } else {
+ if (errno == EINTR || errno == EAGAIN) {
+ continue;
+ }
+ error_report("vfio: Error reading device ROM: %m");
+ break;
+ }
+ }
+
+ /*
+ * Test the ROM signature against our device, if the vendor is correct
+ * but the device ID doesn't match, store the correct device ID and
+ * recompute the checksum. Intel IGD devices need this and are known
+ * to have bogus checksums so we can't simply adjust the checksum.
+ */
+ if (pci_get_word(vdev->rom) == 0xaa55 &&
+ pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
+ !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
+ uint16_t vid, did;
+
+ vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
+ did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);
+
+ if (vid == vdev->vendor_id && did != vdev->device_id) {
+ int i;
+ uint8_t csum, *data = vdev->rom;
+
+ pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
+ vdev->device_id);
+ data[6] = 0;
+
+ for (csum = 0, i = 0; i < vdev->rom_size; i++) {
+ csum += data[i];
+ }
+
+ data[6] = -csum;
+ }
+ }
+}
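+
+/*
+ * For reference, the checksum rule applied above: the byte sum of the whole
+ * option ROM image must be zero mod 256, with byte 6 of the header (a
+ * reserved location) used as the fix-up byte.  A standalone validity check
+ * might look like this (sketch only, name hypothetical, not compiled):
+ */
+#if 0
+static bool vfio_sketch_rom_checksum_ok(const uint8_t *rom, size_t size)
+{
+    uint8_t sum = 0;
+    size_t i;
+
+    for (i = 0; i < size; i++) {
+        sum += rom[i];
+    }
+
+    return sum == 0; /* valid when all bytes sum to 0 mod 256 */
+}
+#endif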
+
+static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
+{
+ VFIOPCIDevice *vdev = opaque;
+ union {
+ uint8_t byte;
+ uint16_t word;
+ uint32_t dword;
+ uint64_t qword;
+    } val = { 0 }; /* zero-init: short reads past the ROM end return 0 */
+ uint64_t data = 0;
+
+ /* Load the ROM lazily when the guest tries to read it */
+ if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
+ vfio_pci_load_rom(vdev);
+ }
+
+ memcpy(&val, vdev->rom + addr,
+ (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);
+
+ switch (size) {
+ case 1:
+ data = val.byte;
+ break;
+ case 2:
+ data = le16_to_cpu(val.word);
+ break;
+ case 4:
+ data = le32_to_cpu(val.dword);
+ break;
+ default:
+ hw_error("vfio: unsupported read size, %d bytes\n", size);
+ break;
+ }
+
+ trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);
+
+ return data;
+}
+
+static void vfio_rom_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+}
+
+static const MemoryRegionOps vfio_rom_ops = {
+ .read = vfio_rom_read,
+ .write = vfio_rom_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
+{
+ uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
+ off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
+ DeviceState *dev = DEVICE(vdev);
+ char *name;
+ int fd = vdev->vbasedev.fd;
+
+ if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
+        /* Since the PCI core handles romfile, just print a message and return */
+ if (vfio_opt_rom_in_denylist(vdev) && vdev->pdev.romfile) {
+ warn_report("Device at %s is known to cause system instability"
+ " issues during option rom execution",
+ vdev->vbasedev.name);
+ error_printf("Proceeding anyway since user specified romfile\n");
+ }
+ return;
+ }
+
+ /*
+ * Use the same size ROM BAR as the physical device. The contents
+ * will get filled in later when the guest tries to read it.
+ */
+ if (pread(fd, &orig, 4, offset) != 4 ||
+ pwrite(fd, &size, 4, offset) != 4 ||
+ pread(fd, &size, 4, offset) != 4 ||
+ pwrite(fd, &orig, 4, offset) != 4) {
+ error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
+ return;
+ }
+
+ size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;
+
+ if (!size) {
+ return;
+ }
+
+ if (vfio_opt_rom_in_denylist(vdev)) {
+ if (dev->opts && qdict_haskey(dev->opts, "rombar")) {
+ warn_report("Device at %s is known to cause system instability"
+ " issues during option rom execution",
+ vdev->vbasedev.name);
+ error_printf("Proceeding anyway since user specified"
+ " non zero value for rombar\n");
+ } else {
+ warn_report("Rom loading for device at %s has been disabled"
+ " due to system instability issues",
+ vdev->vbasedev.name);
+ error_printf("Specify rombar=1 or romfile to force\n");
+ return;
+ }
+ }
+
+ trace_vfio_pci_size_rom(vdev->vbasedev.name, size);
+
+ name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);
+
+ memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
+ &vfio_rom_ops, vdev, name, size);
+ g_free(name);
+
+ pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
+ PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);
+
+ vdev->rom_read_failed = false;
+}
+
+void vfio_vga_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOVGARegion *region = opaque;
+ VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
+ union {
+ uint8_t byte;
+ uint16_t word;
+ uint32_t dword;
+ uint64_t qword;
+ } buf;
+ off_t offset = vga->fd_offset + region->offset + addr;
+
+ switch (size) {
+ case 1:
+ buf.byte = data;
+ break;
+ case 2:
+ buf.word = cpu_to_le16(data);
+ break;
+ case 4:
+ buf.dword = cpu_to_le32(data);
+ break;
+ default:
+ hw_error("vfio: unsupported write size, %d bytes", size);
+ break;
+ }
+
+ if (pwrite(vga->fd, &buf, size, offset) != size) {
+ error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
+ __func__, region->offset + addr, data, size);
+ }
+
+ trace_vfio_vga_write(region->offset + addr, data, size);
+}
+
+uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
+{
+ VFIOVGARegion *region = opaque;
+ VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
+ union {
+ uint8_t byte;
+ uint16_t word;
+ uint32_t dword;
+ uint64_t qword;
+ } buf;
+ uint64_t data = 0;
+ off_t offset = vga->fd_offset + region->offset + addr;
+
+ if (pread(vga->fd, &buf, size, offset) != size) {
+ error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
+ __func__, region->offset + addr, size);
+ return (uint64_t)-1;
+ }
+
+ switch (size) {
+ case 1:
+ data = buf.byte;
+ break;
+ case 2:
+ data = le16_to_cpu(buf.word);
+ break;
+ case 4:
+ data = le32_to_cpu(buf.dword);
+ break;
+ default:
+ hw_error("vfio: unsupported read size, %d bytes", size);
+ break;
+ }
+
+ trace_vfio_vga_read(region->offset + addr, size, data);
+
+ return data;
+}
+
+static const MemoryRegionOps vfio_vga_ops = {
+ .read = vfio_vga_read,
+ .write = vfio_vga_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+/*
+ * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to
+ * page size if the BAR occupies an exclusive page on the host, so that we
+ * can map the BAR into the guest. The sub-page BAR may not occupy an
+ * exclusive page in the guest, however, so we set the priority of the
+ * expanded memory region to zero in case it overlaps BARs which share the
+ * same guest page. We must also restore the size of the sub-page BAR when
+ * its base address is changed in the guest and is no longer page aligned.
+ */
+static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ VFIORegion *region = &vdev->bars[bar].region;
+ MemoryRegion *mmap_mr, *region_mr, *base_mr;
+ PCIIORegion *r;
+ pcibus_t bar_addr;
+ uint64_t size = region->size;
+
+ /* Make sure that the whole region is allowed to be mmapped */
+ if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
+ region->mmaps[0].size != region->size) {
+ return;
+ }
+
+ r = &pdev->io_regions[bar];
+ bar_addr = r->addr;
+ base_mr = vdev->bars[bar].mr;
+ region_mr = region->mem;
+ mmap_mr = &region->mmaps[0].mem;
+
+ /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
+ if (bar_addr != PCI_BAR_UNMAPPED &&
+ !(bar_addr & ~qemu_real_host_page_mask)) {
+ size = qemu_real_host_page_size;
+ }
+
+ memory_region_transaction_begin();
+
+ if (vdev->bars[bar].size < size) {
+ memory_region_set_size(base_mr, size);
+ }
+ memory_region_set_size(region_mr, size);
+ memory_region_set_size(mmap_mr, size);
+ if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
+ memory_region_del_subregion(r->address_space, base_mr);
+ memory_region_add_subregion_overlap(r->address_space,
+ bar_addr, base_mr, 0);
+ }
+
+ memory_region_transaction_commit();
+}
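+
+/*
+ * Worked example for the function above, assuming a 4 KiB host page size: a
+ * 0x800-byte BAR programmed at guest address 0x101000 is page aligned, so
+ * its regions are expanded to 0x1000 bytes; if the guest later moves it to
+ * 0x101800, the alignment test fails and the regions shrink back to the
+ * original 0x800 bytes.
+ */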
+
+/*
+ * PCI config space
+ */
+uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
+
+ memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
+ emu_bits = le32_to_cpu(emu_bits);
+
+ if (emu_bits) {
+ emu_val = pci_default_read_config(pdev, addr, len);
+ }
+
+ if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
+ ssize_t ret;
+
+ ret = pread(vdev->vbasedev.fd, &phys_val, len,
+ vdev->config_offset + addr);
+ if (ret != len) {
+ error_report("%s(%s, 0x%x, 0x%x) failed: %m",
+ __func__, vdev->vbasedev.name, addr, len);
+ return -errno;
+ }
+ phys_val = le32_to_cpu(phys_val);
+ }
+
+ val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
+
+ trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);
+
+ return val;
+}
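+
+/*
+ * Example of the merge in vfio_pci_read_config() above: for a 2-byte read
+ * where only the low byte is emulated, emu_bits is 0x00ff, so val combines
+ * the emulated low byte (emu_val & 0x00ff) with the physical high byte
+ * (phys_val & ~0x00ff).
+ */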
+
+void vfio_pci_write_config(PCIDevice *pdev,
+ uint32_t addr, uint32_t val, int len)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ uint32_t val_le = cpu_to_le32(val);
+
+ trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);
+
+ /* Write everything to VFIO, let it filter out what we can't write */
+ if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
+ != len) {
+ error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
+ __func__, vdev->vbasedev.name, addr, val, len);
+ }
+
+ /* MSI/MSI-X Enabling/Disabling */
+ if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
+ ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
+ int is_enabled, was_enabled = msi_enabled(pdev);
+
+ pci_default_write_config(pdev, addr, val, len);
+
+ is_enabled = msi_enabled(pdev);
+
+ if (!was_enabled) {
+ if (is_enabled) {
+ vfio_msi_enable(vdev);
+ }
+ } else {
+ if (!is_enabled) {
+ vfio_msi_disable(vdev);
+ } else {
+ vfio_update_msi(vdev);
+ }
+ }
+ } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
+ ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
+ int is_enabled, was_enabled = msix_enabled(pdev);
+
+ pci_default_write_config(pdev, addr, val, len);
+
+ is_enabled = msix_enabled(pdev);
+
+ if (!was_enabled && is_enabled) {
+ vfio_msix_enable(vdev);
+ } else if (was_enabled && !is_enabled) {
+ vfio_msix_disable(vdev);
+ }
+ } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
+ range_covers_byte(addr, len, PCI_COMMAND)) {
+ pcibus_t old_addr[PCI_NUM_REGIONS - 1];
+ int bar;
+
+ for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
+ old_addr[bar] = pdev->io_regions[bar].addr;
+ }
+
+ pci_default_write_config(pdev, addr, val, len);
+
+ for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
+ if (old_addr[bar] != pdev->io_regions[bar].addr &&
+ vdev->bars[bar].region.size > 0 &&
+ vdev->bars[bar].region.size < qemu_real_host_page_size) {
+ vfio_sub_page_bar_update_mapping(pdev, bar);
+ }
+ }
+ } else {
+ /* Write everything to QEMU to keep emulated bits correct */
+ pci_default_write_config(pdev, addr, val, len);
+ }
+}
+
+/*
+ * Interrupt setup
+ */
+static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
+{
+ /*
+ * More complicated than it looks. Disabling MSI/X transitions the
+ * device to INTx mode (if supported). Therefore we need to first
+     * disable MSI/X and then clean up by disabling INTx.
+ */
+ if (vdev->interrupt == VFIO_INT_MSIX) {
+ vfio_msix_disable(vdev);
+ } else if (vdev->interrupt == VFIO_INT_MSI) {
+ vfio_msi_disable(vdev);
+ }
+
+ if (vdev->interrupt == VFIO_INT_INTx) {
+ vfio_intx_disable(vdev);
+ }
+}
+
+static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
+{
+ uint16_t ctrl;
+ bool msi_64bit, msi_maskbit;
+ int ret, entries;
+ Error *err = NULL;
+
+ if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
+ vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
+ error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
+ return -errno;
+ }
+ ctrl = le16_to_cpu(ctrl);
+
+ msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
+ msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
+ entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
+
+ trace_vfio_msi_setup(vdev->vbasedev.name, pos);
+
+ ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
+ if (ret < 0) {
+ if (ret == -ENOTSUP) {
+ return 0;
+ }
+ error_propagate_prepend(errp, err, "msi_init failed: ");
+ return ret;
+ }
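+
+    /*
+     * MSI capability length as accounted for below: 0xa bytes of base
+     * registers, plus 0x4 for 64-bit address registers, plus 0xa for
+     * per-vector masking; e.g. a 64-bit capability with masking spans
+     * 0xa + 0xa + 0x4 = 0x18 bytes.
+     */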
+ vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
+
+ return 0;
+}
+
+static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
+{
+ off_t start, end;
+ VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;
+
+ /*
+     * If the host driver allows mapping of the MSI-X data, we map the
+     * entire BAR and emulate the MSI-X table on top of it.
+ */
+ if (vfio_has_region_cap(&vdev->vbasedev, region->nr,
+ VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
+ return;
+ }
+
+ /*
+     * We expect to find a single mmap covering the whole BAR; anything else
+     * means it's either unsupported or already set up.
+ */
+ if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
+ region->size != region->mmaps[0].size) {
+ return;
+ }
+
+ /* MSI-X table start and end aligned to host page size */
+ start = vdev->msix->table_offset & qemu_real_host_page_mask;
+ end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
+ (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
+
+ /*
+ * Does the MSI-X table cover the beginning of the BAR? The whole BAR?
+ * NB - Host page size is necessarily a power of two and so is the PCI
+ * BAR (not counting EA yet), therefore if we have host page aligned
+ * @start and @end, then any remainder of the BAR before or after those
+ * must be at least host page sized and therefore mmap'able.
+ */
+ if (!start) {
+ if (end >= region->size) {
+ region->nr_mmaps = 0;
+ g_free(region->mmaps);
+ region->mmaps = NULL;
+ trace_vfio_msix_fixup(vdev->vbasedev.name,
+ vdev->msix->table_bar, 0, 0);
+ } else {
+ region->mmaps[0].offset = end;
+ region->mmaps[0].size = region->size - end;
+ trace_vfio_msix_fixup(vdev->vbasedev.name,
+ vdev->msix->table_bar, region->mmaps[0].offset,
+ region->mmaps[0].offset + region->mmaps[0].size);
+ }
+
+ /* Maybe it's aligned at the end of the BAR */
+ } else if (end >= region->size) {
+ region->mmaps[0].size = start;
+ trace_vfio_msix_fixup(vdev->vbasedev.name,
+ vdev->msix->table_bar, region->mmaps[0].offset,
+ region->mmaps[0].offset + region->mmaps[0].size);
+
+ /* Otherwise it must split the BAR */
+ } else {
+ region->nr_mmaps = 2;
+ region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);
+
+ memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));
+
+ region->mmaps[0].size = start;
+ trace_vfio_msix_fixup(vdev->vbasedev.name,
+ vdev->msix->table_bar, region->mmaps[0].offset,
+ region->mmaps[0].offset + region->mmaps[0].size);
+
+ region->mmaps[1].offset = end;
+ region->mmaps[1].size = region->size - end;
+ trace_vfio_msix_fixup(vdev->vbasedev.name,
+ vdev->msix->table_bar, region->mmaps[1].offset,
+ region->mmaps[1].offset + region->mmaps[1].size);
+ }
+}
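+
+/*
+ * Worked example of the split case above, with 4 KiB host pages: a 16 KiB
+ * BAR whose MSI-X table occupies [0x1000, 0x2000) is carved into two mmaps,
+ * [0x0, 0x1000) and [0x2000, 0x4000), leaving the table range to be trapped
+ * and emulated.
+ */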
+
+static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
+{
+ int target_bar = -1;
+ size_t msix_sz;
+
+ if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
+ return;
+ }
+
+ /* The actual minimum size of MSI-X structures */
+ msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
+ (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
+ /* Round up to host pages, we don't want to share a page */
+ msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
+ /* PCI BARs must be a power of 2 */
+ msix_sz = pow2ceil(msix_sz);
+
+ if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) {
+ /*
+ * TODO: Lookup table for known devices.
+ *
+ * Logically we might use an algorithm here to select the BAR adding
+ * the least additional MMIO space, but we cannot programmatically
+ * predict the driver dependency on BAR ordering or sizing, therefore
+ * 'auto' becomes a lookup for combinations reported to work.
+ */
+ if (target_bar < 0) {
+ error_setg(errp, "No automatic MSI-X relocation available for "
+ "device %04x:%04x", vdev->vendor_id, vdev->device_id);
+ return;
+ }
+ } else {
+ target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0);
+ }
+
+ /* I/O port BARs cannot host MSI-X structures */
+ if (vdev->bars[target_bar].ioport) {
+ error_setg(errp, "Invalid MSI-X relocation BAR %d, "
+ "I/O port BAR", target_bar);
+ return;
+ }
+
+ /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
+ if (!vdev->bars[target_bar].size &&
+ target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
+ error_setg(errp, "Invalid MSI-X relocation BAR %d, "
+ "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
+ return;
+ }
+
+ /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
+ if (vdev->bars[target_bar].size > 1 * GiB &&
+ !vdev->bars[target_bar].mem64) {
+ error_setg(errp, "Invalid MSI-X relocation BAR %d, "
+ "no space to extend 32-bit BAR", target_bar);
+ return;
+ }
+
+ /*
+     * If adding a new BAR, test if we can make it 64-bit. We make it
+ * prefetchable since QEMU MSI-X emulation has no read side effects
+ * and doing so makes mapping more flexible.
+ */
+ if (!vdev->bars[target_bar].size) {
+ if (target_bar < (PCI_ROM_SLOT - 1) &&
+ !vdev->bars[target_bar + 1].size) {
+ vdev->bars[target_bar].mem64 = true;
+ vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
+ }
+ vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+ vdev->bars[target_bar].size = msix_sz;
+ vdev->msix->table_offset = 0;
+ } else {
+ vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
+ msix_sz * 2);
+        /*
+         * Due to the size calculation above, MSI-X always starts halfway
+         * into the BAR, which will always be a separate host page.
+         */
+ vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
+ }
+
+ vdev->msix->table_bar = target_bar;
+ vdev->msix->pba_bar = target_bar;
+ /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
+ vdev->msix->pba_offset = vdev->msix->table_offset +
+ (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);
+
+ trace_vfio_msix_relo(vdev->vbasedev.name,
+ vdev->msix->table_bar, vdev->msix->table_offset);
+}
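+
+/*
+ * Worked example of the sizing above: a device with 96 MSI-X entries needs
+ * 96 * 16 = 1536 table bytes plus 128 / 8 = 16 PBA bytes; page alignment and
+ * pow2ceil() yield msix_sz = 4 KiB on a 4 KiB-page host.  Relocating into an
+ * existing 64 KiB BAR doubles it to 128 KiB with the table at offset 64 KiB;
+ * a newly created BAR is simply 4 KiB with the table at offset 0.
+ */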
+
+/*
+ * We don't have any control over how pci_add_capability() inserts
+ * capabilities into the chain. In order to set up MSI-X we need a
+ * MemoryRegion for the BAR. In order to set up the BAR and not
+ * attempt to mmap the MSI-X table area, which VFIO won't allow, we
+ * need to first look for where the MSI-X table lives. So we
+ * unfortunately split MSI-X setup across two functions.
+ */
+static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
+{
+ uint8_t pos;
+ uint16_t ctrl;
+ uint32_t table, pba;
+ int fd = vdev->vbasedev.fd;
+ VFIOMSIXInfo *msix;
+
+ pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
+ if (!pos) {
+ return;
+ }
+
+ if (pread(fd, &ctrl, sizeof(ctrl),
+ vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
+ error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
+ return;
+ }
+
+ if (pread(fd, &table, sizeof(table),
+ vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
+ error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
+ return;
+ }
+
+ if (pread(fd, &pba, sizeof(pba),
+ vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
+ error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
+ return;
+ }
+
+ ctrl = le16_to_cpu(ctrl);
+ table = le32_to_cpu(table);
+ pba = le32_to_cpu(pba);
+
+ msix = g_malloc0(sizeof(*msix));
+ msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
+ msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
+ msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
+ msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
+ msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+
+ /*
+     * Test whether the PBA offset extends outside of the specified BAR. If
+     * it does, we either need to apply a hardware-specific quirk for a known
+     * device, or we have a broken configuration.
+ */
+ if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
+ /*
+         * Chelsio T5 Virtual Function devices have device IDs encoded as
+         * 0x58xx. The T5 hardware returns an incorrect value of 0x8000 for
+ * the VF PBA offset while the BAR itself is only 8k. The correct value
+ * is 0x1000, so we hard code that here.
+ */
+ if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
+ (vdev->device_id & 0xff00) == 0x5800) {
+ msix->pba_offset = 0x1000;
+ /*
+ * BAIDU KUNLUN Virtual Function devices for KUNLUN AI processor
+ * return an incorrect value of 0x460000 for the VF PBA offset while
+ * the BAR itself is only 0x10000. The correct value is 0xb400.
+ */
+ } else if (vfio_pci_is(vdev, PCI_VENDOR_ID_BAIDU,
+ PCI_DEVICE_ID_KUNLUN_VF)) {
+ msix->pba_offset = 0xb400;
+ } else if (vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
+ error_setg(errp, "hardware reports invalid configuration, "
+ "MSIX PBA outside of specified BAR");
+ g_free(msix);
+ return;
+ }
+ }
+
+ trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
+ msix->table_offset, msix->entries);
+ vdev->msix = msix;
+
+ vfio_pci_fixup_msix_region(vdev);
+
+ vfio_pci_relocate_msix(vdev, errp);
+}
+
+static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
+{
+ int ret;
+ Error *err = NULL;
+
+ vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
+ sizeof(unsigned long));
+ ret = msix_init(&vdev->pdev, vdev->msix->entries,
+ vdev->bars[vdev->msix->table_bar].mr,
+ vdev->msix->table_bar, vdev->msix->table_offset,
+ vdev->bars[vdev->msix->pba_bar].mr,
+ vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
+ &err);
+ if (ret < 0) {
+ if (ret == -ENOTSUP) {
+ warn_report_err(err);
+ return 0;
+ }
+
+ error_propagate(errp, err);
+ return ret;
+ }
+
+ /*
+ * The PCI spec suggests that devices provide additional alignment for
+ * MSI-X structures and avoid overlapping non-MSI-X related registers.
+ * For an assigned device, this hopefully means that emulation of MSI-X
+ * structures does not affect the performance of the device. If devices
+ * fail to provide that alignment, a significant performance penalty may
+ * result, for instance Mellanox MT27500 VFs:
+ * http://www.spinics.net/lists/kvm/msg125881.html
+ *
+ * The PBA is simply not that important for such a serious regression and
+ * most drivers do not appear to look at it. The solution for this is to
+ * disable the PBA MemoryRegion unless it's being used. We disable it
+     * here and only enable it if a masked vector fires through QEMU. When
+     * the vector-use notifier is called, which occurs on unmask, we test
+     * whether PBA emulation is still needed and disable it again if not.
+ */
+ memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
+
+ /*
+     * The emulated machine may provide a paravirt interface for MSI-X setup,
+     * so it is not strictly necessary to emulate MSI-X here. This becomes
+     * helpful when frequently accessed MMIO registers are located in
+     * subpages adjacent to the MSI-X table, but the page containing the
+     * MSI-X data cannot be mapped because the host page size is bigger than
+     * the MSI-X table alignment.
+ */
+ if (object_property_get_bool(OBJECT(qdev_get_machine()),
+ "vfio-no-msix-emulation", NULL)) {
+ memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
+ }
+
+ return 0;
+}
+
+static void vfio_teardown_msi(VFIOPCIDevice *vdev)
+{
+ msi_uninit(&vdev->pdev);
+
+ if (vdev->msix) {
+ msix_uninit(&vdev->pdev,
+ vdev->bars[vdev->msix->table_bar].mr,
+ vdev->bars[vdev->msix->pba_bar].mr);
+ g_free(vdev->msix->pending);
+ }
+}
+
+/*
+ * Resource setup
+ */
+static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
+{
+ int i;
+
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
+ }
+}
+
+static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
+{
+    VFIOBAR *bar = &vdev->bars[nr];
+    uint32_t pci_bar;
+    int ret;
+
+    /* Skip both unimplemented BARs and the upper half of 64-bit BARs. */
+ if (!bar->region.size) {
+ return;
+ }
+
+ /* Determine what type of BAR this is for registration */
+ ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
+ vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
+ if (ret != sizeof(pci_bar)) {
+ error_report("vfio: Failed to read BAR %d (%m)", nr);
+ return;
+ }
+
+ pci_bar = le32_to_cpu(pci_bar);
+ bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
+ bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
+ bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
+ ~PCI_BASE_ADDRESS_MEM_MASK);
+ bar->size = bar->region.size;
+}
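+
+/*
+ * Example of the decoding above: a 64-bit prefetchable memory BAR reads with
+ * a low nibble of 0xc (64-bit type bit 0x4 plus prefetch bit 0x8), so
+ * bar->type keeps those low bits (~PCI_BASE_ADDRESS_MEM_MASK) while the
+ * address bits are masked off.
+ */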
+
+static void vfio_bars_prepare(VFIOPCIDevice *vdev)
+{
+ int i;
+
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ vfio_bar_prepare(vdev, i);
+ }
+}
+
+static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
+{
+ VFIOBAR *bar = &vdev->bars[nr];
+ char *name;
+
+ if (!bar->size) {
+ return;
+ }
+
+ bar->mr = g_new0(MemoryRegion, 1);
+ name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr);
+ memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size);
+ g_free(name);
+
+ if (bar->region.size) {
+ memory_region_add_subregion(bar->mr, 0, bar->region.mem);
+
+ if (vfio_region_mmap(&bar->region)) {
+ error_report("Failed to mmap %s BAR %d. Performance may be slow",
+ vdev->vbasedev.name, nr);
+ }
+ }
+
+ pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr);
+}
+
+static void vfio_bars_register(VFIOPCIDevice *vdev)
+{
+ int i;
+
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ vfio_bar_register(vdev, i);
+ }
+}
+
+static void vfio_bars_exit(VFIOPCIDevice *vdev)
+{
+ int i;
+
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ VFIOBAR *bar = &vdev->bars[i];
+
+ vfio_bar_quirk_exit(vdev, i);
+ vfio_region_exit(&bar->region);
+ if (bar->region.size) {
+ memory_region_del_subregion(bar->mr, bar->region.mem);
+ }
+ }
+
+ if (vdev->vga) {
+ pci_unregister_vga(&vdev->pdev);
+ vfio_vga_quirk_exit(vdev);
+ }
+}
+
+static void vfio_bars_finalize(VFIOPCIDevice *vdev)
+{
+ int i;
+
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ VFIOBAR *bar = &vdev->bars[i];
+
+ vfio_bar_quirk_finalize(vdev, i);
+ vfio_region_finalize(&bar->region);
+ if (bar->size) {
+ object_unparent(OBJECT(bar->mr));
+ g_free(bar->mr);
+ }
+ }
+
+ if (vdev->vga) {
+ vfio_vga_quirk_finalize(vdev);
+ for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
+ object_unparent(OBJECT(&vdev->vga->region[i].mem));
+ }
+ g_free(vdev->vga);
+ }
+}
+
+/*
+ * General setup
+ */
+static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
+{
+ uint8_t tmp;
+ uint16_t next = PCI_CONFIG_SPACE_SIZE;
+
+ for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
+ tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
+ if (tmp > pos && tmp < next) {
+ next = tmp;
+ }
+ }
+
+ return next - pos;
+}
+
+static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
+{
+ uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;
+
+ for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
+ tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
+ if (tmp > pos && tmp < next) {
+ next = tmp;
+ }
+ }
+
+ return next - pos;
+}
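+
+/*
+ * Both helpers above size a capability as the distance to the next-highest
+ * capability offset: e.g. with extended capabilities at 0x100, 0x148 and
+ * 0x158, the capability at 0x100 is sized 0x48 and the one at 0x158 runs to
+ * the end of extended config space.
+ */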
+
+static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
+{
+ pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
+}
+
+static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
+ uint16_t val, uint16_t mask)
+{
+ vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
+ vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
+ vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
+}
+
+static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
+{
+ pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
+}
+
+static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
+ uint32_t val, uint32_t mask)
+{
+ vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
+ vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
+ vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
+}
+
+static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
+ Error **errp)
+{
+ uint16_t flags;
+ uint8_t type;
+
+ flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
+ type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
+
+ if (type != PCI_EXP_TYPE_ENDPOINT &&
+ type != PCI_EXP_TYPE_LEG_END &&
+ type != PCI_EXP_TYPE_RC_END) {
+
+ error_setg(errp, "assignment of PCIe type 0x%x "
+ "devices is not currently supported", type);
+ return -EINVAL;
+ }
+
+ if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
+ PCIBus *bus = pci_get_bus(&vdev->pdev);
+ PCIDevice *bridge;
+
+ /*
+ * Traditionally PCI device assignment exposes the PCIe capability
+         * as-is on non-express buses, the reason being that some drivers,
+         * for example tg3, simply assume that it's there. However, when
+         * we're running on a native PCIe machine type, like Q35, we need
+         * to hide the PCIe capability. The reason for this is twofold:
+         * first, Windows guests get a Code 10 error when the PCIe capability
+ * is exposed in this configuration. Therefore express devices won't
+ * work at all unless they're attached to express buses in the VM.
+ * Second, a native PCIe machine introduces the possibility of fine
+ * granularity IOMMUs supporting both translation and isolation.
+ * Guest code to discover the IOMMU visibility of a device, such as
+ * IOMMU grouping code on Linux, is very aware of device types and
+ * valid transitions between bus types. An express device on a non-
+ * express bus is not a valid combination on bare metal systems.
+ *
+ * Drivers that require a PCIe capability to make the device
+ * functional are simply going to need to have their devices placed
+ * on a PCIe bus in the VM.
+ */
+ while (!pci_bus_is_root(bus)) {
+ bridge = pci_bridge_get_device(bus);
+ bus = pci_get_bus(bridge);
+ }
+
+ if (pci_bus_is_express(bus)) {
+ return 0;
+ }
+
+ } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
+ /*
+ * On a Root Complex bus Endpoints become Root Complex Integrated
+ * Endpoints, which changes the type and clears the LNK & LNK2 fields.
+ */
+ if (type == PCI_EXP_TYPE_ENDPOINT) {
+ vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
+ PCI_EXP_TYPE_RC_END << 4,
+ PCI_EXP_FLAGS_TYPE);
+
+            /* Link Capabilities, Status, and Control go away */
+ if (size > PCI_EXP_LNKCTL) {
+ vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
+ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
+ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
+
+#ifndef PCI_EXP_LNKCAP2
+#define PCI_EXP_LNKCAP2 44
+#endif
+#ifndef PCI_EXP_LNKSTA2
+#define PCI_EXP_LNKSTA2 50
+#endif
+            /* Link 2 Capabilities, Status, and Control go away */
+ if (size > PCI_EXP_LNKCAP2) {
+ vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
+ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
+ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
+ }
+ }
+
+ } else if (type == PCI_EXP_TYPE_LEG_END) {
+ /*
+ * Legacy endpoints don't belong on the root complex. Windows
+ * seems to be happier with devices if we skip the capability.
+ */
+ return 0;
+ }
+
+ } else {
+ /*
+ * Convert Root Complex Integrated Endpoints to regular endpoints.
+ * These devices don't support LNK/LNK2 capabilities, so make them up.
+ */
+ if (type == PCI_EXP_TYPE_RC_END) {
+ vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
+ PCI_EXP_TYPE_ENDPOINT << 4,
+ PCI_EXP_FLAGS_TYPE);
+ vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
+ QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
+ QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0);
+ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
+ }
+ }
+
+ /*
+ * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
+     * (Niantic errata #35) causing Windows to error with a Code 10 for the
+     * device on Q35. Fix up any such devices to report version 1. If we
+ * were to remove the capability entirely the guest would lose extended
+ * config space.
+ */
+ if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
+ vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
+ 1, PCI_EXP_FLAGS_VERS);
+ }
+
+ pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
+ errp);
+ if (pos < 0) {
+ return pos;
+ }
+
+ vdev->pdev.exp.exp_cap = pos;
+
+ return pos;
+}
+
+static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
+{
+ uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
+
+ if (cap & PCI_EXP_DEVCAP_FLR) {
+ trace_vfio_check_pcie_flr(vdev->vbasedev.name);
+ vdev->has_flr = true;
+ }
+}
+
+static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
+{
+ uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
+
+ if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
+ trace_vfio_check_pm_reset(vdev->vbasedev.name);
+ vdev->has_pm_reset = true;
+ }
+}
+
+static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
+{
+ uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
+
+ if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
+ trace_vfio_check_af_flr(vdev->vbasedev.name);
+ vdev->has_flr = true;
+ }
+}
+
+static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ uint8_t cap_id, next, size;
+ int ret;
+
+ cap_id = pdev->config[pos];
+ next = pdev->config[pos + PCI_CAP_LIST_NEXT];
+
+ /*
+ * If it becomes important to configure capabilities to their actual
+ * size, use this as the default when it's something we don't recognize.
+ * Since QEMU doesn't actually handle many of the config accesses,
+ * exact size doesn't seem worthwhile.
+ */
+ size = vfio_std_cap_max_size(pdev, pos);
+
+ /*
+ * pci_add_capability always inserts the new capability at the head
+ * of the chain. Therefore to end up with a chain that matches the
+ * physical device, we insert from the end by making this recursive.
+     * This is also why we pre-calculate the size above, as the cached config
+     * space will be changed as we unwind the stack.
+ */
+ if (next) {
+ ret = vfio_add_std_cap(vdev, next, errp);
+ if (ret) {
+ return ret;
+ }
+ } else {
+ /* Begin the rebuild, use QEMU emulated list bits */
+ pdev->config[PCI_CAPABILITY_LIST] = 0;
+ vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
+ vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
+
+ ret = vfio_add_virt_caps(vdev, errp);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /* Scale down size, esp in case virt caps were added above */
+ size = MIN(size, vfio_std_cap_max_size(pdev, pos));
+
+ /* Use emulated next pointer to allow dropping caps */
+ pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);
+
+ switch (cap_id) {
+ case PCI_CAP_ID_MSI:
+ ret = vfio_msi_setup(vdev, pos, errp);
+ break;
+ case PCI_CAP_ID_EXP:
+ vfio_check_pcie_flr(vdev, pos);
+ ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
+ break;
+ case PCI_CAP_ID_MSIX:
+ ret = vfio_msix_setup(vdev, pos, errp);
+ break;
+ case PCI_CAP_ID_PM:
+ vfio_check_pm_reset(vdev, pos);
+ vdev->pm_cap = pos;
+ ret = pci_add_capability(pdev, cap_id, pos, size, errp);
+ break;
+ case PCI_CAP_ID_AF:
+ vfio_check_af_flr(vdev, pos);
+ ret = pci_add_capability(pdev, cap_id, pos, size, errp);
+ break;
+ default:
+ ret = pci_add_capability(pdev, cap_id, pos, size, errp);
+ break;
+ }
+
+ if (ret < 0) {
+ error_prepend(errp,
+ "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
+ cap_id, size, pos);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ uint32_t header;
+ uint16_t cap_id, next, size;
+ uint8_t cap_ver;
+ uint8_t *config;
+
+ /* Only add extended caps if we have them and the guest can see them */
+ if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) ||
+ !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
+ return;
+ }
+
+ /*
+ * pcie_add_capability always inserts the new capability at the tail
+ * of the chain. Therefore to end up with a chain that matches the
+ * physical device, we cache the config space to avoid overwriting
+ * the original config space when we parse the extended capabilities.
+ */
+ config = g_memdup(pdev->config, vdev->config_size);
+
+ /*
+ * Extended capabilities are chained with each pointing to the next, so we
+ * can drop anything other than the head of the chain simply by modifying
+ * the previous next pointer. Seed the head of the chain here such that
+ * we can simply skip any capabilities we want to drop below, regardless
+ * of their position in the chain. If this stub capability still exists
+ * after we add the capabilities we want to expose, update the capability
+ * ID to zero. Note that we cannot seed with the capability header being
+     * zero, as this conflicts with the definition of an absent capability chain
+ * and prevents capabilities beyond the head of the list from being added.
+ * By replacing the dummy capability ID with zero after walking the device
+ * chain, we also transparently mark extended capabilities as absent if
+ * no capabilities were added. Note that the PCIe spec defines an absence
+ * of extended capabilities to be determined by a value of zero for the
+ * capability ID, version, AND next pointer. A non-zero next pointer
+ * should be sufficient to indicate additional capabilities are present,
+ * which will occur if we call pcie_add_capability() below. The entire
+ * first dword is emulated to support this.
+ *
+ * NB. The kernel side does similar masking, so be prepared that our
+ * view of the device may also contain a capability ID zero in the head
+ * of the chain. Skip it for the same reason that we cannot seed the
+ * chain with a zero capability.
+ */
+ pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
+ PCI_EXT_CAP(0xFFFF, 0, 0));
+ pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
+ pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);
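+    /*
+     * Extended capability header layout (per the PCIe spec): bits 15:0 hold
+     * the ID, bits 19:16 the version, and bits 31:20 the next pointer, so
+     * the PCI_EXT_CAP(0xFFFF, 0, 0) seed above forms a valid but empty
+     * chain head with no next capability yet.
+     */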
+
+ for (next = PCI_CONFIG_SPACE_SIZE; next;
+ next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
+ header = pci_get_long(config + next);
+ cap_id = PCI_EXT_CAP_ID(header);
+ cap_ver = PCI_EXT_CAP_VER(header);
+
+ /*
+ * If it becomes important to configure extended capabilities to their
+ * actual size, use this as the default when it's something we don't
+ * recognize. Since QEMU doesn't actually handle many of the config
+ * accesses, exact size doesn't seem worthwhile.
+ */
+ size = vfio_ext_cap_max_size(config, next);
+
+ /* Use emulated next pointer to allow dropping extended caps */
+ pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
+ PCI_EXT_CAP_NEXT_MASK);
+
+ switch (cap_id) {
+ case 0: /* kernel masked capability */
+ case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
+ case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
+ case PCI_EXT_CAP_ID_REBAR: /* Can't expose read-only */
+ trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
+ break;
+ default:
+ pcie_add_capability(pdev, cap_id, cap_ver, next, size);
+ }
+
+ }
+
+ /* Cleanup chain head ID if necessary */
+ if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
+ pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
+ }
+
+    g_free(config);
+}
+
+static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ int ret;
+
+ if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
+ !pdev->config[PCI_CAPABILITY_LIST]) {
+ return 0; /* Nothing to add */
+ }
+
+ ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
+ if (ret) {
+ return ret;
+ }
+
+ vfio_add_ext_cap(vdev);
+ return 0;
+}
+
+static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ uint16_t cmd;
+
+ vfio_disable_interrupts(vdev);
+
+ /* Make sure the device is in D0 */
+ if (vdev->pm_cap) {
+ uint16_t pmcsr;
+ uint8_t state;
+
+ pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
+ state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+ if (state) {
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
+ /* vfio handles the necessary delay here */
+ pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
+ state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+ if (state) {
+ error_report("vfio: Unable to power on device, stuck in D%d",
+ state);
+ }
+ }
+ }
+
+ /*
+ * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
+     * Also put INTx Disable into a known state.
+ */
+ cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
+ cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_INTX_DISABLE);
+ vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
+}
+
+static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
+{
+ Error *err = NULL;
+ int nr;
+
+ vfio_intx_enable(vdev, &err);
+ if (err) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ }
+
+ for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
+ off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
+ uint32_t val = 0;
+ uint32_t len = sizeof(val);
+
+ if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
+ error_report("%s(%s) reset bar %d failed: %m", __func__,
+ vdev->vbasedev.name, nr);
+ }
+ }
+
+ vfio_quirk_reset(vdev);
+}
+
+static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
+{
+    char tmp[13]; /* "dddd:bb:dd.f" is 12 characters plus NUL */
+
+    snprintf(tmp, sizeof(tmp), "%04x:%02x:%02x.%1x", addr->domain,
+             addr->bus, addr->slot, addr->function);
+
+ return (strcmp(tmp, name) == 0);
+}
+
+static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
+{
+ VFIOGroup *group;
+ struct vfio_pci_hot_reset_info *info;
+ struct vfio_pci_dependent_device *devices;
+ struct vfio_pci_hot_reset *reset;
+ int32_t *fds;
+ int ret, i, count;
+ bool multi = false;
+
+ trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
+
+ if (!single) {
+ vfio_pci_pre_reset(vdev);
+ }
+ vdev->vbasedev.needs_reset = false;
+
+ info = g_malloc0(sizeof(*info));
+ info->argsz = sizeof(*info);
+
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
+ if (ret && errno != ENOSPC) {
+ ret = -errno;
+ if (!vdev->has_pm_reset) {
+ error_report("vfio: Cannot reset device %s, "
+ "no available reset mechanism.", vdev->vbasedev.name);
+ }
+ goto out_single;
+ }
+
+ count = info->count;
+ info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
+ info->argsz = sizeof(*info) + (count * sizeof(*devices));
+ devices = &info->devices[0];
+
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
+ if (ret) {
+ ret = -errno;
+ error_report("vfio: hot reset info failed: %m");
+ goto out_single;
+ }
+
+ trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
+
+ /* Verify that we have all the groups required */
+ for (i = 0; i < info->count; i++) {
+ PCIHostDeviceAddress host;
+ VFIOPCIDevice *tmp;
+ VFIODevice *vbasedev_iter;
+
+ host.domain = devices[i].segment;
+ host.bus = devices[i].bus;
+ host.slot = PCI_SLOT(devices[i].devfn);
+ host.function = PCI_FUNC(devices[i].devfn);
+
+ trace_vfio_pci_hot_reset_dep_devices(host.domain,
+ host.bus, host.slot, host.function, devices[i].group_id);
+
+ if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
+ continue;
+ }
+
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ if (group->groupid == devices[i].group_id) {
+ break;
+ }
+ }
+
+ if (!group) {
+ if (!vdev->has_pm_reset) {
+ error_report("vfio: Cannot reset device %s, "
+ "depends on group %d which is not owned.",
+ vdev->vbasedev.name, devices[i].group_id);
+ }
+ ret = -EPERM;
+ goto out;
+ }
+
+ /* Prep dependent devices for reset and clear our marker. */
+ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+ if (!vbasedev_iter->dev->realized ||
+ vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
+ continue;
+ }
+ tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
+ if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
+ if (single) {
+ ret = -EINVAL;
+ goto out_single;
+ }
+ vfio_pci_pre_reset(tmp);
+ tmp->vbasedev.needs_reset = false;
+ multi = true;
+ break;
+ }
+ }
+ }
+
+ if (!single && !multi) {
+ ret = -EINVAL;
+ goto out_single;
+ }
+
+ /* Determine how many group fds need to be passed */
+ count = 0;
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ for (i = 0; i < info->count; i++) {
+ if (group->groupid == devices[i].group_id) {
+ count++;
+ break;
+ }
+ }
+ }
+
+ reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
+ reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
+ fds = &reset->group_fds[0];
+
+ /* Fill in group fds */
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ for (i = 0; i < info->count; i++) {
+ if (group->groupid == devices[i].group_id) {
+ fds[reset->count++] = group->fd;
+ break;
+ }
+ }
+ }
+
+ /* Bus reset! */
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
+ g_free(reset);
+
+ trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
+ ret ? "%m" : "Success");
+
+out:
+ /* Re-enable INTx on affected devices */
+ for (i = 0; i < info->count; i++) {
+ PCIHostDeviceAddress host;
+ VFIOPCIDevice *tmp;
+ VFIODevice *vbasedev_iter;
+
+ host.domain = devices[i].segment;
+ host.bus = devices[i].bus;
+ host.slot = PCI_SLOT(devices[i].devfn);
+ host.function = PCI_FUNC(devices[i].devfn);
+
+ if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
+ continue;
+ }
+
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ if (group->groupid == devices[i].group_id) {
+ break;
+ }
+ }
+
+ if (!group) {
+ break;
+ }
+
+ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+ if (!vbasedev_iter->dev->realized ||
+ vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
+ continue;
+ }
+ tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
+ if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
+ vfio_pci_post_reset(tmp);
+ break;
+ }
+ }
+ }
+out_single:
+ if (!single) {
+ vfio_pci_post_reset(vdev);
+ }
+ g_free(info);
+
+ return ret;
+}
+
+/*
+ * We want to differentiate a hot reset of multiple in-use devices from a hot
+ * reset of a single in-use device. VFIO_DEVICE_RESET already handles hot
+ * resets when there is only a single device per bus. "In-use" here refers to
+ * how many VFIODevices are affected. A hot reset that affects multiple
+ * devices, but only a single in-use device, means that we can call it from
+ * our bus ->reset() callback since the extent is effectively a single
+ * device. This allows us to make use of it in the hotplug path. When there
+ * are multiple in-use devices, we can only trigger the hot reset during a
+ * system reset, and thus from our reset handler. We separate _one vs _multi
+ * here so that we don't overlap and do a double reset on the system reset
+ * path, where both our reset handler and ->reset() callback are used. Calling
+ * _one() does a hot reset only in the single in-use device case; calling
+ * _multi() does nothing if a _one() would have been sufficient.
+ */
+static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
+{
+ return vfio_pci_hot_reset(vdev, true);
+}
+
+static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
+{
+ VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+ return vfio_pci_hot_reset(vdev, false);
+}
+
+static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
+{
+ VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+ if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
+ vbasedev->needs_reset = true;
+ }
+}
+
+static Object *vfio_pci_get_object(VFIODevice *vbasedev)
+{
+ VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+
+ return OBJECT(vdev);
+}
+
+static bool vfio_msix_present(void *opaque, int version_id)
+{
+ PCIDevice *pdev = opaque;
+
+ return msix_present(pdev);
+}
+
+const VMStateDescription vmstate_vfio_pci_config = {
+ .name = "VFIOPCIDevice",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice),
+ VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f)
+{
+ VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+
+ vmstate_save_state(f, &vmstate_vfio_pci_config, vdev, NULL);
+}
+
+static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
+{
+ VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+ PCIDevice *pdev = &vdev->pdev;
+ pcibus_t old_addr[PCI_NUM_REGIONS - 1];
+ int bar, ret;
+
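+ /*
+ * Snapshot the pre-load BAR addresses so that sub-page BARs which
+ * moved during the load can be remapped below.
+ */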
+ for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
+ old_addr[bar] = pdev->io_regions[bar].addr;
+ }
+
+ ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1);
+ if (ret) {
+ return ret;
+ }
+
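+ /*
+ * Re-apply the restored command register through vfio so that the
+ * memory, I/O and bus master enables reach the physical device.
+ */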
+ vfio_pci_write_config(pdev, PCI_COMMAND,
+ pci_get_word(pdev->config + PCI_COMMAND), 2);
+
+ for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
+ /*
+ * The address may not be changed in some scenarios
+ * (e.g. the VF driver isn't loaded in the VM).
+ */
+ if (old_addr[bar] != pdev->io_regions[bar].addr &&
+ vdev->bars[bar].region.size > 0 &&
+ vdev->bars[bar].region.size < qemu_real_host_page_size) {
+ vfio_sub_page_bar_update_mapping(pdev, bar);
+ }
+ }
+
+ if (msi_enabled(pdev)) {
+ vfio_msi_enable(vdev);
+ } else if (msix_enabled(pdev)) {
+ vfio_msix_enable(vdev);
+ }
+
+ return ret;
+}
+
+static VFIODeviceOps vfio_pci_ops = {
+ .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
+ .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
+ .vfio_eoi = vfio_intx_eoi,
+ .vfio_get_object = vfio_pci_get_object,
+ .vfio_save_config = vfio_pci_save_config,
+ .vfio_load_config = vfio_pci_load_config,
+};
+
+int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
+{
+ VFIODevice *vbasedev = &vdev->vbasedev;
+ struct vfio_region_info *reg_info;
+ int ret;
+
+ ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
+ if (ret) {
+ error_setg_errno(errp, -ret,
+ "failed getting region info for VGA region index %d",
+ VFIO_PCI_VGA_REGION_INDEX);
+ return ret;
+ }
+
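+ /*
+ * The VGA region must be readable and writable and large enough to
+ * cover the legacy VGA range ending at 0xbffff.
+ */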
+ if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
+ !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
+ reg_info->size < 0xbffff + 1) {
+ error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
+ (unsigned long)reg_info->flags,
+ (unsigned long)reg_info->size);
+ g_free(reg_info);
+ return -EINVAL;
+ }
+
+ vdev->vga = g_new0(VFIOVGA, 1);
+
+ vdev->vga->fd_offset = reg_info->offset;
+ vdev->vga->fd = vdev->vbasedev.fd;
+
+ g_free(reg_info);
+
+ vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
+ vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
+ QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
+
+ memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
+ OBJECT(vdev), &vfio_vga_ops,
+ &vdev->vga->region[QEMU_PCI_VGA_MEM],
+ "vfio-vga-mmio@0xa0000",
+ QEMU_PCI_VGA_MEM_SIZE);
+
+ vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
+ vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
+ QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);
+
+ memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
+ OBJECT(vdev), &vfio_vga_ops,
+ &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
+ "vfio-vga-io@0x3b0",
+ QEMU_PCI_VGA_IO_LO_SIZE);
+
+ vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
+ vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
+ QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
+
+ memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
+ OBJECT(vdev), &vfio_vga_ops,
+ &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
+ "vfio-vga-io@0x3c0",
+ QEMU_PCI_VGA_IO_HI_SIZE);
+
+ pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
+ &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
+ &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
+
+ return 0;
+}
+
+static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
+{
+ VFIODevice *vbasedev = &vdev->vbasedev;
+ struct vfio_region_info *reg_info;
+ struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
+ int i, ret = -1;
+
+ /* Sanity check device */
+ if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
+ error_setg(errp, "this isn't a PCI device");
+ return;
+ }
+
+ if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
+ error_setg(errp, "unexpected number of io regions %u",
+ vbasedev->num_regions);
+ return;
+ }
+
+ if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
+ error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
+ return;
+ }
+
+ for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
+ char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);
+
+ ret = vfio_region_setup(OBJECT(vdev), vbasedev,
+ &vdev->bars[i].region, i, name);
+ g_free(name);
+
+ if (ret) {
+ error_setg_errno(errp, -ret, "failed to get region %d info", i);
+ return;
+ }
+
+ QLIST_INIT(&vdev->bars[i].quirks);
+ }
+
+ ret = vfio_get_region_info(vbasedev,
+ VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
+ if (ret) {
+ error_setg_errno(errp, -ret, "failed to get config info");
+ return;
+ }
+
+ trace_vfio_populate_device_config(vdev->vbasedev.name,
+ (unsigned long)reg_info->size,
+ (unsigned long)reg_info->offset,
+ (unsigned long)reg_info->flags);
+
+ vdev->config_size = reg_info->size;
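+ /* A 256-byte config space implies no PCIe extended config space */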
+ if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
+ vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
+ }
+ vdev->config_offset = reg_info->offset;
+
+ g_free(reg_info);
+
+ if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
+ ret = vfio_populate_vga(vdev, errp);
+ if (ret) {
+ error_append_hint(errp, "device does not support "
+ "requested feature x-vga\n");
+ return;
+ }
+ }
+
+ irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
+
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
+ if (ret) {
+ /* This can fail for an old kernel or legacy PCI dev */
+ trace_vfio_populate_device_get_irq_info_failure(strerror(errno));
+ } else if (irq_info.count == 1) {
+ vdev->pci_aer = true;
+ } else {
+ warn_report(VFIO_MSG_PREFIX
+ "Could not enable error recovery for the device",
+ vbasedev->name);
+ }
+}
+
+static void vfio_put_device(VFIOPCIDevice *vdev)
+{
+ g_free(vdev->vbasedev.name);
+ g_free(vdev->msix);
+
+ vfio_put_base_device(&vdev->vbasedev);
+}
+
+static void vfio_err_notifier_handler(void *opaque)
+{
+ VFIOPCIDevice *vdev = opaque;
+
+ if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
+ return;
+ }
+
+ /*
+ * TBD. Retrieve the error details and decide what action
+ * needs to be taken. One of the actions could be to pass
+ * the error to the guest and have the guest driver recover
+ * from the error. This requires that PCIe capabilities be
+ * exposed to the guest. For now, we just terminate the
+ * guest to contain the error.
+ */
+
+ error_report("%s(%s) Unrecoverable error detected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name);
+
+ vm_stop(RUN_STATE_INTERNAL_ERROR);
+}
+
+/*
+ * Registers error notifier for devices supporting error recovery.
+ * If we encounter a failure in this function, we report an error
+ * and continue after disabling error recovery support for the
+ * device.
+ */
+static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
+{
+ Error *err = NULL;
+ int32_t fd;
+
+ if (!vdev->pci_aer) {
+ return;
+ }
+
+ if (event_notifier_init(&vdev->err_notifier, 0)) {
+ error_report("vfio: Unable to init event notifier for error detection");
+ vdev->pci_aer = false;
+ return;
+ }
+
+ fd = event_notifier_get_fd(&vdev->err_notifier);
+ qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);
+
+ if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ qemu_set_fd_handler(fd, NULL, NULL, vdev);
+ event_notifier_cleanup(&vdev->err_notifier);
+ vdev->pci_aer = false;
+ }
+}
+
+static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
+{
+ Error *err = NULL;
+
+ if (!vdev->pci_aer) {
+ return;
+ }
+
+ if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ }
+ qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
+ NULL, NULL, vdev);
+ event_notifier_cleanup(&vdev->err_notifier);
+}
+
+static void vfio_req_notifier_handler(void *opaque)
+{
+ VFIOPCIDevice *vdev = opaque;
+ Error *err = NULL;
+
+ if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
+ return;
+ }
+
+ qdev_unplug(DEVICE(vdev), &err);
+ if (err) {
+ warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ }
+}
+
+static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
+{
+ struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
+ .index = VFIO_PCI_REQ_IRQ_INDEX };
+ Error *err = NULL;
+ int32_t fd;
+
+ if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
+ return;
+ }
+
+ if (ioctl(vdev->vbasedev.fd,
+ VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
+ return;
+ }
+
+ if (event_notifier_init(&vdev->req_notifier, 0)) {
+ error_report("vfio: Unable to init event notifier for device request");
+ return;
+ }
+
+ fd = event_notifier_get_fd(&vdev->req_notifier);
+ qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);
+
+ if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ qemu_set_fd_handler(fd, NULL, NULL, vdev);
+ event_notifier_cleanup(&vdev->req_notifier);
+ } else {
+ vdev->req_enabled = true;
+ }
+}
+
+static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
+{
+ Error *err = NULL;
+
+ if (!vdev->req_enabled) {
+ return;
+ }
+
+ if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ }
+ qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
+ NULL, NULL, vdev);
+ event_notifier_cleanup(&vdev->req_notifier);
+
+ vdev->req_enabled = false;
+}
+
+static void vfio_realize(PCIDevice *pdev, Error **errp)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+ VFIODevice *vbasedev_iter;
+ VFIOGroup *group;
+ char *tmp, *subsys, group_path[PATH_MAX], *group_name;
+ Error *err = NULL;
+ ssize_t len;
+ struct stat st;
+ int groupid;
+ int i, ret;
+ bool is_mdev;
+
+ if (!vdev->vbasedev.sysfsdev) {
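+ /*
+ * host.* fields default to ~0U (see vfio_instance_init), so all of
+ * them still being ~0 means neither host= nor sysfsdev= was given.
+ */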
+ if (!(~vdev->host.domain || ~vdev->host.bus ||
+ ~vdev->host.slot || ~vdev->host.function)) {
+ error_setg(errp, "No provided host device");
+ error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
+ "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
+ return;
+ }
+ vdev->vbasedev.sysfsdev =
+ g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
+ vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function);
+ }
+
+ if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
+ error_setg_errno(errp, errno, "no such host device");
+ error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.sysfsdev);
+ return;
+ }
+
+ vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev);
+ vdev->vbasedev.ops = &vfio_pci_ops;
+ vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
+ vdev->vbasedev.dev = DEVICE(vdev);
+
+ tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
+ len = readlink(tmp, group_path, sizeof(group_path));
+ g_free(tmp);
+
+ if (len <= 0 || len >= sizeof(group_path)) {
+ error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
+ "no iommu_group found");
+ goto error;
+ }
+
+ group_path[len] = 0;
+
+ group_name = basename(group_path);
+ if (sscanf(group_name, "%d", &groupid) != 1) {
+ error_setg_errno(errp, errno, "failed to read %s", group_path);
+ goto error;
+ }
+
+ trace_vfio_realize(vdev->vbasedev.name, groupid);
+
+ group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
+ if (!group) {
+ goto error;
+ }
+
+ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+ if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
+ error_setg(errp, "device is already attached");
+ vfio_put_group(group);
+ goto error;
+ }
+ }
+
+ /*
+ * Mediated devices *might* operate compatibly with discarding of RAM, but
+ * we cannot know for certain, it depends on whether the mdev vendor driver
+ * stays in sync with the active working set of the guest driver. Prevent
+ * the x-balloon-allowed option unless this is minimally an mdev device.
+ */
+ tmp = g_strdup_printf("%s/subsystem", vdev->vbasedev.sysfsdev);
+ subsys = realpath(tmp, NULL);
+ g_free(tmp);
+ is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0);
+ free(subsys);
+
+ trace_vfio_mdev(vdev->vbasedev.name, is_mdev);
+
+ if (vdev->vbasedev.ram_block_discard_allowed && !is_mdev) {
+ error_setg(errp, "x-balloon-allowed only potentially compatible "
+ "with mdev devices");
+ vfio_put_group(group);
+ goto error;
+ }
+
+ ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
+ if (ret) {
+ vfio_put_group(group);
+ goto error;
+ }
+
+ vfio_populate_device(vdev, &err);
+ if (err) {
+ error_propagate(errp, err);
+ goto error;
+ }
+
+ /* Get a copy of config space */
+ ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
+ MIN(pci_config_size(&vdev->pdev), vdev->config_size),
+ vdev->config_offset);
+ if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
+ ret = ret < 0 ? -errno : -EFAULT;
+ error_setg_errno(errp, -ret, "failed to read device config space");
+ goto error;
+ }
+
+ /* vfio emulates a lot for us, but some bits need extra love */
+ vdev->emulated_config_bits = g_malloc0(vdev->config_size);
+
+ /* QEMU can choose to expose the ROM or not */
+ memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
+ /* QEMU can also add or extend BARs */
+ memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);
+
+ /*
+ * The PCI spec reserves vendor ID 0xffff as an invalid value. The
+ * device ID is managed by the vendor and need only be a 16-bit value.
+ * Allow any 16-bit value for subsystem so they can be hidden or changed.
+ */
+ if (vdev->vendor_id != PCI_ANY_ID) {
+ if (vdev->vendor_id >= 0xffff) {
+ error_setg(errp, "invalid PCI vendor ID provided");
+ goto error;
+ }
+ vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
+ trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
+ } else {
+ vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
+ }
+
+ if (vdev->device_id != PCI_ANY_ID) {
+ if (vdev->device_id > 0xffff) {
+ error_setg(errp, "invalid PCI device ID provided");
+ goto error;
+ }
+ vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
+ trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
+ } else {
+ vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
+ }
+
+ if (vdev->sub_vendor_id != PCI_ANY_ID) {
+ if (vdev->sub_vendor_id > 0xffff) {
+ error_setg(errp, "invalid PCI subsystem vendor ID provided");
+ goto error;
+ }
+ vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
+ vdev->sub_vendor_id, ~0);
+ trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
+ vdev->sub_vendor_id);
+ }
+
+ if (vdev->sub_device_id != PCI_ANY_ID) {
+ if (vdev->sub_device_id > 0xffff) {
+ error_setg(errp, "invalid PCI subsystem device ID provided");
+ goto error;
+ }
+ vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
+ trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
+ vdev->sub_device_id);
+ }
+
+ /* QEMU can change multi-function devices to single function, or the reverse */
+ vdev->emulated_config_bits[PCI_HEADER_TYPE] =
+ PCI_HEADER_TYPE_MULTI_FUNCTION;
+
+ /* Restore or clear multifunction, this is always controlled by QEMU */
+ if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
+ vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
+ } else {
+ vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
+ }
+
+ /*
+ * Clear host resource mapping info. If we choose not to register a
+ * BAR, such as might be the case with the option ROM, we can get
+ * confusing, unwritable, residual addresses from the host here.
+ */
+ memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
+ memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
+
+ vfio_pci_size_rom(vdev);
+
+ vfio_bars_prepare(vdev);
+
+ vfio_msix_early_setup(vdev, &err);
+ if (err) {
+ error_propagate(errp, err);
+ goto error;
+ }
+
+ vfio_bars_register(vdev);
+
+ ret = vfio_add_capabilities(vdev, errp);
+ if (ret) {
+ goto out_teardown;
+ }
+
+ if (vdev->vga) {
+ vfio_vga_quirk_setup(vdev);
+ }
+
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ vfio_bar_quirk_setup(vdev, i);
+ }
+
+ if (!vdev->igd_opregion &&
+ vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
+ struct vfio_region_info *opregion;
+
+ if (vdev->pdev.qdev.hotplugged) {
+ error_setg(errp,
+ "cannot support IGD OpRegion feature on hotplugged "
+ "device");
+ goto out_teardown;
+ }
+
+ ret = vfio_get_dev_region_info(&vdev->vbasedev,
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
+ if (ret) {
+ error_setg_errno(errp, -ret,
+ "does not support requested IGD OpRegion feature");
+ goto out_teardown;
+ }
+
+ ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
+ g_free(opregion);
+ if (ret) {
+ goto out_teardown;
+ }
+ }
+
+ /* QEMU emulates all of MSI & MSIX */
+ if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
+ memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
+ MSIX_CAP_LENGTH);
+ }
+
+ if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
+ memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
+ vdev->msi_cap_size);
+ }
+
+ if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
+ vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
+ vfio_intx_mmap_enable, vdev);
+ pci_device_set_intx_routing_notifier(&vdev->pdev,
+ vfio_intx_routing_notifier);
+ vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
+ kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
+ ret = vfio_intx_enable(vdev, errp);
+ if (ret) {
+ goto out_deregister;
+ }
+ }
+
+ if (vdev->display != ON_OFF_AUTO_OFF) {
+ ret = vfio_display_probe(vdev, errp);
+ if (ret) {
+ goto out_deregister;
+ }
+ }
+ if (vdev->enable_ramfb && vdev->dpy == NULL) {
+ error_setg(errp, "ramfb=on requires display=on");
+ goto out_deregister;
+ }
+ if (vdev->display_xres || vdev->display_yres) {
+ if (vdev->dpy == NULL) {
+ error_setg(errp, "xres and yres properties require display=on");
+ goto out_deregister;
+ }
+ if (vdev->dpy->edid_regs == NULL) {
+ error_setg(errp, "xres and yres properties need edid support");
+ goto out_deregister;
+ }
+ }
+
+ if (vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID)) {
+ ret = vfio_pci_nvidia_v100_ram_init(vdev, errp);
+ if (ret && ret != -ENODEV) {
+ error_report("Failed to setup NVIDIA V100 GPU RAM");
+ }
+ }
+
+ if (vfio_pci_is(vdev, PCI_VENDOR_ID_IBM, PCI_ANY_ID)) {
+ ret = vfio_pci_nvlink2_init(vdev, errp);
+ if (ret && ret != -ENODEV) {
+ error_report("Failed to setup NVlink2 bridge");
+ }
+ }
+
+ if (!pdev->failover_pair_id) {
+ ret = vfio_migration_probe(&vdev->vbasedev, errp);
+ if (ret) {
+ error_report("%s: Migration disabled", vdev->vbasedev.name);
+ }
+ }
+
+ vfio_register_err_notifier(vdev);
+ vfio_register_req_notifier(vdev);
+ vfio_setup_resetfn_quirk(vdev);
+
+ return;
+
+out_deregister:
+ pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
+ kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
+out_teardown:
+ vfio_teardown_msi(vdev);
+ vfio_bars_exit(vdev);
+error:
+ error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+}
+
+static void vfio_instance_finalize(Object *obj)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI(obj);
+ VFIOGroup *group = vdev->vbasedev.group;
+
+ vfio_display_finalize(vdev);
+ vfio_bars_finalize(vdev);
+ g_free(vdev->emulated_config_bits);
+ g_free(vdev->rom);
+ /*
+ * XXX Leaking igd_opregion is not an oversight; we can't remove the
+ * fw_cfg entry, therefore leaking this allocation seems like the safest
+ * option.
+ *
+ * g_free(vdev->igd_opregion);
+ */
+ vfio_put_device(vdev);
+ vfio_put_group(group);
+}
+
+static void vfio_exitfn(PCIDevice *pdev)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+
+ vfio_unregister_req_notifier(vdev);
+ vfio_unregister_err_notifier(vdev);
+ pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
+ if (vdev->irqchip_change_notifier.notify) {
+ kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
+ }
+ vfio_disable_interrupts(vdev);
+ if (vdev->intx.mmap_timer) {
+ timer_free(vdev->intx.mmap_timer);
+ }
+ vfio_teardown_msi(vdev);
+ vfio_bars_exit(vdev);
+ vfio_migration_finalize(&vdev->vbasedev);
+}
+
+static void vfio_pci_reset(DeviceState *dev)
+{
+ VFIOPCIDevice *vdev = VFIO_PCI(dev);
+
+ trace_vfio_pci_reset(vdev->vbasedev.name);
+
+ vfio_pci_pre_reset(vdev);
+
+ if (vdev->display != ON_OFF_AUTO_OFF) {
+ vfio_display_reset(vdev);
+ }
+
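+ /* A device-specific quirk reset, when present, takes precedence */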
+ if (vdev->resetfn && !vdev->resetfn(vdev)) {
+ goto post_reset;
+ }
+
+ if (vdev->vbasedev.reset_works &&
+ (vdev->has_flr || !vdev->has_pm_reset) &&
+ !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
+ trace_vfio_pci_reset_flr(vdev->vbasedev.name);
+ goto post_reset;
+ }
+
+ /* See if we can do our own bus reset */
+ if (!vfio_pci_hot_reset_one(vdev)) {
+ goto post_reset;
+ }
+
+ /* If nothing else works and the device supports PM reset, use it */
+ if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
+ !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
+ trace_vfio_pci_reset_pm(vdev->vbasedev.name);
+ goto post_reset;
+ }
+
+post_reset:
+ vfio_pci_post_reset(vdev);
+}
+
+static void vfio_instance_init(Object *obj)
+{
+ PCIDevice *pci_dev = PCI_DEVICE(obj);
+ VFIOPCIDevice *vdev = VFIO_PCI(obj);
+
+ device_add_bootindex_property(obj, &vdev->bootindex,
+ "bootindex", NULL,
+ &pci_dev->qdev);
+ vdev->host.domain = ~0U;
+ vdev->host.bus = ~0U;
+ vdev->host.slot = ~0U;
+ vdev->host.function = ~0U;
+
+ vdev->nv_gpudirect_clique = 0xFF;
+
+ /*
+ * QEMU_PCI_CAP_EXPRESS initialization does not depend on the QEMU
+ * command line, therefore there is no need to wait until realize,
+ * unlike other devices.
+ */
+ pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
+}
+
+static Property vfio_pci_dev_properties[] = {
+ DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
+ DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
+ DEFINE_PROP_ON_OFF_AUTO("x-pre-copy-dirty-page-tracking", VFIOPCIDevice,
+ vbasedev.pre_copy_dirty_page_tracking,
+ ON_OFF_AUTO_ON),
+ DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
+ display, ON_OFF_AUTO_OFF),
+ DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
+ DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0),
+ DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
+ intx.mmap_timeout, 1100),
+ DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
+ VFIO_FEATURE_ENABLE_VGA_BIT, false),
+ DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
+ VFIO_FEATURE_ENABLE_REQ_BIT, true),
+ DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
+ VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
+ DEFINE_PROP_BOOL("x-enable-migration", VFIOPCIDevice,
+ vbasedev.enable_migration, false),
+ DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
+ DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
+ vbasedev.ram_block_discard_allowed, false),
+ DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
+ DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
+ DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
+ DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
+ no_geforce_quirks, false),
+ DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
+ false),
+ DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd,
+ false),
+ DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
+ DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
+ DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
+ sub_vendor_id, PCI_ANY_ID),
+ DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
+ sub_device_id, PCI_ANY_ID),
+ DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
+ DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
+ nv_gpudirect_clique,
+ qdev_prop_nv_gpudirect_clique, uint8_t),
+ DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
+ OFF_AUTOPCIBAR_OFF),
+ /*
+ * TODO - support passed fds... is this necessary?
+ * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
+ * DEFINE_PROP_STRING("vfiogroupfd, VFIOPCIDevice, vfiogroupfd_name),
+ */
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
+
+ dc->reset = vfio_pci_reset;
+ device_class_set_props(dc, vfio_pci_dev_properties);
+ dc->desc = "VFIO-based PCI device assignment";
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ pdc->realize = vfio_realize;
+ pdc->exit = vfio_exitfn;
+ pdc->config_read = vfio_pci_read_config;
+ pdc->config_write = vfio_pci_write_config;
+}
+
+static const TypeInfo vfio_pci_dev_info = {
+ .name = TYPE_VFIO_PCI,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(VFIOPCIDevice),
+ .class_init = vfio_pci_dev_class_init,
+ .instance_init = vfio_instance_init,
+ .instance_finalize = vfio_instance_finalize,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_PCIE_DEVICE },
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { }
+ },
+};
+
+static Property vfio_pci_dev_nohotplug_properties[] = {
+ DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, vfio_pci_dev_nohotplug_properties);
+ dc->hotpluggable = false;
+}
+
+static const TypeInfo vfio_pci_nohotplug_dev_info = {
+ .name = TYPE_VFIO_PCI_NOHOTPLUG,
+ .parent = TYPE_VFIO_PCI,
+ .instance_size = sizeof(VFIOPCIDevice),
+ .class_init = vfio_pci_nohotplug_dev_class_init,
+};
+
+static void register_vfio_pci_dev_type(void)
+{
+ type_register_static(&vfio_pci_dev_info);
+ type_register_static(&vfio_pci_nohotplug_dev_info);
+}
+
+type_init(register_vfio_pci_dev_type)
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
new file mode 100644
index 000000000..64777516d
--- /dev/null
+++ b/hw/vfio/pci.h
@@ -0,0 +1,227 @@
+/*
+ * vfio based device assignment support - PCI devices
+ *
+ * Copyright Red Hat, Inc. 2012-2015
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+#ifndef HW_VFIO_VFIO_PCI_H
+#define HW_VFIO_VFIO_PCI_H
+
+#include "exec/memory.h"
+#include "hw/pci/pci.h"
+#include "hw/vfio/vfio-common.h"
+#include "qemu/event_notifier.h"
+#include "qemu/queue.h"
+#include "qemu/timer.h"
+#include "qom/object.h"
+
+#define PCI_ANY_ID (~0)
+
+struct VFIOPCIDevice;
+
+typedef struct VFIOIOEventFD {
+ QLIST_ENTRY(VFIOIOEventFD) next;
+ MemoryRegion *mr;
+ hwaddr addr;
+ unsigned size;
+ uint64_t data;
+ EventNotifier e;
+ VFIORegion *region;
+ hwaddr region_addr;
+ bool dynamic; /* Added at runtime, removed on device reset */
+ bool vfio;
+} VFIOIOEventFD;
+
+typedef struct VFIOQuirk {
+ QLIST_ENTRY(VFIOQuirk) next;
+ void *data;
+ QLIST_HEAD(, VFIOIOEventFD) ioeventfds;
+ int nr_mem;
+ MemoryRegion *mem;
+ void (*reset)(struct VFIOPCIDevice *vdev, struct VFIOQuirk *quirk);
+} VFIOQuirk;
+
+typedef struct VFIOBAR {
+ VFIORegion region;
+ MemoryRegion *mr;
+ size_t size;
+ uint8_t type;
+ bool ioport;
+ bool mem64;
+ QLIST_HEAD(, VFIOQuirk) quirks;
+} VFIOBAR;
+
+typedef struct VFIOVGARegion {
+ MemoryRegion mem;
+ off_t offset;
+ int nr;
+ QLIST_HEAD(, VFIOQuirk) quirks;
+} VFIOVGARegion;
+
+typedef struct VFIOVGA {
+ off_t fd_offset;
+ int fd;
+ VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
+} VFIOVGA;
+
+typedef struct VFIOINTx {
+ bool pending; /* interrupt pending */
+ bool kvm_accel; /* set when QEMU bypass through KVM enabled */
+ uint8_t pin; /* which pin to pull for qemu_set_irq */
+ EventNotifier interrupt; /* eventfd triggered on interrupt */
+ EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
+ PCIINTxRoute route; /* routing info for QEMU bypass */
+ uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
+ QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
+} VFIOINTx;
+
+typedef struct VFIOMSIVector {
+ /*
+ * Two interrupt paths are configured per vector. The first is only used
+ * for interrupts injected via QEMU. This is typically the non-accel path,
+ * but may also be used when we want QEMU to handle masking and pending
+ * bits. The KVM path bypasses QEMU and is therefore higher performance,
+ * but requires masking at the device. virq is used to track the MSI route
+ * through KVM, thus kvm_interrupt is only available when virq is set to a
+ * valid (>= 0) value.
+ */
+ EventNotifier interrupt;
+ EventNotifier kvm_interrupt;
+ struct VFIOPCIDevice *vdev; /* back pointer to device */
+ int virq;
+ bool use;
+} VFIOMSIVector;
+
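+/* Interrupt modes tracked in VFIOPCIDevice::interrupt */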
+enum {
+ VFIO_INT_NONE = 0,
+ VFIO_INT_INTx = 1,
+ VFIO_INT_MSI = 2,
+ VFIO_INT_MSIX = 3,
+};
+
+/* Cache of MSI-X setup */
+typedef struct VFIOMSIXInfo {
+ uint8_t table_bar;
+ uint8_t pba_bar;
+ uint16_t entries;
+ uint32_t table_offset;
+ uint32_t pba_offset;
+ unsigned long *pending;
+} VFIOMSIXInfo;
+
+#define TYPE_VFIO_PCI "vfio-pci"
+OBJECT_DECLARE_SIMPLE_TYPE(VFIOPCIDevice, VFIO_PCI)
+
+struct VFIOPCIDevice {
+ PCIDevice pdev;
+ VFIODevice vbasedev;
+ VFIOINTx intx;
+ unsigned int config_size;
+ uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
+ off_t config_offset; /* Offset of config space region within device fd */
+ unsigned int rom_size;
+ off_t rom_offset; /* Offset of ROM region within device fd */
+ void *rom;
+ int msi_cap_size;
+ VFIOMSIVector *msi_vectors;
+ VFIOMSIXInfo *msix;
+ int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
+ int interrupt; /* Current interrupt type */
+ VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
+ VFIOVGA *vga; /* 0xa0000, 0x3b0, 0x3c0 */
+ void *igd_opregion;
+ PCIHostDeviceAddress host;
+ EventNotifier err_notifier;
+ EventNotifier req_notifier;
+ int (*resetfn)(struct VFIOPCIDevice *);
+ uint32_t vendor_id;
+ uint32_t device_id;
+ uint32_t sub_vendor_id;
+ uint32_t sub_device_id;
+ uint32_t features;
+#define VFIO_FEATURE_ENABLE_VGA_BIT 0
+#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
+#define VFIO_FEATURE_ENABLE_REQ_BIT 1
+#define VFIO_FEATURE_ENABLE_REQ (1 << VFIO_FEATURE_ENABLE_REQ_BIT)
+#define VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT 2
+#define VFIO_FEATURE_ENABLE_IGD_OPREGION \
+ (1 << VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT)
+ OnOffAuto display;
+ uint32_t display_xres;
+ uint32_t display_yres;
+ int32_t bootindex;
+ uint32_t igd_gms;
+ OffAutoPCIBAR msix_relo;
+ uint8_t pm_cap;
+ uint8_t nv_gpudirect_clique;
+ bool pci_aer;
+ bool req_enabled;
+ bool has_flr;
+ bool has_pm_reset;
+ bool rom_read_failed;
+ bool no_kvm_intx;
+ bool no_kvm_msi;
+ bool no_kvm_msix;
+ bool no_geforce_quirks;
+ bool no_kvm_ioeventfd;
+ bool no_vfio_ioeventfd;
+ bool enable_ramfb;
+ VFIODisplay *dpy;
+ Notifier irqchip_change_notifier;
+};
+
+/* Use uint32_t for vendor & device so PCI_ANY_ID expands and cannot match hw */
+static inline bool vfio_pci_is(VFIOPCIDevice *vdev, uint32_t vendor, uint32_t device)
+{
+ return (vendor == PCI_ANY_ID || vendor == vdev->vendor_id) &&
+ (device == PCI_ANY_ID || device == vdev->device_id);
+}
+
+static inline bool vfio_is_vga(VFIOPCIDevice *vdev)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ uint16_t class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);
+
+ return class == PCI_CLASS_DISPLAY_VGA;
+}
+
+uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
+void vfio_pci_write_config(PCIDevice *pdev,
+ uint32_t addr, uint32_t val, int len);
+
+uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size);
+void vfio_vga_write(void *opaque, hwaddr addr, uint64_t data, unsigned size);
+
+bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev);
+void vfio_vga_quirk_setup(VFIOPCIDevice *vdev);
+void vfio_vga_quirk_exit(VFIOPCIDevice *vdev);
+void vfio_vga_quirk_finalize(VFIOPCIDevice *vdev);
+void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr);
+void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr);
+void vfio_bar_quirk_finalize(VFIOPCIDevice *vdev, int nr);
+void vfio_setup_resetfn_quirk(VFIOPCIDevice *vdev);
+int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp);
+void vfio_quirk_reset(VFIOPCIDevice *vdev);
+VFIOQuirk *vfio_quirk_alloc(int nr_mem);
+void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr);
+
+extern const PropertyInfo qdev_prop_nv_gpudirect_clique;
+
+int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
+
+int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
+ struct vfio_region_info *info,
+ Error **errp);
+int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp);
+int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp);
+
+void vfio_display_reset(VFIOPCIDevice *vdev);
+int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);
+void vfio_display_finalize(VFIOPCIDevice *vdev);
+
+#endif /* HW_VFIO_VFIO_PCI_H */
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
new file mode 100644
index 000000000..f8f08a0f3
--- /dev/null
+++ b/hw/vfio/platform.c
@@ -0,0 +1,720 @@
+/*
+ * vfio based device assignment support - platform devices
+ *
+ * Copyright Linaro Limited, 2014
+ *
+ * Authors:
+ * Kim Phillips <kim.phillips@linaro.org>
+ * Eric Auger <eric.auger@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Based on vfio based PCI device assignment support:
+ * Copyright Red Hat, Inc. 2012
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+
+#include "hw/vfio/vfio-platform.h"
+#include "migration/vmstate.h"
+#include "qemu/error-report.h"
+#include "qemu/lockable.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "qemu/range.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
+#include "qemu/queue.h"
+#include "hw/sysbus.h"
+#include "trace.h"
+#include "hw/irq.h"
+#include "hw/platform-bus.h"
+#include "hw/qdev-properties.h"
+#include "sysemu/kvm.h"
+
+/*
+ * Functions used regardless of the injection method
+ */
+
+static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
+{
+ return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
+}
+
+/**
+ * vfio_init_intp - allocate and initialize an IRQ struct
+ * and add it into the list of IRQs
+ * @vbasedev: the VFIO device handle
+ * @info: irq info struct retrieved from VFIO driver
+ * @errp: error object
+ */
+static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
+ struct vfio_irq_info info, Error **errp)
+{
+ int ret;
+ VFIOPlatformDevice *vdev =
+ container_of(vbasedev, VFIOPlatformDevice, vbasedev);
+ SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
+ VFIOINTp *intp;
+
+ intp = g_malloc0(sizeof(*intp));
+ intp->vdev = vdev;
+ intp->pin = info.index;
+ intp->flags = info.flags;
+ intp->state = VFIO_IRQ_INACTIVE;
+ intp->kvm_accel = false;
+
+ sysbus_init_irq(sbdev, &intp->qemuirq);
+
+ /* Get an eventfd for trigger */
+ intp->interrupt = g_malloc0(sizeof(EventNotifier));
+ ret = event_notifier_init(intp->interrupt, 0);
+ if (ret) {
+ g_free(intp->interrupt);
+ g_free(intp);
+ error_setg_errno(errp, -ret,
+ "failed to initialize trigger eventfd notifier");
+ return NULL;
+ }
+ if (vfio_irq_is_automasked(intp)) {
+ /* Get an eventfd for resample/unmask */
+ intp->unmask = g_malloc0(sizeof(EventNotifier));
+ ret = event_notifier_init(intp->unmask, 0);
+ if (ret) {
+ g_free(intp->interrupt);
+ g_free(intp->unmask);
+ g_free(intp);
+ error_setg_errno(errp, -ret,
+ "failed to initialize resample eventfd notifier");
+ return NULL;
+ }
+ }
+
+ QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
+ return intp;
+}
+
+/**
+ * vfio_set_trigger_eventfd - set VFIO eventfd handling
+ *
+ * @intp: IRQ struct handle
+ * @handler: handler to be called on eventfd signaling
+ *
+ * Setup VFIO signaling and attach an optional user-side handler
+ * to the eventfd
+ */
+static int vfio_set_trigger_eventfd(VFIOINTp *intp,
+ eventfd_user_side_handler_t handler)
+{
+ VFIODevice *vbasedev = &intp->vdev->vbasedev;
+ int32_t fd = event_notifier_get_fd(intp->interrupt);
+ Error *err = NULL;
+ int ret;
+
+ qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp);
+
+ ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
+ VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err);
+ if (ret) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
+ qemu_set_fd_handler(fd, NULL, NULL, NULL);
+ }
+
+ return ret;
+}
+
+/*
+ * Functions only used when eventfds are handled on the user side,
+ * i.e. without irqfd
+ */
+
+/**
+ * vfio_mmap_set_enabled - enable/disable the fast path mode
+ * @vdev: the VFIO platform device
+ * @enabled: the target mmap state
+ *
+ * enabled = true ~ fast path = MMIO region is mmapped (no KVM TRAP);
+ * enabled = false ~ slow path = MMIO region is trapped and region callbacks
+ * are called; the slow path allows trapping of the device IRQ status
+ * register reset
+ */
+
+static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
+{
+ int i;
+
+ for (i = 0; i < vdev->vbasedev.num_regions; i++) {
+ vfio_region_mmaps_set_enabled(vdev->regions[i], enabled);
+ }
+}
+
+/**
+ * vfio_intp_mmap_enable - timer function, restores the fast path
+ * if there is no more active IRQ
+ * @opaque: actually points to the VFIO platform device
+ *
+ * Called on mmap timer timeout, this function checks whether the
+ * IRQ is still active and, if not, restores the fast path.
+ * By construction a single eventfd is handled at a time.
+ * If the IRQ is still active, the timer is re-programmed.
+ */
+static void vfio_intp_mmap_enable(void *opaque)
+{
+ VFIOINTp *tmp;
+ VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;
+
+ QEMU_LOCK_GUARD(&vdev->intp_mutex);
+ QLIST_FOREACH(tmp, &vdev->intp_list, next) {
+ if (tmp->state == VFIO_IRQ_ACTIVE) {
+ trace_vfio_platform_intp_mmap_enable(tmp->pin);
+ /* re-program the timer to check active status later */
+ timer_mod(vdev->mmap_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
+ vdev->mmap_timeout);
+ return;
+ }
+ }
+ vfio_mmap_set_enabled(vdev, true);
+}
+
+/**
+ * vfio_intp_inject_pending_lockheld - Injects a pending IRQ
+ * @intp: the VFIOINTp handle
+ *
+ * The function is called on a previous IRQ completion, from
+ * vfio_platform_eoi, while the intp_mutex is locked.
+ * In that situation, the slow path is already set and
+ * the mmap timer has already been programmed.
+ */
+static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
+{
+ trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
+ event_notifier_get_fd(intp->interrupt));
+
+ intp->state = VFIO_IRQ_ACTIVE;
+
+ /* trigger the virtual IRQ */
+ qemu_set_irq(intp->qemuirq, 1);
+}
+
+/**
+ * vfio_intp_interrupt - The user-side eventfd handler
+ * @intp: the VFIOINTp handle
+ *
+ * The function is entered in event handler context:
+ * the vIRQ is injected into the guest if there is no other active
+ * or pending IRQ.
+ */
+static void vfio_intp_interrupt(VFIOINTp *intp)
+{
+ int ret;
+ VFIOINTp *tmp;
+ VFIOPlatformDevice *vdev = intp->vdev;
+ bool delay_handling = false;
+
+ QEMU_LOCK_GUARD(&vdev->intp_mutex);
+ if (intp->state == VFIO_IRQ_INACTIVE) {
+ QLIST_FOREACH(tmp, &vdev->intp_list, next) {
+ if (tmp->state == VFIO_IRQ_ACTIVE ||
+ tmp->state == VFIO_IRQ_PENDING) {
+ delay_handling = true;
+ break;
+ }
+ }
+ }
+ if (delay_handling) {
+ /*
+ * the new IRQ gets a pending status and is pushed in
+ * the pending queue
+ */
+ intp->state = VFIO_IRQ_PENDING;
+ trace_vfio_intp_interrupt_set_pending(intp->pin);
+ QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
+ intp, pqnext);
+ event_notifier_test_and_clear(intp->interrupt);
+ return;
+ }
+
+ trace_vfio_platform_intp_interrupt(intp->pin,
+ event_notifier_get_fd(intp->interrupt));
+
+ ret = event_notifier_test_and_clear(intp->interrupt);
+ if (!ret) {
+ error_report("Error when clearing fd=%d (ret = %d)",
+ event_notifier_get_fd(intp->interrupt), ret);
+ }
+
+ intp->state = VFIO_IRQ_ACTIVE;
+
+ /* sets slow path */
+ vfio_mmap_set_enabled(vdev, false);
+
+ /* trigger the virtual IRQ */
+ qemu_set_irq(intp->qemuirq, 1);
+
+ /*
+ * Schedule the mmap timer which will restore fastpath when no IRQ
+ * is active anymore
+ */
+ if (vdev->mmap_timeout) {
+ timer_mod(vdev->mmap_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
+ vdev->mmap_timeout);
+ }
+}
+
+/**
+ * vfio_platform_eoi - IRQ completion routine
+ * @vbasedev: the VFIO device handle
+ *
+ * De-asserts the active virtual IRQ and unmasks the physical IRQ
+ * (effective for level-sensitive IRQs auto-masked by the VFIO driver).
+ * Then it handles the next pending IRQ, if any.
+ * The eoi function is called on the first access to any MMIO region
+ * after an IRQ was triggered; the access is trapped since the slow
+ * path was set.
+ * It is assumed this access corresponds to the IRQ status
+ * register reset. With such a mechanism, a single IRQ can be
+ * handled at a time since there is no way to know which IRQ
+ * was completed by the guest (we would need additional details
+ * about the IRQ status register mask).
+ */
+static void vfio_platform_eoi(VFIODevice *vbasedev)
+{
+ VFIOINTp *intp;
+ VFIOPlatformDevice *vdev =
+ container_of(vbasedev, VFIOPlatformDevice, vbasedev);
+
+ QEMU_LOCK_GUARD(&vdev->intp_mutex);
+ QLIST_FOREACH(intp, &vdev->intp_list, next) {
+ if (intp->state == VFIO_IRQ_ACTIVE) {
+ trace_vfio_platform_eoi(intp->pin,
+ event_notifier_get_fd(intp->interrupt));
+ intp->state = VFIO_IRQ_INACTIVE;
+
+ /* deassert the virtual IRQ */
+ qemu_set_irq(intp->qemuirq, 0);
+
+ if (vfio_irq_is_automasked(intp)) {
+ /* unmasks the physical level-sensitive IRQ */
+ vfio_unmask_single_irqindex(vbasedev, intp->pin);
+ }
+
+ /* a single IRQ can be active at a time */
+ break;
+ }
+ }
+ /* in case there are pending IRQs, handle the first one */
+ if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
+ intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
+ vfio_intp_inject_pending_lockheld(intp);
+ QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
+ }
+}
+
+/**
+ * vfio_start_eventfd_injection - starts the virtual IRQ injection using
+ * user-side handled eventfds
+ * @sbdev: the sysbus device handle
+ * @irq: the qemu irq handle
+ */
+
+static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq)
+{
+ VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
+ VFIOINTp *intp;
+
+ QLIST_FOREACH(intp, &vdev->intp_list, next) {
+ if (intp->qemuirq == irq) {
+ break;
+ }
+ }
+ assert(intp);
+
+ if (vfio_set_trigger_eventfd(intp, vfio_intp_interrupt)) {
+ abort();
+ }
+}
+
+/*
+ * Functions used for irqfd
+ */
+
+/**
+ * vfio_set_resample_eventfd - sets the resamplefd for an IRQ
+ * @intp: the IRQ struct handle
+ *
+ * Programs the VFIO driver to unmask this IRQ when the
+ * intp->unmask eventfd is triggered
+ */
+static int vfio_set_resample_eventfd(VFIOINTp *intp)
+{
+ int32_t fd = event_notifier_get_fd(intp->unmask);
+ VFIODevice *vbasedev = &intp->vdev->vbasedev;
+ Error *err = NULL;
+ int ret;
+
+ qemu_set_fd_handler(fd, NULL, NULL, NULL);
+ ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
+ VFIO_IRQ_SET_ACTION_UNMASK, fd, &err);
+ if (ret) {
+ error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
+ }
+ return ret;
+}
+
+/**
+ * vfio_start_irqfd_injection - starts the virtual IRQ injection using
+ * irqfd
+ *
+ * @sbdev: the sysbus device handle
+ * @irq: the qemu irq handle
+ *
+ * In case the irqfd setup fails, we fall back to a userspace-handled eventfd
+ */
+static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq)
+{
+ VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
+ VFIOINTp *intp;
+
+ if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() ||
+ !vdev->irqfd_allowed) {
+ goto fail_irqfd;
+ }
+
+ QLIST_FOREACH(intp, &vdev->intp_list, next) {
+ if (intp->qemuirq == irq) {
+ break;
+ }
+ }
+ assert(intp);
+
+ if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt,
+ intp->unmask, irq) < 0) {
+ goto fail_irqfd;
+ }
+
+ if (vfio_set_trigger_eventfd(intp, NULL) < 0) {
+ goto fail_vfio;
+ }
+ if (vfio_irq_is_automasked(intp)) {
+ if (vfio_set_resample_eventfd(intp) < 0) {
+ goto fail_vfio;
+ }
+ trace_vfio_platform_start_level_irqfd_injection(intp->pin,
+ event_notifier_get_fd(intp->interrupt),
+ event_notifier_get_fd(intp->unmask));
+ } else {
+ trace_vfio_platform_start_edge_irqfd_injection(intp->pin,
+ event_notifier_get_fd(intp->interrupt));
+ }
+
+ intp->kvm_accel = true;
+
+ return;
+fail_vfio:
+ kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq);
+ abort();
+fail_irqfd:
+ vfio_start_eventfd_injection(sbdev, irq);
+ return;
+}
+
+/* VFIO skeleton */
+
+static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
+{
+ vbasedev->needs_reset = true;
+}
+
+/* not implemented yet */
+static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
+{
+ return -1;
+}
+
+/**
+ * vfio_populate_device - Allocate and populate MMIO region
+ * and IRQ structs according to driver returned information
+ * @vbasedev: the VFIO device handle
+ * @errp: error object
+ *
+ */
+static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
+{
+ VFIOINTp *intp, *tmp;
+ int i, ret = -1;
+ VFIOPlatformDevice *vdev =
+ container_of(vbasedev, VFIOPlatformDevice, vbasedev);
+
+ if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
+ error_setg(errp, "this isn't a platform device");
+ return ret;
+ }
+
+ vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);
+
+ for (i = 0; i < vbasedev->num_regions; i++) {
+ char *name = g_strdup_printf("VFIO %s region %d\n", vbasedev->name, i);
+
+ vdev->regions[i] = g_new0(VFIORegion, 1);
+ ret = vfio_region_setup(OBJECT(vdev), vbasedev,
+ vdev->regions[i], i, name);
+ g_free(name);
+ if (ret) {
+ error_setg_errno(errp, -ret, "failed to get region %d info", i);
+ goto reg_error;
+ }
+ }
+
+ vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
+ vfio_intp_mmap_enable, vdev);
+
+ QSIMPLEQ_INIT(&vdev->pending_intp_queue);
+
+ for (i = 0; i < vbasedev->num_irqs; i++) {
+ struct vfio_irq_info irq = { .argsz = sizeof(irq) };
+
+ irq.index = i;
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
+ if (ret) {
+ error_setg_errno(errp, -ret, "failed to get device irq info");
+ goto irq_err;
+ } else {
+ trace_vfio_platform_populate_interrupts(irq.index,
+ irq.count,
+ irq.flags);
+ intp = vfio_init_intp(vbasedev, irq, errp);
+ if (!intp) {
+ ret = -1;
+ goto irq_err;
+ }
+ }
+ }
+ return 0;
+irq_err:
+ timer_del(vdev->mmap_timer);
+ QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
+ QLIST_REMOVE(intp, next);
+ g_free(intp);
+ }
+reg_error:
+ for (i = 0; i < vbasedev->num_regions; i++) {
+ if (vdev->regions[i]) {
+ vfio_region_finalize(vdev->regions[i]);
+ }
+ g_free(vdev->regions[i]);
+ }
+ g_free(vdev->regions);
+ return ret;
+}
+
+/* specialized functions for VFIO Platform devices */
+static VFIODeviceOps vfio_platform_ops = {
+ .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
+ .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
+ .vfio_eoi = vfio_platform_eoi,
+};
+
+/**
+ * vfio_base_device_init - perform preliminary VFIO setup
+ * @vbasedev: the VFIO device handle
+ * @errp: error object
+ *
+ * Implement the VFIO command sequence used to discover
+ * assigned device resources: group extraction, device
+ * fd retrieval, resource query.
+ * Precondition: the device name must be initialized
+ */
+static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
+{
+ VFIOGroup *group;
+ VFIODevice *vbasedev_iter;
+ char *tmp, group_path[PATH_MAX], *group_name;
+ ssize_t len;
+ struct stat st;
+ int groupid;
+ int ret;
+
+ /* @sysfsdev takes precedence over @host */
+ if (vbasedev->sysfsdev) {
+ g_free(vbasedev->name);
+ vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
+ } else {
+ if (!vbasedev->name || strchr(vbasedev->name, '/')) {
+ error_setg(errp, "wrong host device name");
+ return -EINVAL;
+ }
+
+ vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s",
+ vbasedev->name);
+ }
+
+ if (stat(vbasedev->sysfsdev, &st) < 0) {
+ error_setg_errno(errp, errno,
+ "failed to get the sysfs host device file status");
+ return -errno;
+ }
+
+ tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
+ len = readlink(tmp, group_path, sizeof(group_path));
+ g_free(tmp);
+
+ if (len < 0 || len >= sizeof(group_path)) {
+ ret = len < 0 ? -errno : -ENAMETOOLONG;
+ error_setg_errno(errp, -ret, "no iommu_group found");
+ return ret;
+ }
+
+ group_path[len] = 0;
+
+ group_name = basename(group_path);
+ if (sscanf(group_name, "%d", &groupid) != 1) {
+ error_setg_errno(errp, errno, "failed to read %s", group_path);
+ return -errno;
+ }
+
+ trace_vfio_platform_base_device_init(vbasedev->name, groupid);
+
+ group = vfio_get_group(groupid, &address_space_memory, errp);
+ if (!group) {
+ return -ENOENT;
+ }
+
+ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+ if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
+ error_setg(errp, "device is already attached");
+ vfio_put_group(group);
+ return -EBUSY;
+ }
+ }
+ ret = vfio_get_device(group, vbasedev->name, vbasedev, errp);
+ if (ret) {
+ vfio_put_group(group);
+ return ret;
+ }
+
+ ret = vfio_populate_device(vbasedev, errp);
+ if (ret) {
+ vfio_put_group(group);
+ }
+
+ return ret;
+}
+
+/**
+ * vfio_platform_realize - the device realize function
+ * @dev: device state pointer
+ * @errp: error
+ *
+ * Initialize the device, its memory regions and IRQ structures.
+ * IRQs are started separately.
+ */
+static void vfio_platform_realize(DeviceState *dev, Error **errp)
+{
+ VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
+ SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
+ VFIODevice *vbasedev = &vdev->vbasedev;
+ int i, ret;
+
+ vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
+ vbasedev->dev = dev;
+ vbasedev->ops = &vfio_platform_ops;
+
+ qemu_mutex_init(&vdev->intp_mutex);
+
+ trace_vfio_platform_realize(vbasedev->sysfsdev ?
+ vbasedev->sysfsdev : vbasedev->name,
+ vdev->compat);
+
+ ret = vfio_base_device_init(vbasedev, errp);
+ if (ret) {
+ goto out;
+ }
+
+ if (!vdev->compat) {
+ GError *gerr = NULL;
+ gchar *contents;
+ gsize length;
+ char *path;
+
+ path = g_strdup_printf("%s/of_node/compatible", vbasedev->sysfsdev);
+ if (!g_file_get_contents(path, &contents, &length, &gerr)) {
+ error_setg(errp, "%s", gerr->message);
+ g_error_free(gerr);
+ g_free(path);
+ return;
+ }
+ g_free(path);
+ vdev->compat = contents;
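+ /* of_node/compatible holds NUL-separated strings; count them */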
+ for (vdev->num_compat = 0; length; vdev->num_compat++) {
+ size_t skip = strlen(contents) + 1;
+ contents += skip;
+ length -= skip;
+ }
+ }
+
+ for (i = 0; i < vbasedev->num_regions; i++) {
+ if (vfio_region_mmap(vdev->regions[i])) {
+ warn_report("%s mmap unsupported, performance may be slow",
+ memory_region_name(vdev->regions[i]->mem));
+ }
+ sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
+ }
+out:
+ if (!ret) {
+ return;
+ }
+
+ if (vdev->vbasedev.name) {
+ error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+ } else {
+ error_prepend(errp, "vfio error: ");
+ }
+}
+
+static const VMStateDescription vfio_platform_vmstate = {
+ .name = "vfio-platform",
+ .unmigratable = 1,
+};
+
+static Property vfio_platform_dev_properties[] = {
+ DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
+ DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev),
+ DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
+ DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
+ mmap_timeout, 1100),
+ DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vfio_platform_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
+
+ dc->realize = vfio_platform_realize;
+ device_class_set_props(dc, vfio_platform_dev_properties);
+ dc->vmsd = &vfio_platform_vmstate;
+ dc->desc = "VFIO-based platform device assignment";
+ sbc->connect_irq_notifier = vfio_start_irqfd_injection;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ /* Supported by TYPE_VIRT_MACHINE */
+ dc->user_creatable = true;
+}
+
+static const TypeInfo vfio_platform_dev_info = {
+ .name = TYPE_VFIO_PLATFORM,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(VFIOPlatformDevice),
+ .class_init = vfio_platform_class_init,
+ .class_size = sizeof(VFIOPlatformDeviceClass),
+};
+
+static void register_vfio_platform_dev_type(void)
+{
+ type_register_static(&vfio_platform_dev_info);
+}
+
+type_init(register_vfio_platform_dev_type)
diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c
new file mode 100644
index 000000000..04c6e67f8
--- /dev/null
+++ b/hw/vfio/spapr.c
@@ -0,0 +1,255 @@
+/*
+ * DMA memory preregistration
+ *
+ * Authors:
+ * Alexey Kardashevskiy <aik@ozlabs.ru>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+
+#include "hw/vfio/vfio-common.h"
+#include "hw/hw.h"
+#include "exec/ram_addr.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "trace.h"
+
+static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section)
+{
+ if (memory_region_is_iommu(section->mr)) {
+ hw_error("Cannot possibly preregister IOMMU memory");
+ }
+
+ return !memory_region_is_ram(section->mr) ||
+ memory_region_is_ram_device(section->mr);
+}
+
+static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa)
+{
+ return memory_region_get_ram_ptr(section->mr) +
+ section->offset_within_region +
+ (gpa - section->offset_within_address_space);
+}
+
+static void vfio_prereg_listener_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer,
+ prereg_listener);
+ const hwaddr gpa = section->offset_within_address_space;
+ hwaddr end;
+ int ret;
+ hwaddr page_mask = qemu_real_host_page_mask;
+ struct vfio_iommu_spapr_register_memory reg = {
+ .argsz = sizeof(reg),
+ .flags = 0,
+ };
+
+ if (vfio_prereg_listener_skipped_section(section)) {
+ trace_vfio_prereg_listener_region_add_skip(
+ section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(int128_sub(section->size, int128_one())));
+ return;
+ }
+
+ if (unlikely((section->offset_within_address_space & ~page_mask) ||
+ (section->offset_within_region & ~page_mask) ||
+ (int128_get64(section->size) & ~page_mask))) {
+ error_report("%s received unaligned region", __func__);
+ return;
+ }
+
+ end = section->offset_within_address_space + int128_get64(section->size);
+ if (gpa >= end) {
+ return;
+ }
+
+ memory_region_ref(section->mr);
+
+ reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
+ reg.size = end - gpa;
+
+ ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
+ trace_vfio_prereg_register(reg.vaddr, reg.size, ret ? -errno : 0);
+ if (ret) {
+ /*
+ * On the initfn path, store the first error in the container so we
+ * can gracefully fail. At runtime, there's not much we can do other
+ * than throw a hardware error.
+ */
+ if (!container->initialized) {
+ if (!container->error) {
+ error_setg_errno(&container->error, -ret,
+ "Memory registering failed");
+ }
+ } else {
+ hw_error("vfio: Memory registering failed, unable to continue");
+ }
+ }
+}
+
+static void vfio_prereg_listener_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer,
+ prereg_listener);
+ const hwaddr gpa = section->offset_within_address_space;
+ hwaddr end;
+ int ret;
+ hwaddr page_mask = qemu_real_host_page_mask;
+ struct vfio_iommu_spapr_register_memory reg = {
+ .argsz = sizeof(reg),
+ .flags = 0,
+ };
+
+ if (vfio_prereg_listener_skipped_section(section)) {
+ trace_vfio_prereg_listener_region_del_skip(
+ section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(int128_sub(section->size, int128_one())));
+ return;
+ }
+
+ if (unlikely((section->offset_within_address_space & ~page_mask) ||
+ (section->offset_within_region & ~page_mask) ||
+ (int128_get64(section->size) & ~page_mask))) {
+ error_report("%s received unaligned region", __func__);
+ return;
+ }
+
+ end = section->offset_within_address_space + int128_get64(section->size);
+ if (gpa >= end) {
+ return;
+ }
+
+ reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
+ reg.size = end - gpa;
+
+ ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
+ trace_vfio_prereg_unregister(reg.vaddr, reg.size, ret ? -errno : 0);
+}
+
+const MemoryListener vfio_prereg_listener = {
+ .name = "vfio-pre-reg",
+ .region_add = vfio_prereg_listener_region_add,
+ .region_del = vfio_prereg_listener_region_del,
+};
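+
+/*
+ * Usage sketch (for context; the actual hookup lives in the common
+ * container code): SPAPR TCE v2 containers register this listener
+ * against the RAM address space, roughly:
+ *
+ *     container->prereg_listener = vfio_prereg_listener;
+ *     memory_listener_register(&container->prereg_listener,
+ *                              &address_space_memory);
+ *
+ * so every RAM section is preregistered before a TCE can reference it.
+ */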
+
+int vfio_spapr_create_window(VFIOContainer *container,
+ MemoryRegionSection *section,
+ hwaddr *pgsize)
+{
+ int ret = 0;
+ IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
+ uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr), pgmask;
+ unsigned entries, bits_total, bits_per_level, max_levels;
+ struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) };
+ long rampagesize = qemu_minrampagesize();
+
+ /*
+ * The host might not support the guest-supported IOMMU page size,
+ * so we fall back to smaller physical IOMMU pages to back the guest
+ * mappings.
+ */
+ if (pagesize > rampagesize) {
+ pagesize = rampagesize;
+ }
+ pgmask = container->pgsizes & (pagesize | (pagesize - 1));
+ pagesize = pgmask ? (1ULL << (63 - clz64(pgmask))) : 0;
+ if (!pagesize) {
+ error_report("Host doesn't support page size 0x%"PRIx64
+ ", the supported mask is 0x%lx",
+ memory_region_iommu_get_min_page_size(iommu_mr),
+ container->pgsizes);
+ return -EINVAL;
+ }
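+ /*
+ * Worked example (illustrative values): a requested pagesize of 64K
+ * (0x10000) with container->pgsizes = 0x11000 (4K | 64K) gives
+ * pgmask = 0x11000 & 0x1ffff = 0x11000; 1ULL << (63 - clz64(pgmask))
+ * then picks the highest set bit, 0x10000, i.e. the largest
+ * host-supported IOMMU page size not exceeding the request.
+ */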
+
+ /*
+ * FIXME: for VFIO IOMMU types that have KVM acceleration (avoiding
+ * the need to bounce every map/unmap through QEMU), this would be
+ * the right place to wire that up, i.e. to tell the KVM device
+ * emulation which VFIO IOMMU handles to use.
+ */
+ create.window_size = int128_get64(section->size);
+ create.page_shift = ctz64(pagesize);
+ /*
+ * The SPAPR host supports multilevel TCE tables. We try to guess the
+ * optimal number of levels, and if creation fails (for example, due to
+ * host memory fragmentation), we retry with more levels. The DMA
+ * address structure is:
+ * rrrrrrrr rxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx iiiiiiii
+ * where:
+ * r = reserved (bits >= 55 are reserved in the existing hardware)
+ * i = IOMMU page offset (64K in this example)
+ * x = bits to index a TCE, which can be split into equal chunks to
+ * index within each level.
+ * The aim is to split "x" across the smallest possible number of levels.
+ */
+ entries = create.window_size >> create.page_shift;
+ /* bits_total is number of "x" needed */
+ bits_total = ctz64(entries * sizeof(uint64_t));
+ /*
+ * bits_per_level is a safe guess of how much memory we can allocate per
+ * level: 8 is the current minimum for CONFIG_FORCE_MAX_ZONEORDER, and
+ * MAX_ORDER is usually bigger than that.
+ * qemu_real_host_page_size is used below because TCEs are allocated
+ * from system pages.
+ */
+ bits_per_level = ctz64(qemu_real_host_page_size) + 8;
+ create.levels = bits_total / bits_per_level;
+ if (bits_total % bits_per_level) {
+ ++create.levels;
+ }
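+ /*
+ * Worked example (illustrative values): a 1GB window with 64K IOMMU
+ * pages gives entries = 0x40000000 >> 16 = 16384 TCEs, so bits_total =
+ * ctz64(16384 * 8) = 17. On a 64K-page host, bits_per_level = 16 + 8 =
+ * 24, hence a single level suffices (create.levels = 1).
+ */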
+ max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size);
+ for ( ; create.levels <= max_levels; ++create.levels) {
+ ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
+ if (!ret) {
+ break;
+ }
+ }
+ if (ret) {
+ error_report("Failed to create a window, ret = %d (%m)", ret);
+ return -errno;
+ }
+
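+ /*
+ * The kernel decides where the new window actually starts; if it could
+ * not be placed at the bus address the guest expects (e.g. that slot
+ * is already occupied), the window is unusable, so tear it down.
+ */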
+ if (create.start_addr != section->offset_within_address_space) {
+ vfio_spapr_remove_window(container, create.start_addr);
+
+ error_report("Host doesn't support DMA window at %"HWADDR_PRIx", must be %"PRIx64,
+ section->offset_within_address_space,
+ (uint64_t)create.start_addr);
+ return -EINVAL;
+ }
+ trace_vfio_spapr_create_window(create.page_shift,
+ create.levels,
+ create.window_size,
+ create.start_addr);
+ *pgsize = pagesize;
+
+ return 0;
+}
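+
+/*
+ * Caller sketch (for context): on SPAPR TCE v2 containers the common
+ * VFIO memory listener calls vfio_spapr_create_window() when an IOMMU
+ * section is added and vfio_spapr_remove_window() when it is removed,
+ * using the returned pgsize to describe the new host DMA window.
+ */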
+
+int vfio_spapr_remove_window(VFIOContainer *container,
+ hwaddr offset_within_address_space)
+{
+ struct vfio_iommu_spapr_tce_remove remove = {
+ .argsz = sizeof(remove),
+ .start_addr = offset_within_address_space,
+ };
+ int ret;
+
+ ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
+ if (ret) {
+ error_report("Failed to remove window at %"PRIx64,
+ (uint64_t)remove.start_addr);
+ return -errno;
+ }
+
+ trace_vfio_spapr_remove_window(offset_within_address_space);
+
+ return 0;
+}
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
new file mode 100644
index 000000000..0ef1b5f4a
--- /dev/null
+++ b/hw/vfio/trace-events
@@ -0,0 +1,167 @@
+# See docs/devel/tracing.rst for syntax documentation.
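+#
+# Illustrative example (not from the original file): each declaration
+# below generates a trace_<name>() helper that the C code calls, e.g.
+# the spapr.c event
+#     vfio_prereg_register(uint64_t va, uint64_t size, int ret)
+# is emitted from vfio_prereg_listener_region_add() as
+#     trace_vfio_prereg_register(reg.vaddr, reg.size, ret ? -errno : 0);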
+
+# pci.c
+vfio_intx_interrupt(const char *name, char line) " (%s) Pin %c"
+vfio_intx_eoi(const char *name) " (%s) EOI"
+vfio_intx_enable_kvm(const char *name) " (%s) KVM INTx accel enabled"
+vfio_intx_disable_kvm(const char *name) " (%s) KVM INTx accel disabled"
+vfio_intx_update(const char *name, int new_irq, int target_irq) " (%s) IRQ moved %d -> %d"
+vfio_intx_enable(const char *name) " (%s)"
+vfio_intx_disable(const char *name) " (%s)"
+vfio_msi_interrupt(const char *name, int index, uint64_t addr, int data) " (%s) vector %d 0x%"PRIx64"/0x%x"
+vfio_msix_vector_do_use(const char *name, int index) " (%s) vector %d used"
+vfio_msix_vector_release(const char *name, int index) " (%s) vector %d released"
+vfio_msix_enable(const char *name) " (%s)"
+vfio_msix_pba_disable(const char *name) " (%s)"
+vfio_msix_pba_enable(const char *name) " (%s)"
+vfio_msix_disable(const char *name) " (%s)"
+vfio_msix_fixup(const char *name, int bar, uint64_t start, uint64_t end) " (%s) MSI-X region %d mmap fixup [0x%"PRIx64" - 0x%"PRIx64"]"
+vfio_msix_relo(const char *name, int bar, uint64_t offset) " (%s) BAR %d offset 0x%"PRIx64
+vfio_msi_enable(const char *name, int nr_vectors) " (%s) Enabled %d MSI vectors"
+vfio_msi_disable(const char *name) " (%s)"
+vfio_pci_load_rom(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device %s ROM:\n size: 0x%lx, offset: 0x%lx, flags: 0x%lx"
+vfio_rom_read(const char *name, uint64_t addr, int size, uint64_t data) " (%s, 0x%"PRIx64", 0x%x) = 0x%"PRIx64
+vfio_pci_size_rom(const char *name, int size) "%s ROM size 0x%x"
+vfio_vga_write(uint64_t addr, uint64_t data, int size) " (0x%"PRIx64", 0x%"PRIx64", %d)"
+vfio_vga_read(uint64_t addr, int size, uint64_t data) " (0x%"PRIx64", %d) = 0x%"PRIx64
+vfio_pci_read_config(const char *name, int addr, int len, int val) " (%s, @0x%x, len=0x%x) 0x%x"
+vfio_pci_write_config(const char *name, int addr, int val, int len) " (%s, @0x%x, 0x%x, len=0x%x)"
+vfio_msi_setup(const char *name, int pos) "%s PCI MSI CAP @0x%x"
+vfio_msix_early_setup(const char *name, int pos, int table_bar, int offset, int entries) "%s PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d"
+vfio_check_pcie_flr(const char *name) "%s Supports FLR via PCIe cap"
+vfio_check_pm_reset(const char *name) "%s Supports PM reset"
+vfio_check_af_flr(const char *name) "%s Supports FLR via AF cap"
+vfio_pci_hot_reset(const char *name, const char *type) " (%s) %s"
+vfio_pci_hot_reset_has_dep_devices(const char *name) "%s: hot reset dependent devices:"
+vfio_pci_hot_reset_dep_devices(int domain, int bus, int slot, int function, int group_id) "\t%04x:%02x:%02x.%x group %d"
+vfio_pci_hot_reset_result(const char *name, const char *result) "%s hot reset: %s"
+vfio_populate_device_config(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device %s config:\n size: 0x%lx, offset: 0x%lx, flags: 0x%lx"
+vfio_populate_device_get_irq_info_failure(const char *errstr) "VFIO_DEVICE_GET_IRQ_INFO failure: %s"
+vfio_realize(const char *name, int group_id) " (%s) group %d"
+vfio_mdev(const char *name, bool is_mdev) " (%s) is_mdev %d"
+vfio_add_ext_cap_dropped(const char *name, uint16_t cap, uint16_t offset) "%s 0x%x@0x%x"
+vfio_pci_reset(const char *name) " (%s)"
+vfio_pci_reset_flr(const char *name) "%s FLR/VFIO_DEVICE_RESET"
+vfio_pci_reset_pm(const char *name) "%s PCI PM Reset"
+vfio_pci_emulated_vendor_id(const char *name, uint16_t val) "%s 0x%04x"
+vfio_pci_emulated_device_id(const char *name, uint16_t val) "%s 0x%04x"
+vfio_pci_emulated_sub_vendor_id(const char *name, uint16_t val) "%s 0x%04x"
+vfio_pci_emulated_sub_device_id(const char *name, uint16_t val) "%s 0x%04x"
+
+# pci-quirks.c
+vfio_quirk_rom_in_denylist(const char *name, uint16_t vid, uint16_t did) "%s %04x:%04x"
+vfio_quirk_generic_window_address_write(const char *name, const char *region_name, uint64_t data) "%s %s 0x%"PRIx64
+vfio_quirk_generic_window_data_read(const char *name, const char *region_name, uint64_t data) "%s %s 0x%"PRIx64
+vfio_quirk_generic_window_data_write(const char *name, const char *region_name, uint64_t data) "%s %s 0x%"PRIx64
+vfio_quirk_generic_mirror_read(const char *name, const char *region_name, uint64_t addr, uint64_t data) "%s %s 0x%"PRIx64": 0x%"PRIx64
+vfio_quirk_generic_mirror_write(const char *name, const char *region_name, uint64_t addr, uint64_t data) "%s %s 0x%"PRIx64": 0x%"PRIx64
+vfio_quirk_ati_3c3_read(const char *name, uint64_t data) "%s 0x%"PRIx64
+vfio_quirk_ati_3c3_probe(const char *name) "%s"
+vfio_quirk_ati_bar4_probe(const char *name) "%s"
+vfio_quirk_ati_bar2_probe(const char *name) "%s"
+vfio_quirk_nvidia_3d0_state(const char *name, const char *state) "%s %s"
+vfio_quirk_nvidia_3d0_read(const char *name, uint8_t offset, unsigned size, uint64_t val) " (%s, @0x%x, len=0x%x) 0x%"PRIx64
+vfio_quirk_nvidia_3d0_write(const char *name, uint8_t offset, uint64_t data, unsigned size) "(%s, @0x%x, 0x%"PRIx64", len=0x%x)"
+vfio_quirk_nvidia_3d0_probe(const char *name) "%s"
+vfio_quirk_nvidia_bar5_state(const char *name, const char *state) "%s %s"
+vfio_quirk_nvidia_bar5_probe(const char *name) "%s"
+vfio_quirk_nvidia_bar0_msi_ack(const char *name) "%s"
+vfio_quirk_nvidia_bar0_probe(const char *name) "%s"
+vfio_quirk_rtl8168_fake_latch(const char *name, uint64_t val) "%s 0x%"PRIx64
+vfio_quirk_rtl8168_msix_write(const char *name, uint16_t offset, uint64_t val) "%s MSI-X table write[0x%x]: 0x%"PRIx64
+vfio_quirk_rtl8168_msix_read(const char *name, uint16_t offset, uint64_t val) "%s MSI-X table read[0x%x]: 0x%"PRIx64
+vfio_quirk_rtl8168_probe(const char *name) "%s"
+
+vfio_quirk_ati_bonaire_reset_skipped(const char *name) "%s"
+vfio_quirk_ati_bonaire_reset_no_smc(const char *name) "%s"
+vfio_quirk_ati_bonaire_reset_timeout(const char *name) "%s"
+vfio_quirk_ati_bonaire_reset_done(const char *name) "%s"
+vfio_quirk_ati_bonaire_reset(const char *name) "%s"
+vfio_ioeventfd_exit(const char *name, uint64_t addr, unsigned size, uint64_t data) "%s+0x%"PRIx64"[%d]:0x%"PRIx64
+vfio_ioeventfd_handler(const char *name, uint64_t addr, unsigned size, uint64_t data) "%s+0x%"PRIx64"[%d] -> 0x%"PRIx64
+vfio_ioeventfd_init(const char *name, uint64_t addr, unsigned size, uint64_t data, bool vfio) "%s+0x%"PRIx64"[%d]:0x%"PRIx64" vfio:%d"
+vfio_pci_igd_opregion_enabled(const char *name) "%s"
+
+vfio_pci_nvidia_gpu_setup_quirk(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64
+vfio_pci_nvlink2_setup_quirk_ssatgt(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64
+vfio_pci_nvlink2_setup_quirk_lnkspd(const char *name, uint32_t link_speed) "%s link_speed=0x%x"
+
+# igd.c
+vfio_pci_igd_bar4_write(const char *name, uint32_t index, uint32_t data, uint32_t base) "%s [0x%03x] 0x%08x -> 0x%08x"
+vfio_pci_igd_bdsm_enabled(const char *name, int size) "%s %dMB"
+vfio_pci_igd_host_bridge_enabled(const char *name) "%s"
+vfio_pci_igd_lpc_bridge_enabled(const char *name) "%s"
+
+# common.c
+vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64", %d)"
+vfio_region_read(const char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64
+vfio_iommu_map_notify(const char *op, uint64_t iova_start, uint64_t iova_end) "iommu %s @ 0x%"PRIx64" - 0x%"PRIx64
+vfio_listener_region_add_skip(uint64_t start, uint64_t end) "SKIPPING region_add 0x%"PRIx64" - 0x%"PRIx64
+vfio_spapr_group_attach(int groupfd, int tablefd) "Attached groupfd %d to liobn fd %d"
+vfio_listener_region_add_iommu(uint64_t start, uint64_t end) "region_add [iommu] 0x%"PRIx64" - 0x%"PRIx64
+vfio_listener_region_add_ram(uint64_t iova_start, uint64_t iova_end, void *vaddr) "region_add [ram] 0x%"PRIx64" - 0x%"PRIx64" [%p]"
+vfio_listener_region_add_no_dma_map(const char *name, uint64_t iova, uint64_t size, uint64_t page_size) "Region \"%s\" 0x%"PRIx64" size=0x%"PRIx64" is not aligned to 0x%"PRIx64" and cannot be mapped for DMA"
+vfio_listener_region_del_skip(uint64_t start, uint64_t end) "SKIPPING region_del 0x%"PRIx64" - 0x%"PRIx64
+vfio_listener_region_del(uint64_t start, uint64_t end) "region_del 0x%"PRIx64" - 0x%"PRIx64
+vfio_disconnect_container(int fd) "close container->fd=%d"
+vfio_put_group(int fd) "close group->fd=%d"
+vfio_get_device(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u"
+vfio_put_base_device(int fd) "close vdev->fd=%d"
+vfio_region_setup(const char *dev, int index, const char *name, unsigned long flags, unsigned long offset, unsigned long size) "Device %s, region %d \"%s\", flags: 0x%lx, offset: 0x%lx, size: 0x%lx"
+vfio_region_mmap_fault(const char *name, int index, unsigned long offset, unsigned long size, int fault) "Region %s mmaps[%d], [0x%lx - 0x%lx], fault: %d"
+vfio_region_mmap(const char *name, unsigned long offset, unsigned long end) "Region %s [0x%lx - 0x%lx]"
+vfio_region_exit(const char *name, int index) "Device %s, region %d"
+vfio_region_finalize(const char *name, int index) "Device %s, region %d"
+vfio_region_mmaps_set_enabled(const char *name, bool enabled) "Region %s mmaps enabled: %d"
+vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Region %s unmap [0x%lx - 0x%lx]"
+vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries"
+vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]"
+vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x"
+vfio_dma_unmap_overflow_workaround(void) ""
+
+# platform.c
+vfio_platform_base_device_init(const char *name, int groupid) "%s belongs to group #%d"
+vfio_platform_realize(const char *name, const char *compat) "vfio device %s, compat = %s"
+vfio_platform_eoi(int pin, int fd) "EOI IRQ pin %d (fd=%d)"
+vfio_platform_intp_mmap_enable(int pin) "IRQ #%d still active, stay in slow path"
+vfio_platform_intp_interrupt(int pin, int fd) "Inject IRQ #%d (fd = %d)"
+vfio_platform_intp_inject_pending_lockheld(int pin, int fd) "Inject pending IRQ #%d (fd = %d)"
+vfio_platform_populate_interrupts(int pin, int count, int flags) "- IRQ index %d: count %d, flags=0x%x"
+vfio_intp_interrupt_set_pending(int index) "irq %d is set PENDING"
+vfio_platform_start_level_irqfd_injection(int index, int fd, int resamplefd) "IRQ index=%d, fd = %d, resamplefd = %d"
+vfio_platform_start_edge_irqfd_injection(int index, int fd) "IRQ index=%d, fd = %d"
+
+# spapr.c
+vfio_prereg_listener_region_add_skip(uint64_t start, uint64_t end) "0x%"PRIx64" - 0x%"PRIx64
+vfio_prereg_listener_region_del_skip(uint64_t start, uint64_t end) "0x%"PRIx64" - 0x%"PRIx64
+vfio_prereg_register(uint64_t va, uint64_t size, int ret) "va=0x%"PRIx64" size=0x%"PRIx64" ret=%d"
+vfio_prereg_unregister(uint64_t va, uint64_t size, int ret) "va=0x%"PRIx64" size=0x%"PRIx64" ret=%d"
+vfio_spapr_create_window(int ps, unsigned int levels, uint64_t ws, uint64_t off) "pageshift=0x%x levels=%u winsize=0x%"PRIx64" offset=0x%"PRIx64
+vfio_spapr_remove_window(uint64_t off) "offset=0x%"PRIx64
+
+# display.c
+vfio_display_edid_available(void) ""
+vfio_display_edid_link_up(void) ""
+vfio_display_edid_link_down(void) ""
+vfio_display_edid_update(uint32_t prefx, uint32_t prefy) "%ux%u"
+vfio_display_edid_write_error(void) ""
+
+# migration.c
+vfio_migration_probe(const char *name, uint32_t index) " (%s) Region %d"
+vfio_migration_set_state(const char *name, uint32_t state) " (%s) state %d"
+vfio_vmstate_change(const char *name, int running, const char *reason, uint32_t dev_state) " (%s) running %d reason %s device state %d"
+vfio_migration_state_notifier(const char *name, const char *state) " (%s) state %s"
+vfio_save_setup(const char *name) " (%s)"
+vfio_save_cleanup(const char *name) " (%s)"
+vfio_save_buffer(const char *name, uint64_t data_offset, uint64_t data_size, uint64_t pending) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64" pending 0x%"PRIx64
+vfio_update_pending(const char *name, uint64_t pending) " (%s) pending 0x%"PRIx64
+vfio_save_device_config_state(const char *name) " (%s)"
+vfio_save_pending(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t compatible) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" compatible 0x%"PRIx64
+vfio_save_iterate(const char *name, int data_size) " (%s) data_size %d"
+vfio_save_complete_precopy(const char *name) " (%s)"
+vfio_load_device_config_state(const char *name) " (%s)"
+vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
+vfio_load_state_device_data(const char *name, uint64_t data_offset, uint64_t data_size) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64
+vfio_load_cleanup(const char *name) " (%s)"
+vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size=0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64
+vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64
diff --git a/hw/vfio/trace.h b/hw/vfio/trace.h
new file mode 100644
index 000000000..5a343aa59
--- /dev/null
+++ b/hw/vfio/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_vfio.h"