aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--adapter.c25
-rw-r--r--event_notifier.c6
-rw-r--r--event_notifier.h2
-rw-r--r--vhost_loopback.c2
-rw-r--r--vhost_loopback.h3
-rw-r--r--vhost_user_loopback.c33
-rw-r--r--vhost_user_loopback.h54
-rw-r--r--vhost_user_rng.c23
-rw-r--r--vhost_user_rng.h2
-rw-r--r--virtio_loopback.c469
-rw-r--r--virtio_loopback.h140
-rw-r--r--virtio_rng.c38
-rw-r--r--virtio_rng.h3
13 files changed, 455 insertions, 345 deletions
diff --git a/adapter.c b/adapter.c
index 2aebd9f..9474076 100644
--- a/adapter.c
+++ b/adapter.c
@@ -52,19 +52,19 @@ struct adapter_dev *adev;
struct vhost_user *vudev;
-void vhost_user_adapter_init (void)
+void vhost_user_adapter_init(void)
{
/* Init vhost-user device */
- vudev = (struct vhost_user*) malloc(sizeof(struct vhost_user));
+ vudev = (struct vhost_user *)malloc(sizeof(struct vhost_user));
/* Init vhost device */
- dev = (struct vhost_dev*) malloc(sizeof(struct vhost_dev));
+ dev = (struct vhost_dev *)malloc(sizeof(struct vhost_dev));
/* Init virtio device */
- global_vdev = (VirtIODevice*) malloc(sizeof(VirtIODevice));
+ global_vdev = (VirtIODevice *)malloc(sizeof(VirtIODevice));
/* Init virtio bus */
- global_vbus = (VirtioBus *) malloc(sizeof(VirtioBus));
+ global_vbus = (VirtioBus *)malloc(sizeof(VirtioBus));
global_vbus->vdev = global_vdev;
global_vdev->vbus = global_vbus;
@@ -72,7 +72,7 @@ void vhost_user_adapter_init (void)
dev->vdev = global_vdev;
/* Init adapter device */
- adev = (struct adapter_dev*) malloc(sizeof(struct adapter_dev));
+ adev = (struct adapter_dev *)malloc(sizeof(struct adapter_dev));
adev->vdev = dev;
adev->vudev = vudev;
adev->virtio_dev = global_vdev;
@@ -80,7 +80,7 @@ void vhost_user_adapter_init (void)
}
-void client (char *sock_path)
+void client(char *sock_path)
{
int rc, len;
struct sockaddr_un client_sockaddr;
@@ -106,7 +106,7 @@ void client (char *sock_path)
strcpy(client_sockaddr.sun_path, sock_path);
len = sizeof(client_sockaddr);
rc = connect(client_sock, (struct sockaddr *) &client_sockaddr, len);
- if(rc == -1) {
+ if (rc == -1) {
printf("CONNECT ERROR\n");
close(client_sock);
exit(1);
@@ -114,11 +114,12 @@ void client (char *sock_path)
}
-static void help_args (void) {
+static void help_args(void)
+{
printf("Run example:\n\t./adapter -s /path_to_socket/rng.sock\n");
}
-int main (int argc, char **argv)
+int main(int argc, char **argv)
{
#ifdef VHOST_USER_RNG_DEV
/*
@@ -146,8 +147,8 @@ int main (int argc, char **argv)
virtio_rng_realize(); /* <-- Enable that for simple rng */
#endif
- /* Startthe mmio trasnport layer and communiation with the loopback driver */
- virtio_mmio_start();
+    /* Start loopback transport layer and communication with the loopback driver */
+ virtio_loopback_start();
return 0;
diff --git a/event_notifier.c b/event_notifier.c
index 121cc6e..5a5d1c3 100644
--- a/event_notifier.c
+++ b/event_notifier.c
@@ -1,17 +1,17 @@
/*
* Based on:
- * 1) file-posix.c of Qemu Project
+ * 1) file-posix.c of QEMU Project
*
* Copyright (c) 2006 Fabrice Bellard
*
- * 2) event_notifier-posix.c of Qemu Project
+ * 2) event_notifier-posix.c of QEMU Project
*
* Copyright Red Hat, Inc. 2010
*
* Authors:
* Michael S. Tsirkin <mst@redhat.com>
*
- * 3) os-posix-lib.c of Qemu project
+ * 3) os-posix-lib.c of QEMU project
*
* Copyright (c) 2003-2008 Fabrice Bellard
* Copyright (c) 2010 Red Hat, Inc.
diff --git a/event_notifier.h b/event_notifier.h
index 412cc4b..070b777 100644
--- a/event_notifier.h
+++ b/event_notifier.h
@@ -1,5 +1,5 @@
/*
- * Based on event_notifier.h of Qemu project
+ * Based on event_notifier.h of QEMU project
*
* Copyright Red Hat, Inc. 2010
*
diff --git a/vhost_loopback.c b/vhost_loopback.c
index a76b3d0..95ac21e 100644
--- a/vhost_loopback.c
+++ b/vhost_loopback.c
@@ -1,5 +1,5 @@
/*
- * Based on vhost.c of Qemu project;
+ * Based on vhost.c of QEMU project;
*
* Copyright Red Hat, Inc. 2010
*
diff --git a/vhost_loopback.h b/vhost_loopback.h
index ec9c67e..f63a819 100644
--- a/vhost_loopback.h
+++ b/vhost_loopback.h
@@ -29,6 +29,7 @@
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
-void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, bool mask);
+void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev,
+ int n, bool mask);
#endif /* LOOPBACK_VHOST_H */
diff --git a/vhost_user_loopback.c b/vhost_user_loopback.c
index dec0186..3df7bc3 100644
--- a/vhost_user_loopback.c
+++ b/vhost_user_loopback.c
@@ -1,5 +1,5 @@
/*
- * Based on libvhost-user.c of Qemu project
+ * Based on libvhost-user.c of QEMU project
*
* Copyright IBM, Corp. 2007
* Copyright (c) 2016 Red Hat, Inc.
@@ -432,7 +432,8 @@ out:
int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
{
- /* TODO: Add a assert to check the requested index
+ /*
+     * TODO: Add an assert to check the requested index
*
* assert(idx >= dev->vq_index && idx < dev->vq_index + (int)dev->nvqs);
*/
@@ -452,7 +453,8 @@ void vhost_user_share_fd(void)
msg.fd_num = 1;
memcpy(msg.fds, &loopback_fd, fd_num * sizeof(int));
- /* TODO: Check if we need to remove the VHOST_USER_NEED_REPLY_MASK flag
+ /*
+ * TODO: Check if we need to remove the VHOST_USER_NEED_REPLY_MASK flag
*
* msg.flags &= ~VHOST_USER_NEED_REPLY_MASK;
*/
@@ -478,7 +480,8 @@ int vhost_set_vring_file(VhostUserRequest request,
msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
}
- /* TODO: Check if we need to remove the VHOST_USER_NEED_REPLY_MASK flag
+ /*
+ * TODO: Check if we need to remove the VHOST_USER_NEED_REPLY_MASK flag
*
* msg.flags &= ~VHOST_USER_NEED_REPLY_MASK;
*/
@@ -553,7 +556,6 @@ int vhost_user_set_vring_addr(struct vhost_dev *dev,
msg.flags |= VHOST_USER_NEED_REPLY_MASK;
}
- //ret = vhost_user_write(dev, &msg, NULL, 0);
ret = vu_message_write(client_sock, &msg);
if (ret < 0) {
DBG("Fail vhost_user_set_vring_addr\n");
@@ -606,8 +608,8 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
return 0;
}
-
-/* TODO: This function is not yet fully optimized because in the current release
+/*
+ * TODO: This function is not yet fully optimized because in the current release
* it is not used. t will be implemented or deleted in a later release.
*/
int vhost_user_set_mem_table(struct vhost_dev *dev)
@@ -667,9 +669,10 @@ int vhost_user_backend_init(struct vhost_dev *vhdev)
vhdev->protocol_features =
protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;
-
- /* TODO: Disable config bit for the rng, this might be usefull
- * when new devices are added*/
+ /*
+     * TODO: Disable config bit for the rng, this might be useful
+ * when new devices are added
+ */
vhdev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
err = vhost_user_set_protocol_features(vhdev->protocol_features);
@@ -744,7 +747,8 @@ int vhost_user_backend_init(struct vhost_dev *vhdev)
}
}
- /* TODO: We might need to set up a postcopy_notifier in a future release:
+ /*
+ * TODO: We might need to set up a postcopy_notifier in a future release:
*
* u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
* postcopy_add_notifier(&u->postcopy_notifier);
@@ -754,8 +758,8 @@ int vhost_user_backend_init(struct vhost_dev *vhdev)
}
-void vhost_dev_init(struct vhost_dev *vhdev) {
-
+void vhost_dev_init(struct vhost_dev *vhdev)
+{
uint64_t features;
int r, n_initialized_vqs = 0;
unsigned int i;
@@ -782,7 +786,8 @@ void vhost_dev_init(struct vhost_dev *vhdev) {
}
}
- /* TODO: busyloop == 0 in rng case, but we might need it for new devices:
+ /*
+ * TODO: busyloop == 0 in rng case, but we might need it for new devices:
*
* if (busyloop_timeout) {
* for (i = 0; i < dev->nvqs; ++i) {
diff --git a/vhost_user_loopback.h b/vhost_user_loopback.h
index c41bca6..4a98516 100644
--- a/vhost_user_loopback.h
+++ b/vhost_user_loopback.h
@@ -1,5 +1,5 @@
/*
- * Based on libvhost-user.h of Qemu project
+ * Based on libvhost-user.h of QEMU project
*
* Copyright (c) 2016 Red Hat, Inc.
*
@@ -314,8 +314,10 @@ typedef struct VuDevIface {
vu_get_features_cb get_features;
/* enable vhost implementation features */
vu_set_features_cb set_features;
- /* get the protocol feature bitmask from the underlying vhost
- * implementation */
+ /*
+ * get the protocol feature bitmask from the underlying vhost
+ * implementation
+ */
vu_get_features_cb get_protocol_features;
/* enable protocol features in the underlying vhost implementation. */
vu_set_features_cb set_protocol_features;
@@ -348,19 +350,25 @@ typedef struct VuRing {
} VuRing;
typedef struct VuDescStateSplit {
- /* Indicate whether this descriptor is inflight or not.
- * Only available for head-descriptor. */
+ /*
+ * Indicate whether this descriptor is inflight or not.
+ * Only available for head-descriptor.
+ */
uint8_t inflight;
/* Padding */
uint8_t padding[5];
- /* Maintain a list for the last batch of used descriptors.
- * Only available when batching is used for submitting */
+ /*
+ * Maintain a list for the last batch of used descriptors.
+ * Only available when batching is used for submitting
+ */
uint16_t next;
- /* Used to preserve the order of fetching available descriptors.
- * Only available for head-descriptor. */
+ /*
+ * Used to preserve the order of fetching available descriptors.
+ * Only available for head-descriptor.
+ */
uint64_t counter;
} VuDescStateSplit;
@@ -368,15 +376,21 @@ typedef struct VuVirtqInflight {
/* The feature flags of this region. Now it's initialized to 0. */
uint64_t features;
- /* The version of this region. It's 1 currently.
- * Zero value indicates a vm reset happened. */
+ /*
+ * The version of this region. It's 1 currently.
+ * Zero value indicates a vm reset happened.
+ */
uint16_t version;
- /* The size of VuDescStateSplit array. It's equal to the virtqueue
- * size. Slave could get it from queue size field of VhostUserInflight. */
+ /*
+ * The size of VuDescStateSplit array. It's equal to the virtqueue
+ * size. Slave could get it from queue size field of VhostUserInflight.
+ */
uint16_t desc_num;
- /* The head of list that track the last batch of used descriptors. */
+ /*
+ * The head of list that track the last batch of used descriptors.
+ */
uint16_t last_batch_head;
/* Storing the idx value of used ring */
@@ -721,7 +735,7 @@ void vu_queue_push(VuDev *dev, VuVirtq *vq,
*
* Mark the last number of elements as done (used.idx is updated by
* num elements).
-*/
+ */
void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int num);
/**
@@ -778,9 +792,11 @@ int vhost_user_set_features(struct vhost_dev *dev,
int vhost_user_set_mem_table(struct vhost_dev *dev);
int vhost_user_get_vq_index(struct vhost_dev *dev, int idx);
void vhost_user_share_fd(void);
-int vhost_user_set_vring_num(struct vhost_dev *dev, struct vhost_vring_state *ring);
-int vhost_user_set_vring_base(struct vhost_dev *dev, struct vhost_vring_state *ring);
-int vhost_user_set_vring_addr(struct vhost_dev *dev, struct vhost_vring_addr *addr);
-
+int vhost_user_set_vring_num(struct vhost_dev *dev,
+ struct vhost_vring_state *ring);
+int vhost_user_set_vring_base(struct vhost_dev *dev,
+ struct vhost_vring_state *ring);
+int vhost_user_set_vring_addr(struct vhost_dev *dev,
+ struct vhost_vring_addr *addr);
#endif /* LIBVHOST_USER_H */
diff --git a/vhost_user_rng.c b/vhost_user_rng.c
index 7dc7d99..f674ef6 100644
--- a/vhost_user_rng.c
+++ b/vhost_user_rng.c
@@ -1,5 +1,5 @@
/*
- * Based on vhost-user-rng of Qemu project
+ * Based on vhost-user-rng of QEMU project
*
* Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
*
@@ -131,9 +131,9 @@ static void vu_rng_set_status(VirtIODevice *vdev, uint8_t status)
}
}
-static void virtio_dev_class_init (VirtIODevice *vdev) {
-
- vdev->vdev_class = (VirtioDeviceClass *) malloc(sizeof(VirtioDeviceClass));
+static void virtio_dev_class_init(VirtIODevice *vdev)
+{
+ vdev->vdev_class = (VirtioDeviceClass *)malloc(sizeof(VirtioDeviceClass));
vdev->vdev_class->parent = vdev;
vdev->vdev_class->set_status = vu_rng_set_status;
vdev->vdev_class->get_features = vu_rng_get_features;
@@ -142,16 +142,16 @@ static void virtio_dev_class_init (VirtIODevice *vdev) {
}
-void vhost_user_rng_init(VirtIODevice *vdev) {
-
- VHostUserRNG *vhrng = (VHostUserRNG*) malloc (sizeof(VHostUserRNG));
+void vhost_user_rng_init(VirtIODevice *vdev)
+{
+ VHostUserRNG *vhrng = (VHostUserRNG *)malloc(sizeof(VHostUserRNG));
vdev->vhrng = vhrng;
vhrng->parent = vdev;
vhrng->req_vq = vdev->vq;
vhrng->vhost_dev = dev;
- virtio_dev_class_init (vdev);
- virtio_mmio_bus_init(vdev->vbus);
+ virtio_dev_class_init(vdev);
+ virtio_loopback_bus_init(vdev->vbus);
}
static void vu_rng_handle_output(VirtIODevice *vdev, VirtQueue *vq)
@@ -175,14 +175,15 @@ void vhost_user_rng_realize(void)
global_vdev->host_features = 0x39000000;
- proxy = (VirtIOMMIOProxy*) malloc (sizeof(VirtIOMMIOProxy));
+ proxy = (VirtIOMMIOProxy *)malloc(sizeof(VirtIOMMIOProxy));
*proxy = (VirtIOMMIOProxy) {
.legacy = 1,
};
/* Virtqueues conf */
dev->nvqs = 1;
- dev->vqs = (struct vhost_virtqueue*) malloc(dev->nvqs * sizeof(struct vhost_virtqueue));
+ dev->vqs = (struct vhost_virtqueue *)malloc(dev->nvqs *
+ sizeof(struct vhost_virtqueue));
vhost_dev_init(dev);
}
diff --git a/vhost_user_rng.h b/vhost_user_rng.h
index 77a783c..fd39800 100644
--- a/vhost_user_rng.h
+++ b/vhost_user_rng.h
@@ -1,5 +1,5 @@
/*
- * Based on vhost-user-rng of Qemu project
+ * Based on vhost-user-rng of QEMU project
*
* Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
*
diff --git a/virtio_loopback.c b/virtio_loopback.c
index 8da13b6..5a831ce 100644
--- a/virtio_loopback.c
+++ b/virtio_loopback.c
@@ -2,7 +2,7 @@
*
* Based on:
*
- * 1) virtio.c of Qemu project
+ * 1) virtio.c of QEMU project
*
* Copyright IBM, Corp. 2007
*
@@ -10,7 +10,7 @@
* Anthony Liguori <aliguori@us.ibm.com>
*
*
- * 2) virtio-mmio.c of Qemu project
+ * 2) virtio-mmio.c of QEMU project
*
* Copyright (c) 2011 Linaro Limited
*
@@ -177,7 +177,8 @@ uint64_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
int s;
- s = virtio_has_feature(vdev->guest_features, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+ s = virtio_has_feature(vdev->guest_features,
+ VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return offsetof(VRingAvail, ring) +
sizeof(uint16_t) * vdev->vq[n].vring.num + s;
}
@@ -186,7 +187,8 @@ uint64_t virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
int s;
- s = virtio_has_feature(vdev->guest_features, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+ s = virtio_has_feature(vdev->guest_features,
+ VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return offsetof(VRingUsed, ring) +
sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
}
@@ -220,7 +222,8 @@ unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
- /* Don't allow guest to flip queue between existent and
+ /*
+ * Don't allow guest to flip queue between existent and
* nonexistent states, or to set it to an invalid size.
*/
if (!!num != !!vdev->vq[n].vring.num ||
@@ -330,16 +333,21 @@ static inline uint16_t vring_get_used_event(VirtQueue *vq)
}
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
-/* Assuming a given event_idx value from the other side, if
+/*
+ * Assuming a given event_idx value from the other side, if
* we have just incremented index from old to new_idx,
- * should we trigger an event? */
-static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+ * should we trigger an event?
+ */
+static inline int vring_need_event(uint16_t event_idx,
+ uint16_t new_idx, uint16_t old)
{
- /* Note: Xen has similar logic for notification hold-off
+ /*
+ * Note: Xen has similar logic for notification hold-off
* in include/xen/interface/io/ring.h with req_event and req_prod
* corresponding to event_idx + 1 and new_idx respectively.
* Note also that req_event and req_prod in Xen start at 1,
- * event indexes in virtio start at 0. */
+ * event indexes in virtio start at 0.
+ */
return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
@@ -377,7 +385,8 @@ void virtio_set_isr(VirtIODevice *vdev, int value)
{
uint8_t old = vdev->isr;
- /* Do not write ISR if it does not change, so that its cacheline remains
+ /*
+ * Do not write ISR if it does not change, so that its cacheline remains
* shared in the common case where the guest does not read it.
*/
if ((old & value) != value) {
@@ -448,8 +457,9 @@ static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
new = old + count;
vring_used_idx_set(vq, new);
vq->inuse -= count;
- if ((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))
+ if ((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)) {
vq->signalled_used_valid = false;
+ }
}
void virtqueue_flush(VirtQueue *vq, unsigned int count)
@@ -490,14 +500,15 @@ static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
uint64_t len = sz;
if (num_sg == max_num_sg) {
- DBG("virtio: too many write descriptors in \n"
+ DBG("virtio: too many write descriptors in\n"
"indirect table");
goto out;
}
ioctl(fd, SHARE_BUF, &pa);
- iov[num_sg].iov_base = mmap (NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ iov[num_sg].iov_base = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
iov[num_sg].iov_base += pa & 0xfff;
if (!iov[num_sg].iov_base) {
@@ -519,7 +530,8 @@ out:
return ok;
}
-static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
+static void *virtqueue_alloc_element(size_t sz, unsigned out_num,
+ unsigned in_num)
{
VirtQueueElement *elem;
size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
@@ -529,7 +541,8 @@ static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_nu
size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
- /* TODO: Add check for requested size
+ /*
+ * TODO: Add check for requested size
*
* assert(sz >= sizeof(VirtQueueElement));
*/
@@ -646,8 +659,10 @@ bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
unsigned int *head)
{
- /* Grab the next descriptor number they're advertising, and increment
- * the index we've seen. */
+ /*
+ * Grab the next descriptor number they're advertising, and increment
+ * the index we've seen.
+ */
*head = vring_avail_ring(vq, idx % vq->vring.num);
/* If their number is silly, that's a fatal mistake. */
@@ -685,7 +700,7 @@ int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
*next = desc->next;
if (*next >= max) {
- DBG( "Desc next is %u", *next);
+ DBG("Desc next is %u", *next);
return VIRTQUEUE_READ_DESC_ERROR;
}
@@ -799,113 +814,114 @@ err:
}
}
-void print_neg_flag(uint64_t neg_flag, bool read) {
-
- if (read)
+void print_neg_flag(uint64_t neg_flag, bool read)
+{
+ if (read) {
DBG("Read:\n\t");
- else
+ } else {
DBG("Write:\n\t");
+ }
switch (neg_flag) {
- case VIRTIO_MMIO_MAGIC_VALUE: //0x000
- DBG("VIRTIO_MMIO_MAGIC_VALUE\n");
- break;
- case VIRTIO_MMIO_VERSION: //0x004
- DBG("VIRTIO_MMIO_VERSION\n");
- break;
- case VIRTIO_MMIO_DEVICE_ID: //0x008
- DBG("VIRTIO_MMIO_DEVICE_ID\n");
- break;
- case VIRTIO_MMIO_VENDOR_ID: //0x00c
- DBG("VIRTIO_MMIO_VENDOR_ID\n");
- break;
- case VIRTIO_MMIO_DEVICE_FEATURES: //0x010
- DBG("VIRTIO_MMIO_DEVICE_FEATURES\n");
- break;
- case VIRTIO_MMIO_DEVICE_FEATURES_SEL: //0x014
- DBG("VIRTIO_MMIO_DEVICE_FEATURES_SEL\n");
- break;
- case VIRTIO_MMIO_DRIVER_FEATURES: //0x020
- DBG("VIRTIO_MMIO_DRIVER_FEATURES\n");
- break;
- case VIRTIO_MMIO_DRIVER_FEATURES_SEL: //0x024
- DBG("VIRTIO_MMIO_DRIVER_FEATURES_SEL\n");
- break;
- case VIRTIO_MMIO_GUEST_PAGE_SIZE: //0x028
- DBG("VIRTIO_MMIO_GUEST_PAGE_SIZE\n");
- break;
- case VIRTIO_MMIO_QUEUE_SEL: //0x030
- DBG("VIRTIO_MMIO_QUEUE_SEL\n");
- break;
- case VIRTIO_MMIO_QUEUE_NUM_MAX: //0x034
- DBG("VIRTIO_MMIO_QUEUE_NUM_MAX\n");
- break;
- case VIRTIO_MMIO_QUEUE_NUM: //0x038
- DBG("VIRTIO_MMIO_QUEUE_NUM\n");
- break;
- case VIRTIO_MMIO_QUEUE_ALIGN: //0x03c
- DBG("VIRTIO_MMIO_QUEUE_ALIGN\n");
- break;
- case VIRTIO_MMIO_QUEUE_PFN: //0x040
- DBG("VIRTIO_MMIO_QUEUE_PFN\n");
- break;
- case VIRTIO_MMIO_QUEUE_READY: //0x044
- DBG("VIRTIO_MMIO_QUEUE_READY\n");
- break;
- case VIRTIO_MMIO_QUEUE_NOTIFY: //0x050
- DBG("VIRTIO_MMIO_QUEUE_NOTIFY\n");
- break;
- case VIRTIO_MMIO_INTERRUPT_STATUS: //0x060
- DBG("VIRTIO_MMIO_INTERRUPT_STATUS\n");
- break;
- case VIRTIO_MMIO_INTERRUPT_ACK: //0x064
- DBG("VIRTIO_MMIO_INTERRUPT_ACK\n");
- break;
- case VIRTIO_MMIO_STATUS: //0x070
- DBG("VIRTIO_MMIO_STATUS\n");
- break;
- case VIRTIO_MMIO_QUEUE_DESC_LOW: //0x080
- DBG("VIRTIO_MMIO_QUEUE_DESC_LOW\n");
- break;
- case VIRTIO_MMIO_QUEUE_DESC_HIGH: //0x084
- DBG("VIRTIO_MMIO_QUEUE_DESC_HIGH\n");
- break;
- case VIRTIO_MMIO_QUEUE_AVAIL_LOW: //0x090
- DBG("VIRTIO_MMIO_QUEUE_AVAIL_LOW\n");
- break;
- case VIRTIO_MMIO_QUEUE_AVAIL_HIGH: //0x094
- DBG("VIRTIO_MMIO_QUEUE_AVAIL_HIGH\n");
- break;
- case VIRTIO_MMIO_QUEUE_USED_LOW: //0x0a0
- DBG("VIRTIO_MMIO_QUEUE_USED_LOW\n");
- break;
- case VIRTIO_MMIO_QUEUE_USED_HIGH: //0x0a4
- DBG("VIRTIO_MMIO_QUEUE_USED_HIGH\n");
- break;
- case VIRTIO_MMIO_SHM_SEL: //0x0ac
- DBG("VIRTIO_MMIO_SHM_SEL\n");
- break;
- case VIRTIO_MMIO_SHM_LEN_LOW: //0x0b0
- DBG("VIRTIO_MMIO_SHM_LEN_LOW\n");
- break;
- case VIRTIO_MMIO_SHM_LEN_HIGH: //0x0b4
- DBG("VIRTIO_MMIO_SHM_LEN_HIGH\n");
- break;
- case VIRTIO_MMIO_SHM_BASE_LOW: //0x0b8
- DBG("VIRTIO_MMIO_SHM_BASE_LOW\n");
- break;
- case VIRTIO_MMIO_SHM_BASE_HIGH: //0x0bc
- DBG("VIRTIO_MMIO_SHM_BASE_HIGH\n");
- break;
- case VIRTIO_MMIO_CONFIG_GENERATION: //0x0fc
- DBG("VIRTIO_MMIO_CONFIG_GENERATION\n");
- break;
- case VIRTIO_MMIO_CONFIG: //0x100
- DBG("VIRTIO_MMIO_CONFIG\n");
- break;
- default:
- DBG("Negotiation flag Unknown: %ld\n", neg_flag);
- return;
+ case VIRTIO_MMIO_MAGIC_VALUE: /* 0x000 */
+ DBG("VIRTIO_MMIO_MAGIC_VALUE\n");
+ break;
+ case VIRTIO_MMIO_VERSION: /* 0x004 */
+ DBG("VIRTIO_MMIO_VERSION\n");
+ break;
+ case VIRTIO_MMIO_DEVICE_ID: /* 0x008 */
+ DBG("VIRTIO_MMIO_DEVICE_ID\n");
+ break;
+ case VIRTIO_MMIO_VENDOR_ID: /* 0x00c */
+ DBG("VIRTIO_MMIO_VENDOR_ID\n");
+ break;
+ case VIRTIO_MMIO_DEVICE_FEATURES: /* 0x010 */
+ DBG("VIRTIO_MMIO_DEVICE_FEATURES\n");
+ break;
+ case VIRTIO_MMIO_DEVICE_FEATURES_SEL: /* 0x014 */
+ DBG("VIRTIO_MMIO_DEVICE_FEATURES_SEL\n");
+ break;
+ case VIRTIO_MMIO_DRIVER_FEATURES: /* 0x020 */
+ DBG("VIRTIO_MMIO_DRIVER_FEATURES\n");
+ break;
+ case VIRTIO_MMIO_DRIVER_FEATURES_SEL: /* 0x024 */
+ DBG("VIRTIO_MMIO_DRIVER_FEATURES_SEL\n");
+ break;
+ case VIRTIO_MMIO_GUEST_PAGE_SIZE: /* 0x028 */
+ DBG("VIRTIO_MMIO_GUEST_PAGE_SIZE\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_SEL: /* 0x030 */
+ DBG("VIRTIO_MMIO_QUEUE_SEL\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_NUM_MAX: /* 0x034 */
+ DBG("VIRTIO_MMIO_QUEUE_NUM_MAX\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_NUM: /* 0x038 */
+ DBG("VIRTIO_MMIO_QUEUE_NUM\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_ALIGN: /* 0x03c */
+ DBG("VIRTIO_MMIO_QUEUE_ALIGN\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_PFN: /* 0x040 */
+ DBG("VIRTIO_MMIO_QUEUE_PFN\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_READY: /* 0x044 */
+ DBG("VIRTIO_MMIO_QUEUE_READY\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_NOTIFY: /* 0x050 */
+ DBG("VIRTIO_MMIO_QUEUE_NOTIFY\n");
+ break;
+ case VIRTIO_MMIO_INTERRUPT_STATUS: /* 0x060 */
+ DBG("VIRTIO_MMIO_INTERRUPT_STATUS\n");
+ break;
+ case VIRTIO_MMIO_INTERRUPT_ACK: /* 0x064 */
+ DBG("VIRTIO_MMIO_INTERRUPT_ACK\n");
+ break;
+ case VIRTIO_MMIO_STATUS: /* 0x070 */
+ DBG("VIRTIO_MMIO_STATUS\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_DESC_LOW: /* 0x080 */
+ DBG("VIRTIO_MMIO_QUEUE_DESC_LOW\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_DESC_HIGH: /* 0x084 */
+ DBG("VIRTIO_MMIO_QUEUE_DESC_HIGH\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_AVAIL_LOW: /* 0x090 */
+ DBG("VIRTIO_MMIO_QUEUE_AVAIL_LOW\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_AVAIL_HIGH: /* 0x094 */
+ DBG("VIRTIO_MMIO_QUEUE_AVAIL_HIGH\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_USED_LOW: /* 0x0a0 */
+ DBG("VIRTIO_MMIO_QUEUE_USED_LOW\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_USED_HIGH: /* 0x0a4 */
+ DBG("VIRTIO_MMIO_QUEUE_USED_HIGH\n");
+ break;
+ case VIRTIO_MMIO_SHM_SEL: /* 0x0ac */
+ DBG("VIRTIO_MMIO_SHM_SEL\n");
+ break;
+ case VIRTIO_MMIO_SHM_LEN_LOW: /* 0x0b0 */
+ DBG("VIRTIO_MMIO_SHM_LEN_LOW\n");
+ break;
+ case VIRTIO_MMIO_SHM_LEN_HIGH: /* 0x0b4 */
+ DBG("VIRTIO_MMIO_SHM_LEN_HIGH\n");
+ break;
+ case VIRTIO_MMIO_SHM_BASE_LOW: /* 0x0b8 */
+ DBG("VIRTIO_MMIO_SHM_BASE_LOW\n");
+ break;
+ case VIRTIO_MMIO_SHM_BASE_HIGH: /* 0x0bc */
+ DBG("VIRTIO_MMIO_SHM_BASE_HIGH\n");
+ break;
+ case VIRTIO_MMIO_CONFIG_GENERATION: /* 0x0fc */
+ DBG("VIRTIO_MMIO_CONFIG_GENERATION\n");
+ break;
+ case VIRTIO_MMIO_CONFIG: /* 0x100 */
+ DBG("VIRTIO_MMIO_CONFIG\n");
+ break;
+ default:
+ DBG("Negotiation flag Unknown: %ld\n", neg_flag);
+ return;
}
}
@@ -942,8 +958,8 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
int vhost_user_loopback_eventfd = 0;
-void *loopback_event_select(void *data) {
-
+void *loopback_event_select(void *data)
+{
int retval;
uint64_t eftd_ctr;
fd_set rfds;
@@ -957,17 +973,18 @@ void *loopback_event_select(void *data) {
FD_ZERO(&rfds);
FD_SET(vhost_user_loopback_eventfd, &rfds);
- while(1) {
+ while (1) {
- retval = select(vhost_user_loopback_eventfd+1, &rfds, NULL, NULL, NULL);
+ retval = select(vhost_user_loopback_eventfd + 1,
+ &rfds, NULL, NULL, NULL);
- if (retval == -1){
+ if (retval == -1) {
DBG("\nselect() error. Exiting...");
exit(EXIT_FAILURE);
} else if (retval > 0) {
s = read(vhost_user_loopback_eventfd, &eftd_ctr, sizeof(uint64_t));
- if (s != sizeof(uint64_t)){
+ if (s != sizeof(uint64_t)) {
DBG("\neventfd read error. Exiting...");
exit(1);
} else {
@@ -991,7 +1008,9 @@ void event_notifier_set_handler(EventNotifier *e,
if (vhost_user_loopback_eventfd > 0) {
ret = pthread_create(&thread_id, NULL, loopback_event_select, NULL);
- if (ret != 0) exit(1);
+ if (ret != 0) {
+ exit(1);
+ }
}
}
@@ -1005,8 +1024,10 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
event_notifier_set_handler(&vq->guest_notifier, NULL);
}
if (!assign) {
- /* Test and clear notifier before closing it,
- * in case poll callback didn't have time to run. */
+ /*
+ * Test and clear notifier before closing it,
+ * in case poll callback didn't have time to run.
+ */
virtio_queue_guest_notifier_read(&vq->guest_notifier);
}
}
@@ -1016,7 +1037,7 @@ EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
return &vq->guest_notifier;
}
-int virtio_mmio_set_guest_notifier(VirtIODevice *vdev, int n, bool assign,
+int virtio_loopback_set_guest_notifier(VirtIODevice *vdev, int n, bool assign,
bool with_irqfd)
{
VirtioDeviceClass *vdc = vdev->vdev_class;
@@ -1036,7 +1057,7 @@ int virtio_mmio_set_guest_notifier(VirtIODevice *vdev, int n, bool assign,
return 0;
}
-int virtio_mmio_set_guest_notifiers(VirtIODevice *vdev, int nvqs,
+int virtio_loopback_set_guest_notifiers(VirtIODevice *vdev, int nvqs,
bool assign)
{
bool with_irqfd = false;
@@ -1049,7 +1070,7 @@ int virtio_mmio_set_guest_notifiers(VirtIODevice *vdev, int nvqs,
break;
}
- r = virtio_mmio_set_guest_notifier(vdev, n, assign, with_irqfd);
+ r = virtio_loopback_set_guest_notifier(vdev, n, assign, with_irqfd);
if (r < 0) {
goto assign_error;
}
@@ -1058,7 +1079,7 @@ int virtio_mmio_set_guest_notifiers(VirtIODevice *vdev, int nvqs,
return 0;
assign_error:
- DBG("Error return virtio_mmio_set_guest_notifiers\n");
+ DBG("Error return virtio_loopback_set_guest_notifiers\n");
return r;
}
@@ -1111,7 +1132,8 @@ int virtio_bus_set_host_notifier(VirtioBus *vbus, int n, bool assign)
/* On success, ioeventfd ownership belongs to the caller. */
int virtio_bus_grab_ioeventfd(VirtioBus *bus)
{
- /* vhost can be used even if ioeventfd=off in the proxy device,
+ /*
+ * vhost can be used even if ioeventfd=off in the proxy device,
* so do not check k->ioeventfd_enabled.
*/
if (!bus->ioeventfd_assign) {
@@ -1119,7 +1141,8 @@ int virtio_bus_grab_ioeventfd(VirtioBus *bus)
}
if (bus->ioeventfd_grabbed == 0 && bus->ioeventfd_started) {
- /* Remember that we need to restart ioeventfd
+ /*
+ * Remember that we need to restart ioeventfd
* when ioeventfd_grabbed becomes zero.
*/
bus->ioeventfd_started = true;
@@ -1140,7 +1163,7 @@ bool virtio_device_disabled(VirtIODevice *vdev)
int prev_level = 0;
-void virtio_mmio_update_irq(VirtIODevice *vdev)
+void virtio_loopback_update_irq(VirtIODevice *vdev)
{
int level, irq_num = 44;
pthread_t my_thread_id;
@@ -1172,9 +1195,10 @@ void virtio_notify_vector(VirtIODevice *vdev)
return;
}
- virtio_mmio_update_irq(vdev);
+ virtio_loopback_update_irq(vdev);
- /* TODO: substitue the previous line with the
+ /*
+     * TODO: substitute the previous line with the
* following when it's implemented
*
* if (k->notify) {
@@ -1208,13 +1232,15 @@ void virtio_queue_notify(VirtIODevice *vdev, int n)
}
-static uint64_t virtio_mmio_read(VirtIODevice *vdev, uint64_t offset, unsigned size)
+static uint64_t virtio_loopback_read(VirtIODevice *vdev, uint64_t offset,
+ unsigned size)
{
- print_neg_flag (offset, 1);
+ print_neg_flag(offset, 1);
if (!vdev) {
- /* If no backend is present, we treat most registers as
+ /*
+ * If no backend is present, we treat most registers as
* read-as-zero, except for the magic number, version and
* vendor ID. This is not strictly sanctioned by the virtio
* spec, but it allows us to provide transports with no backend
@@ -1279,14 +1305,17 @@ static uint64_t virtio_mmio_read(VirtIODevice *vdev, uint64_t offset, unsigned s
return VIRTQUEUE_MAX_SIZE;
case VIRTIO_MMIO_QUEUE_PFN:
if (!proxy->legacy) {
- DBG("VIRTIO_MMIO_QUEUE_PFN: read from legacy register (0x%lx) in non-legacy mode\n", offset);
+ DBG("VIRTIO_MMIO_QUEUE_PFN: read from legacy register (0x%lx) "
+ "in non-legacy mode\n", offset);
return 0;
}
- return virtio_queue_get_addr(vdev, vdev->queue_sel) >> proxy->guest_page_shift;
+ return virtio_queue_get_addr(vdev, vdev->queue_sel) >>
+ proxy->guest_page_shift;
case VIRTIO_MMIO_QUEUE_READY:
if (proxy->legacy) {
- DBG("VIRTIO_MMIO_QUEUE_READY: read from legacy register (0x%lx) in non-legacy mode\n", offset);
+ DBG("VIRTIO_MMIO_QUEUE_READY: read from legacy register (0x%lx) "
+ "in non-legacy mode\n", offset);
return 0;
}
/* TODO: To be implemented */
@@ -1296,12 +1325,13 @@ static uint64_t virtio_mmio_read(VirtIODevice *vdev, uint64_t offset, unsigned s
return vdev->status;
case VIRTIO_MMIO_CONFIG_GENERATION:
if (proxy->legacy) {
- DBG("VIRTIO_MMIO_CONFIG_GENERATION: read from legacy register (0x%lx) in non-legacy mode\n", offset);
+ DBG("VIRTIO_MMIO_CONFIG_GENERATION: read from legacy "
+ "register (0x%lx) in non-legacy mode\n", offset);
return 0;
}
return vdev->generation;
- case VIRTIO_MMIO_SHM_LEN_LOW:
- case VIRTIO_MMIO_SHM_LEN_HIGH:
+ case VIRTIO_MMIO_SHM_LEN_LOW:
+ case VIRTIO_MMIO_SHM_LEN_HIGH:
/*
* VIRTIO_MMIO_SHM_SEL is unimplemented
* according to the linux driver, if region length is -1
@@ -1323,7 +1353,8 @@ static uint64_t virtio_mmio_read(VirtIODevice *vdev, uint64_t offset, unsigned s
case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
case VIRTIO_MMIO_QUEUE_USED_LOW:
case VIRTIO_MMIO_QUEUE_USED_HIGH:
- DBG("VIRTIO_MMIO_QUEUE_USED_HIGH: read of write-only register (0x%lx)\n", offset);
+ DBG("VIRTIO_MMIO_QUEUE_USED_HIGH: read of write-only "
+ "register (0x%lx)\n", offset);
return 0;
default:
DBG("read: bad register offset (0x%lx)\n", offset);
@@ -1333,14 +1364,15 @@ static uint64_t virtio_mmio_read(VirtIODevice *vdev, uint64_t offset, unsigned s
}
-void virtio_mmio_write(VirtIODevice *vdev, uint64_t offset, uint64_t value,
- unsigned size)
+void virtio_loopback_write(VirtIODevice *vdev, uint64_t offset,
+ uint64_t value, unsigned size)
{
- print_neg_flag (offset, 0);
+ print_neg_flag(offset, 0);
if (!vdev) {
- /* If no backend is present, we just make all registers
+ /*
+ * If no backend is present, we just make all registers
* write-ignored. This allows us to provide transports with
* no backend plugged in.
*/
@@ -1411,14 +1443,16 @@ void virtio_mmio_write(VirtIODevice *vdev, uint64_t offset, uint64_t value,
break;
case VIRTIO_MMIO_QUEUE_ALIGN:
if (!proxy->legacy) {
- DBG("write to legacy register (0x%lx) in non-legacy mode\n", offset);
+ DBG("write to legacy register (0x%lx) in "
+ "non-legacy mode\n", offset);
return;
}
/* TODO: To be implemented */
break;
case VIRTIO_MMIO_QUEUE_PFN:
if (!proxy->legacy) {
- DBG("write to legacy register (0x%lx) in non-legacy mode\n", offset);
+ DBG("write to legacy register (0x%lx) in "
+ "non-legacy mode\n", offset);
return;
}
if (value == 0) {
@@ -1426,7 +1460,9 @@ void virtio_mmio_write(VirtIODevice *vdev, uint64_t offset, uint64_t value,
} else {
(void)value;
uint64_t desc_addr;
- desc_addr = (uint64_t)mmap (NULL, 16*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ desc_addr = (uint64_t)mmap(NULL, 16 * PAGE_SIZE,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
virtio_queue_set_addr(vdev, vdev->queue_sel,
desc_addr);
@@ -1434,7 +1470,8 @@ void virtio_mmio_write(VirtIODevice *vdev, uint64_t offset, uint64_t value,
break;
case VIRTIO_MMIO_QUEUE_READY:
if (proxy->legacy) {
- DBG("write to non-legacy register (0x%lx) in legacy mode\n", offset);
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
return;
}
/* TODO: To be implemented */
@@ -1450,10 +1487,11 @@ void virtio_mmio_write(VirtIODevice *vdev, uint64_t offset, uint64_t value,
break;
case VIRTIO_MMIO_STATUS:
- /* TODO: Add it in a future release later
+ /*
+ * TODO: Add it in a future release later
*
* if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
- * virtio_mmio_stop_ioeventfd(proxy);
+ * virtio_loopback_stop_ioeventfd(proxy);
* }
*/
@@ -1465,53 +1503,60 @@ void virtio_mmio_write(VirtIODevice *vdev, uint64_t offset, uint64_t value,
virtio_set_status(vdev, value & 0xff);
- /* TODO: Check if this is still needed
+ /*
+ * TODO: Check if this is still needed
*
* if (vdev->status == 0) {
* virtio_reset(vdev);
- * virtio_mmio_soft_reset(proxy);
+ * virtio_loopback_soft_reset(proxy);
* }
*/
break;
case VIRTIO_MMIO_QUEUE_DESC_LOW:
if (proxy->legacy) {
- DBG("write to non-legacy register (0x%lx) in legacy mode\n", offset);
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
return;
}
/* TODO: To be implemented */
break;
case VIRTIO_MMIO_QUEUE_DESC_HIGH:
if (proxy->legacy) {
- DBG("write to non-legacy register (0x%lx) in legacy mode\n", offset);
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
return;
}
/* TODO: To be implemented */
break;
case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
if (proxy->legacy) {
- DBG("write to non-legacy register (0x%lx) in legacy mode\n", offset);
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
return;
}
/* TODO: To be implemented */
break;
case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
if (proxy->legacy) {
- DBG("write to non-legacy register (0x%lx) in legacy mode\n", offset);
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
return;
}
/* TODO: To be implemented */
break;
case VIRTIO_MMIO_QUEUE_USED_LOW:
if (proxy->legacy) {
- DBG("write to non-legacy register (0x%lx) in legacy mode\n", offset);
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
return;
}
/* TODO: To be implemented */
break;
case VIRTIO_MMIO_QUEUE_USED_HIGH:
if (proxy->legacy) {
- DBG("write to non-legacy register (0x%lx) in legacy mode\n", offset);
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
return;
}
/* TODO: To be implemented */
@@ -1534,25 +1579,34 @@ void virtio_mmio_write(VirtIODevice *vdev, uint64_t offset, uint64_t value,
VirtIODevice *global_vdev;
VirtioBus *global_vbus;
-void adapter_read_write_cb (void) {
-
- /* If you want to print all the incoming events enable the next line
+void adapter_read_write_cb(void)
+{
+ /*
+     * If you enable the next line, all the incoming
+     * read/write events will be printed:
*
* print_neg_flag (address->notification, address->read);
*/
if (address->read) {
- address->data = virtio_mmio_read(global_vdev, address->notification, address->size);
+ address->data = virtio_loopback_read(global_vdev,
+ address->notification, address->size);
} else {
- virtio_mmio_write(global_vdev, address->notification, address->data, address->size);
+ virtio_loopback_write(global_vdev, address->notification,
+ address->data, address->size);
}
+
+ /*
+     * Notify the driver that we have completed
+     * all the required actions.
+ */
(void)ioctl(fd, WAKEUP);
}
-void *my_select(void *data) {
-
+void *driver_event_select(void *data)
+{
int retval;
(void) data;
@@ -1562,21 +1616,21 @@ void *my_select(void *data) {
FD_ZERO(&rfds);
FD_SET(efd, &rfds);
- while(1) {
+ while (1) {
- retval = select(efd+1, &rfds, NULL, NULL, NULL);
+ retval = select(efd + 1, &rfds, NULL, NULL, NULL);
- if (retval == -1){
+ if (retval == -1) {
DBG("\nselect() error. Exiting...");
exit(EXIT_FAILURE);
} else if (retval > 0) {
s = read(efd, &eftd_ctr, sizeof(uint64_t));
- if (s != sizeof(uint64_t)){
+ if (s != sizeof(uint64_t)) {
DBG("\neventfd read error. Exiting...");
exit(1);
} else {
- adapter_read_write_cb ();
+ adapter_read_write_cb();
}
} else if (retval == 0) {
@@ -1586,8 +1640,8 @@ void *my_select(void *data) {
}
-void create_rng_struct (void) {
-
+void create_rng_struct(void)
+{
device_info.magic = 0x74726976;
device_info.version = 0x1;
device_info.device_id = 0x4;
@@ -1605,8 +1659,9 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
int i;
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
- if (vdev->vq[i].vring.num == 0)
+ if (vdev->vq[i].vring.num == 0) {
break;
+ }
}
if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
@@ -1617,7 +1672,8 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
vdev->vq[i].vring.num_default = queue_size;
vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
vdev->vq[i].handle_output = handle_output;
- vdev->vq[i].used_elems = (VirtQueueElement *) malloc (sizeof(VirtQueueElement) * queue_size);
+ vdev->vq[i].used_elems = (VirtQueueElement *)malloc(sizeof(VirtQueueElement)
+ * queue_size);
return &vdev->vq[i];
}
@@ -1654,13 +1710,13 @@ void virtio_dev_init(VirtIODevice *vdev, const char *name,
vdev->use_guest_notifier_mask = true;
}
-static bool virtio_mmio_ioeventfd_enabled(VirtIODevice *d)
+static bool virtio_loopback_ioeventfd_enabled(VirtIODevice *d)
{
return (proxy->flags & VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD) != 0;
}
/* TODO: This function might not be needed anymore */
-static int virtio_mmio_ioeventfd_assign(VirtIOMMIOProxy *d,
+static int virtio_loopback_ioeventfd_assign(VirtIOMMIOProxy *d,
EventNotifier *notifier,
int n, bool assign)
{
@@ -1678,16 +1734,16 @@ bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev)
return k->iommu_enabled(vdev);
}
-void virtio_mmio_bus_init(VirtioBus *k)
+void virtio_loopback_bus_init(VirtioBus *k)
{
- k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
- k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
- k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
+ k->set_guest_notifiers = virtio_loopback_set_guest_notifiers;
+ k->ioeventfd_enabled = virtio_loopback_ioeventfd_enabled;
+ k->ioeventfd_assign = virtio_loopback_ioeventfd_assign;
}
-int virtio_mmio_start(void) {
-
+int virtio_loopback_start(void)
+{
efd_data_t info;
pthread_t thread_id;
int ret = -1;
@@ -1696,15 +1752,14 @@ int virtio_mmio_start(void) {
(void)info;
fd = open("/dev/loopback", O_RDWR);
- if (fd < 0)
- {
- perror ("Open call failed");
+ if (fd < 0) {
+ perror("Open call failed");
return -1;
}
loopback_fd = fd;
/* Create eventfd */
- efd = eventfd(0,0);
+ efd = eventfd(0, 0);
if (efd == -1) {
DBG("\nUnable to create eventfd! Exiting...\n");
exit(EXIT_FAILURE);
@@ -1713,20 +1768,25 @@ int virtio_mmio_start(void) {
info.pid = getpid();
info.efd = efd;
+ /*
+ * Send the appropriate information to the driver
+     * so that it is able to trigger an eventfd
+ */
(void)ioctl(fd, EFD_INIT, &info);
/* Map notification mechanism */
/* Multiple mmaps: /dev/loopback-0/vqs, /dev/loopback-0/ctlr */
- address = mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- if (address == MAP_FAILED)
- {
- perror ("mmap operation failed");
+ address = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (address == MAP_FAILED) {
+ perror("mmap operation failed");
return -1;
}
/* Wait the eventfd */
- ret = pthread_create(&thread_id, NULL, my_select, NULL);
- if (ret != 0) exit(1);
+ ret = pthread_create(&thread_id, NULL, driver_event_select, NULL);
+ if (ret != 0) {
+ exit(1);
+ }
/* Fille the device info */
create_rng_struct();
@@ -1734,9 +1794,10 @@ int virtio_mmio_start(void) {
/* Start loopback transport */
(void)ioctl(fd, START_LOOPBACK, &device_info);
-
ret = pthread_join(thread_id, NULL);
- if (ret != 0) exit(1);
+ if (ret != 0) {
+ exit(1);
+ }
DBG("\nClosing eventfd. Exiting...\n");
close(efd);
diff --git a/virtio_loopback.h b/virtio_loopback.h
index 5400cd7..49ae219 100644
--- a/virtio_loopback.h
+++ b/virtio_loopback.h
@@ -1,20 +1,20 @@
/*
* Based on:
- * 1) virtio.h of Qemu project
+ * 1) virtio.h of QEMU project
*
* Copyright IBM, Corp. 2007
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
- * 2) virtio-mmio.h of Qemu project
+ * 2) virtio-mmio.h of QEMU project
*
* Copyright (c) 2011 Linaro Limited
*
* Author:
* Peter Maydell <peter.maydell@linaro.org>
*
- * 3) vhost.h of Qemu project
+ * 3) vhost.h of QEMU project
*
* Copyright 2022 Virtual Open Systems SAS.
*
@@ -43,15 +43,19 @@
/* Virtio vendor ID - Read Only */
#define VIRTIO_MMIO_VENDOR_ID 0x00c
-/* Bitmask of the features supported by the device (host)
- * (32 bits per set) - Read Only */
+/*
+ * Bitmask of the features supported by the device (host)
+ * (32 bits per set) - Read Only
+ */
#define VIRTIO_MMIO_DEVICE_FEATURES 0x010
/* Device (host) features set selector - Write Only */
#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014
-/* Bitmask of features activated by the driver (guest)
- * (32 bits per set) - Write Only */
+/*
+ * Bitmask of features activated by the driver (guest)
+ * (32 bits per set) - Write Only
+ */
#define VIRTIO_MMIO_DRIVER_FEATURES 0x020
/* Activated features set selector - Write Only */
@@ -117,8 +121,10 @@
/* Configuration atomicity value */
#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
-/* The config space is defined by each driver as
- * the per-driver configuration space - Read Write */
+/*
+ * The config space is defined by each driver as
+ * the per-driver configuration space - Read Write
+ */
#define VIRTIO_MMIO_CONFIG 0x100
/*
@@ -134,7 +140,7 @@
/* Virtio loopback driver related */
-/* Qemu defines */
+/* QEMU defines */
#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 2
#define VIRT_VERSION_LEGACY 1
@@ -150,7 +156,8 @@
#define PAGE_SIZE 4096
#define EFD_INIT _IOC(_IOC_WRITE, 'k', 1, sizeof(efd_data_t))
#define WAKEUP _IOC(_IOC_WRITE, 'k', 2, 0)
-#define START_LOOPBACK _IOC(_IOC_WRITE, 'k', 3, sizeof(virtio_device_info_struct_t))
+#define START_LOOPBACK _IOC(_IOC_WRITE, 'k', 3, \
+ sizeof(virtio_device_info_struct_t))
#define IRQ _IOC(_IOC_WRITE, 'k', 4, sizeof(int))
#define SHARE_VQS _IOC(_IOC_WRITE, 'k', 5, 0)
#define SHARE_BUF _IOC(_IOC_WRITE, 'k', 6, sizeof(uint64_t))
@@ -189,13 +196,17 @@ typedef struct VirtIOMMIOProxy {
#define VRING_PACKED_DESC_F_AVAIL 7
#define VRING_PACKED_DESC_F_USED 15
-/* The Host uses this in used->flags to advise the Guest: don't kick me when
+/*
+ * The Host uses this in used->flags to advise the Guest: don't kick me when
* you add a buffer. It's unreliable, so it's simply an optimization. Guest
- * will still kick if it's out of buffers. */
+ * will still kick if it's out of buffers.
+ */
#define VRING_USED_F_NO_NOTIFY 1
-/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
+/*
+ * The Guest uses this in avail->flags to advise the Host: don't interrupt me
* when you consume a buffer. It's unreliable, so it's simply an
- * optimization. */
+ * optimization.
+ */
#define VRING_AVAIL_F_NO_INTERRUPT 1
/* Enable events in packed ring. */
@@ -218,13 +229,18 @@ typedef struct VirtIOMMIOProxy {
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28
-/* The Guest publishes the used index for which it expects an interrupt
- * at the end of the avail ring. Host should ignore the avail->flags field. */
-/* The Host publishes the avail index for which it expects a kick
- * at the end of the used ring. Guest should ignore the used->flags field. */
+/*
+ * The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ */
+/*
+ * The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
#define VIRTIO_RING_F_EVENT_IDX 29
-/* Alignment requirements for vring elements.
+/*
+ * Alignment requirements for vring elements.
* When using pre-virtio 1.0 layout, these fall out naturally.
*/
#define VRING_AVAIL_ALIGN_SIZE 2
@@ -232,8 +248,7 @@ typedef struct VirtIOMMIOProxy {
#define VRING_DESC_ALIGN_SIZE 16
/******************/
-typedef struct VRing
-{
+typedef struct VRing {
unsigned int num;
unsigned int num_default;
unsigned int align;
@@ -242,8 +257,7 @@ typedef struct VRing
uint64_t used;
} VRing;
-typedef struct VRingDesc
-{
+typedef struct VRingDesc {
uint64_t addr;
uint32_t len;
uint16_t flags;
@@ -257,28 +271,24 @@ typedef struct VRingPackedDesc {
uint16_t flags;
} VRingPackedDesc;
-typedef struct VRingAvail
-{
+typedef struct VRingAvail {
uint16_t flags;
uint16_t idx;
uint16_t ring[];
} VRingAvail;
-typedef struct VRingUsedElem
-{
+typedef struct VRingUsedElem {
uint32_t id;
uint32_t len;
} VRingUsedElem;
-typedef struct VRingUsed
-{
+typedef struct VRingUsed {
uint16_t flags;
uint16_t idx;
VRingUsedElem ring[];
} VRingUsed;
-typedef struct VirtQueueElement
-{
+typedef struct VirtQueueElement {
unsigned int index;
unsigned int len;
unsigned int ndescs;
@@ -294,8 +304,7 @@ typedef struct VirtIODevice VirtIODevice;
typedef struct VirtQueue VirtQueue;
typedef void (*VirtIOHandleOutput)(VirtIODevice *, VirtQueue *);
-typedef struct VirtQueue
-{
+typedef struct VirtQueue {
VRing vring;
VirtQueueElement *used_elems;
@@ -330,7 +339,6 @@ typedef struct VirtQueue
EventNotifier guest_notifier;
EventNotifier host_notifier;
bool host_notifier_enabled;
- //QLIST_ENTRY(VirtQueue) node;
} VirtQueue;
typedef struct VirtIORNG VirtIORNG;
@@ -338,9 +346,7 @@ typedef struct VHostUserRNG VHostUserRNG;
typedef struct VirtioDeviceClass VirtioDeviceClass;
typedef struct VirtioBus VirtioBus;
-typedef struct VirtIODevice
-{
- //DeviceState parent_obj;
+typedef struct VirtIODevice {
VirtioBus *vbus;
VirtioDeviceClass *vdev_class;
const char *name;
@@ -356,7 +362,6 @@ typedef struct VirtIODevice
uint32_t generation;
int nvectors;
VirtQueue *vq;
- //MemoryListener listener;
uint16_t device_id;
bool vm_running;
bool broken; /* device in invalid state, needs reset */
@@ -366,14 +371,11 @@ typedef struct VirtIODevice
bool started;
bool start_on_kick; /* when virtio 1.0 feature has not been negotiated */
bool disable_legacy_check;
- //VMChangeStateEntry *vmstate;
char *bus_name;
uint8_t device_endian;
bool use_guest_notifier_mask;
VirtIORNG *vrng;
VHostUserRNG *vhrng;
- //AddressSpace *dma_as;
- //QLIST_HEAD(, VirtQueue) *vector_queues;
} VirtIODevice;
typedef struct efd_data {
@@ -389,14 +391,17 @@ typedef struct virtio_device_info_struct {
} virtio_device_info_struct_t;
-/* proto */
+
+/* Negotiation structs */
+
+typedef struct { int counter; } atomic_t;
+
typedef struct virtio_neg {
uint64_t notification;
uint64_t data;
uint64_t size;
bool read;
- bool done;
- bool request_op;
+ atomic_t done;
} virtio_neg_t;
@@ -498,18 +503,21 @@ typedef struct VirtioDeviceClass {
void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
void (*reset)(VirtIODevice *vdev);
void (*set_status)(VirtIODevice *vdev, uint8_t val);
- /* For transitional devices, this is a bitmap of features
+ /*
+ * For transitional devices, this is a bitmap of features
* that are only exposed on the legacy interface but not
* the modern one.
*/
uint64_t legacy_features;
- /* Test and clear event pending status.
+ /*
+ * Test and clear event pending status.
* Should be called after unmask to avoid losing events.
* If backend does not support masking,
* must check in frontend instead.
*/
bool (*guest_notifier_pending)(VirtIODevice *vdev, int n);
- /* Mask/unmask events from this vq. Any events reported
+ /*
+ * Mask/unmask events from this vq. Any events reported
* while masked will become pending.
* If backend does not support masking,
* must mask in frontend instead.
@@ -517,13 +525,15 @@ typedef struct VirtioDeviceClass {
void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask);
int (*start_ioeventfd)(VirtIODevice *vdev);
void (*stop_ioeventfd)(VirtIODevice *vdev);
- /* Saving and loading of a device; trying to deprecate save/load
+ /*
+ * Saving and loading of a device; trying to deprecate save/load
* use vmsd for new devices.
*/
- /* Post load hook in vmsd is called early while device is processed, and
- * when VirtIODevice isn't fully initialized. Devices should use this instead,
- * unless they specifically want to verify the migration stream as it's
- * processed, e.g. for bounds checking.
+ /*
+ * Post load hook in vmsd is called early while device is processed, and
+ * when VirtIODevice isn't fully initialized. Devices should use this
+ * instead, unless they specifically want to verify the migration stream
+ * as it's processed, e.g. for bounds checking.
*/
int (*post_load)(VirtIODevice *vdev);
bool (*primary_unplug_pending)(void *opaque);
@@ -537,10 +547,10 @@ void handle_input(VirtIODevice *vdev, VirtQueue *vq);
void *my_select(void *data);
void *wait_read_write(void *data);
void *my_notify(void *data);
-void create_rng_struct (void);
+void create_rng_struct(void);
void print_neg_flag(uint64_t neg_flag, bool read);
-void adapter_read_write_cb (void);
-int virtio_mmio_start(void);
+void adapter_read_write_cb(void);
+int virtio_loopback_start(void);
int virtio_queue_ready(VirtQueue *vq);
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
@@ -551,7 +561,8 @@ bool virtio_has_feature(uint64_t features, unsigned int fbit);
int virtio_queue_empty(VirtQueue *vq);
void *virtqueue_pop(VirtQueue *vq, size_t sz);
-void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len);
+void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len);
size_t iov_from_buf(const struct iovec *iov, unsigned int iov_cnt,
size_t offset, const void *buf, size_t bytes);
bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
@@ -571,7 +582,7 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
void virtio_dev_init(VirtIODevice *vdev, const char *name,
uint16_t device_id, size_t config_size);
-void virtio_mmio_bus_init(VirtioBus *k);
+void virtio_loopback_bus_init(VirtioBus *k);
int virtio_bus_set_host_notifier(VirtioBus *vbus, int n, bool assign);
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
@@ -594,17 +605,22 @@ void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
unsigned int max, unsigned int *next);
-/* Do we get callbacks when the ring is completely used, even if we've
- * suppressed them? */
+/*
+ * Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them?
+ */
#define VIRTIO_F_NOTIFY_ON_EMPTY 24
#define VIRTIO_CONFIG_S_FEATURES_OK 8
#define VIRTIO_CONFIG_S_DRIVER_OK 4
#define VIRTIO_F_VERSION_1 32
#define VIRTIO_F_ACCESS_PLATFORM 33
-/* Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) */
+/*
+ * Legacy name for VIRTIO_F_ACCESS_PLATFORM
+ * (for compatibility with old userspace)
+ */
#define VIRTIO_F_IOMMU_PLATFORM VIRTIO_F_ACCESS_PLATFORM
-/* Qemu Aligned functions */
+/* QEMU Aligned functions */
/*
* Round number down to multiple. Safe when m is not a power of 2 (see
* ROUND_DOWN for a faster version when a power of 2 is guaranteed).
diff --git a/virtio_rng.c b/virtio_rng.c
index 7fd7000..ca8f971 100644
--- a/virtio_rng.c
+++ b/virtio_rng.c
@@ -1,7 +1,7 @@
/*
* A virtio device implementing a hardware random number generator.
*
- * Based on virtio-rng.c of Qemu project
+ * Based on virtio-rng.c of QEMU project
* Copyright 2012 Red Hat, Inc.
* Copyright 2012 Amit Shah <amit.shah@redhat.com>
*
@@ -15,7 +15,7 @@
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
-#include <stdint.h> //Definition of uint64_t
+#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <sys/param.h>
@@ -87,7 +87,8 @@ void chr_read(VirtIORNG *vrng, const void *buf, size_t size)
virtqueue_push(vrng->vq, elem, len);
- /* TODO: We need tp free the elem
+ /*
+     * TODO: We need to free the elem
*
* g_free(elem);
*/
@@ -95,14 +96,21 @@ void chr_read(VirtIORNG *vrng, const void *buf, size_t size)
virtio_notify(vdev, vrng->vq);
if (!virtio_queue_empty(vrng->vq)) {
- /* If we didn't drain the queue, call virtio_rng_process
+ /*
+ * If we didn't drain the queue, call virtio_rng_process
* to take care of asking for more data as appropriate.
*/
virtio_rng_process(vrng);
}
}
-const char test_str[64] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
+const char test_str[64] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63};
void virtio_rng_process(VirtIORNG *vrng)
{
@@ -131,30 +139,30 @@ void handle_input(VirtIODevice *vdev, VirtQueue *vq)
virtio_rng_process(vdev->vrng);
}
-static void virtio_dev_class_init (VirtIODevice *vdev) {
-
- vdev->vdev_class = (VirtioDeviceClass *) malloc(sizeof(VirtioDeviceClass));
+static void virtio_dev_class_init(VirtIODevice *vdev)
+{
+ vdev->vdev_class = (VirtioDeviceClass *)malloc(sizeof(VirtioDeviceClass));
vdev->vdev_class->parent = vdev;
vdev->vdev_class->set_status = virtio_rng_set_status;
}
-void virtio_rng_init(VirtIODevice *vdev) {
-
- VirtIORNG *vrng = (VirtIORNG*) malloc (sizeof(VirtIORNG));
+void virtio_rng_init(VirtIODevice *vdev)
+{
+ VirtIORNG *vrng = (VirtIORNG *)malloc(sizeof(VirtIORNG));
vdev->vrng = vrng;
vrng->parent_obj = vdev;
vrng->vq = vdev->vq;
vrng->quota_remaining = LONG_MAX;
/* Prepare dev_class */
- virtio_dev_class_init (vdev);
+ virtio_dev_class_init(vdev);
}
-void virtio_rng_realize(void) {
-
+void virtio_rng_realize(void)
+{
/* prepare procy and virtio dev*/
- proxy = (VirtIOMMIOProxy*) malloc (sizeof(VirtIOMMIOProxy));
+ proxy = (VirtIOMMIOProxy *)malloc(sizeof(VirtIOMMIOProxy));
virtio_dev_init(global_vdev, "virtio-rng", 4, 0);
diff --git a/virtio_rng.h b/virtio_rng.h
index 042f0fd..812ddb4 100644
--- a/virtio_rng.h
+++ b/virtio_rng.h
@@ -39,7 +39,8 @@ typedef struct VirtIORNG {
VirtIORNGConf conf;
RngBackend *rng;
- /* We purposefully don't migrate this state. The quota will reset on the
+ /*
+ * We purposefully don't migrate this state. The quota will reset on the
* destination as a result. Rate limiting is host state, not guest state.
*/
int64_t quota_remaining;