-rw-r--r--  .gitignore                 7
-rw-r--r--  .gitreview                 5
-rw-r--r--  Makefile                  58
-rw-r--r--  README.md                 16
-rw-r--r--  adapter.c                314
-rw-r--r--  event_notifier.c         205
-rw-r--r--  event_notifier.h          54
-rw-r--r--  linux/virtio_gpio.h       72
-rw-r--r--  linux/virtio_snd.h       334
-rw-r--r--  queue.h                  576
-rw-r--r--  vhost_loopback.c         469
-rw-r--r--  vhost_loopback.h          80
-rw-r--r--  vhost_user_blk.c         520
-rw-r--r--  vhost_user_blk.h          59
-rw-r--r--  vhost_user_gpio.c        381
-rw-r--r--  vhost_user_gpio.h         37
-rw-r--r--  vhost_user_input.c       235
-rw-r--r--  vhost_user_input.h       185
-rw-r--r--  vhost_user_loopback.c   1782
-rw-r--r--  vhost_user_loopback.h    969
-rw-r--r--  vhost_user_rng.c         201
-rw-r--r--  vhost_user_rng.h          44
-rw-r--r--  vhost_user_sound.c       313
-rw-r--r--  vhost_user_sound.h        40
-rw-r--r--  virtio_blk.h              95
-rw-r--r--  virtio_input.c           286
-rw-r--r--  virtio_loopback.c       2041
-rw-r--r--  virtio_loopback.h        709
-rw-r--r--  virtio_rng.c             179
-rw-r--r--  virtio_rng.h              55
30 files changed, 10321 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..702ac5b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+adapter
+*.sh
+.adapter*
+make_bins.sh
+*.o
+*.c.*
+*.h.*
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..7b04db2
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,5 @@
+[gerrit]
+host=gerrit.automotivelinux.org
+port=29418
+project=src/virtio/virtio-loopback-adapter
+defaultbranch=master
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..2aa7c13
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,58 @@
+# Copyright 2022-2023 Virtual Open Systems SAS
+#
+# Authors:
+# Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+
+#CFLAGS := -Wall -Wextra -Werror
+#CFLAGS := -Wall -Wextra -Wno-unused-variable -Wno-unused-function
+CFLAGS := -Wno-unused-variable -Wno-unused-function -D_GNU_SOURCE -O2
+
+ifeq ($(ARCH), arm64)
+	# arm64 cross-compilation; plain "=" because make predefines CC,
+	# so "?=" would never take effect here
+	CC = aarch64-linux-gnu-gcc
+else
+	CC ?= gcc
+endif
+
+INCL += -I .
+DEPS = adapter.h vhost_user_loopback.h event_notifier.h virtio_loopback.h
+SRC_C = event_notifier.c vhost_user_loopback.c virtio_loopback.c virtio_rng.c virtio_input.c vhost_user_input.c vhost_user_blk.c vhost_user_rng.c vhost_user_sound.c vhost_user_gpio.c vhost_loopback.c adapter.c
+
+OBJS = $(SRC_C:.c=.o)
+BINS = adapter
+
+ifeq ($(DEBUG), 1)
+ CFLAGS += -DDEBUG
+endif
+
+all: $(BINS)
+
+$(BINS): $(OBJS)
+ @echo -e "CC\t$@"
+ $(CC) $(CFLAGS) $(INCL) $^ -o $@ -lpthread
+
+%.o: %.c
+ @echo -e "CC\t$@"
+ $(CC) $(CFLAGS) $(INCL) -c $< -o $@
+
+clean:
+ rm -f *.o *~ $(BINS)
+
+.PHONY: all clean
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..bf0b746
--- /dev/null
+++ b/README.md
@@ -0,0 +1,16 @@
+# virtio-loopback adapter repository
+
+This repository includes the beta version of the "virtio_loopback_adapter" application, which is part of the Virtio Loopback Design presented in this [document](https://git.virtualopensystems.com/virtio-loopback/docs/-/blob/master/design_docs). This work is carried out by Virtual Open Systems in the [Automotive Grade Linux](https://www.automotivegradelinux.org) community.
+
+As described in the design document, the adapter is only a part of a more complex architecture. If you want to see the implementation and build the other components, refer to the [virtio-loopback docs repository](https://git.virtualopensystems.com/virtio-loopback/docs/-/tree/beta-release).
+
+## Build the virtio-loopback adapter
+
+To build this project, use one of the following commands:
+- `make` for x86
+- `make ARCH=arm64` for arm64
+
+**NOTE**: You can also pass the parameter `DEBUG=1` to enable debug messages.
+
+Example of building the adapter with all available parameters:
+`make ARCH=arm64 DEBUG=1`
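+
+## Run the virtio-loopback adapter
+
+After building, start the adapter with a device name (`-d`) and, for the
+vhost-user devices, the path of the backend socket (`-s`). A minimal example,
+based on the built-in help of `adapter.c` (the socket path is illustrative):
+- `./adapter -d vhurng -s /path_to_socket/rng.sock`
+
+The optional `-qn` (number of queues) and `-qs` (queue size) parameters are
+currently used only by the `vhublk` device.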
diff --git a/adapter.c b/adapter.c
new file mode 100644
index 0000000..75b5681
--- /dev/null
+++ b/adapter.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2022-2023 Virtual Open Systems SAS
+ *
+ * Authors:
+ * Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
+ * Stefanos Gerangelos <s.gerangelos@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <sys/param.h>
+#include <assert.h>
+
+/* For socket */
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+/* Project header files */
+#include "virtio_loopback.h"
+#include "vhost_user_loopback.h"
+#include "virtio_rng.h"
+#include "vhost_user_rng.h"
+#include "vhost_user_blk.h"
+#include "vhost_user_input.h"
+#include "vhost_user_gpio.h"
+#include "vhost_user_sound.h"
+
+
+#ifdef DEBUG
+#define DBG(...) printf("adapter: " __VA_ARGS__)
+#else
+#define DBG(...)
+#endif /* DEBUG */
+
+
+/* Global variables */
+int client_sock;
+struct vhost_dev *dev;
+struct adapter_dev *adev;
+struct vhost_user *vudev;
+
+
+void vhost_user_adapter_init(void)
+{
+
+ DBG("Setup adapter data structures\n");
+
+ /* Init vhost-user device */
+ vudev = (struct vhost_user *)malloc(sizeof(struct vhost_user));
+
+ /* Init vhost device */
+ dev = (struct vhost_dev *)malloc(sizeof(struct vhost_dev));
+
+ /* Init virtio device */
+ global_vdev = (VirtIODevice *)malloc(sizeof(VirtIODevice));
+
+ /* Init virtio bus */
+ global_vbus = (VirtioBus *)malloc(sizeof(VirtioBus));
+ global_vbus->vdev = global_vdev;
+ global_vdev->vbus = global_vbus;
+ global_vdev->vhdev = dev;
+
+
+ /* Store virtio_dev reference into vhost_dev struct*/
+ dev->vdev = global_vdev;
+
+ /* Init adapter device */
+ adev = (struct adapter_dev *)malloc(sizeof(struct adapter_dev));
+ adev->vdev = dev;
+ adev->vudev = vudev;
+ adev->virtio_dev = global_vdev;
+ adev->vbus = global_vbus;
+}
+
+
+void client(char *sock_path)
+{
+ int rc, len;
+ struct sockaddr_un client_sockaddr;
+
+ DBG("Create shared socket with vhost-user-device\n");
+
+ /* Initialize the struct to zero */
+ memset(&client_sockaddr, 0, sizeof(struct sockaddr_un));
+
+ /*
+ * Create a UNIX socket
+ */
+ client_sock = socket(AF_UNIX, SOCK_STREAM, 0);
+    if (client_sock == -1) {
+        printf("SOCKET ERROR: %s\n", strerror(errno));
+        exit(1);
+    }
+
+ /*
+ * Set up the UNIX sockaddr structure
+ * by using AF_UNIX for the family and
+ * giving it a filepath to connect.
+ */
+ client_sockaddr.sun_family = AF_UNIX;
+    strncpy(client_sockaddr.sun_path, sock_path,
+            sizeof(client_sockaddr.sun_path) - 1);
+ len = sizeof(client_sockaddr);
+ rc = connect(client_sock, (struct sockaddr *) &client_sockaddr, len);
+ if (rc == -1) {
+ printf("CONNECT ERROR: Check the \"-s\" parameter\n");
+ close(client_sock);
+ exit(1);
+ }
+}
+
+static void help_args(void)
+{
+ printf("Run example:\n\t./adapter -s /path_to_socket/rng.sock\n"
+ "\t\t -d device_name\n"
+ "\t\t [ -qn number of queues ]\n"
+ "\t\t [ -qs size of queues ]\n"
+ "The 'device_name' can be one of the following:\n"
+ "\tvrng, vhurng, vhublk, vhuinput, vhusnd, vhugpio\n");
+}
+
+int find_arg(int argc, char **argv, char *str)
+{
+ int i;
+
+    /* Stop at argc - 1 so the returned value index is always valid */
+    for (i = 0; i < argc - 1; i++) {
+ if (!strcmp(argv[i], str)) {
+ return i + 1;
+ }
+ }
+ return -1;
+}
+
+int val_device_arg(char *str)
+{
+ char *adapter_devices[] = {"vrng", "vhurng", "vhublk", "vhuinput",
+ "vhusnd", "vhugpio"};
+    int adapter_devices_num = 6, i;
+
+ for (i = 0; i < adapter_devices_num; i++) {
+ if (!strcmp(adapter_devices[i], str)) {
+ return i + 1;
+ }
+ }
+
+ return 0;
+}
+
+bool check_vhu_device(char *str)
+{
+ char *vhu_devices[] = {"vhurng", "vhublk", "vhuinput", "vhusnd", "vhugpio"};
+ int vhu_devices_num = 5, i;
+
+ for (i = 0; i < vhu_devices_num; i++) {
+ if (!strcmp(vhu_devices[i], str)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void get_queue_num_size_args(int argc, char **argv,
+ int *eval_queue_num, int *eval_queue_size)
+{
+ int queue_num, queue_size, queue_num_id, queue_size_id;
+
+    /* All of -s, -d, -qn and -qs take a value: at least 9 argv entries */
+    if (argc < 9) {
+        return;
+    }
+
+ queue_num_id = find_arg(argc, argv, "-qn");
+ queue_size_id = find_arg(argc, argv, "-qs");
+
+    /* Check if both -qs and -qn exist */
+ if (queue_num_id < 0 || queue_size_id < 0) {
+ return;
+ }
+
+ queue_num = atoi(argv[queue_num_id]);
+ queue_size = atoi(argv[queue_size_id]);
+
+ /* Evaluate number of queues */
+ if (queue_num <= 0 || queue_num > 16) {
+ return;
+ }
+
+ /* Evaluate queues' size */
+ if (queue_size <= 0 || queue_size > 1024) {
+ return;
+ }
+
+ *eval_queue_num = queue_num;
+ *eval_queue_size = queue_size;
+}
+
+
+int main(int argc, char **argv)
+{
+ int socket_idx, device_idx, device_id;
+ bool vhost_user_enabled;
+ /* Assign default queue num and size */
+ int queue_num = 1, queue_size = 64;
+
+ /*
+ * Check if the user has provided all the required arguments.
+ * If not, print the help messages.
+ */
+
+ if (argc < 3) {
+ goto error_args;
+ }
+
+ device_idx = find_arg(argc, argv, "-d");
+
+ if (device_idx < 0) {
+ printf("You have not specified parameter \"-d\"\n");
+ goto error_args;
+ }
+
+    /* Validate the arguments */
+ device_id = val_device_arg(argv[device_idx]);
+
+ if (device_id == 0) {
+ goto error_args;
+ }
+
+ /* Check if this is a vhost-user device */
+ vhost_user_enabled = check_vhu_device(argv[device_idx]);
+
+ /* Check if a socket is needed and provided */
+ socket_idx = find_arg(argc, argv, "-s");
+
+ if ((socket_idx < 0) && (vhost_user_enabled)) {
+ printf("You have not specified parameter \"-s\"\n");
+ goto error_args;
+ }
+
+ /*
+ * Create the socket and connect to the backend.
+ * Enabled on vhost-user case
+ */
+ if (vhost_user_enabled) {
+ client(argv[socket_idx]);
+ }
+
+ /* Initialize the adapter data structures */
+ vhost_user_adapter_init();
+
+ /* Initialize the virtio/vhost-user device */
+ /* TODO: Switch numbers with name defs */
+ switch (device_id) {
+ case 1:
+ virtio_rng_realize();
+ break;
+ case 2:
+ vhost_user_rng_realize();
+ break;
+ case 3:
+ get_queue_num_size_args(argc, argv, &queue_num, &queue_size);
+ printf("Running vhublk with num %d and size %d\n",
+ queue_num, queue_size);
+ vhost_user_blk_realize(queue_num, queue_size);
+ break;
+ case 4:
+ vhost_user_input_init(global_vdev);
+ virtio_input_device_realize();
+ break;
+ case 5:
+ vus_device_realize();
+ break;
+ case 6:
+ vu_gpio_device_realize();
+ break;
+ default:
+ exit(1);
+ }
+
+ /*
+     * Start the loopback transport layer and communication with the loopback driver
+ */
+ virtio_loopback_start();
+
+ return 0;
+
+error_args:
+ help_args();
+ return 1;
+}
diff --git a/event_notifier.c b/event_notifier.c
new file mode 100644
index 0000000..5b7c1cd
--- /dev/null
+++ b/event_notifier.c
@@ -0,0 +1,205 @@
+/*
+ * Based on:
+ * 1) file-posix.c of QEMU Project
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ *
+ * 2) event_notifier-posix.c of QEMU Project
+ *
+ * Copyright Red Hat, Inc. 2010
+ *
+ * Authors:
+ * Michael S. Tsirkin <mst@redhat.com>
+ *
+ * 3) os-posix-lib.c of QEMU project
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2010 Red Hat, Inc.
+ *
+ * Copyright 2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <sys/param.h>
+#include <assert.h>
+
+/* For socket */
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+/* Project header files */
+#include "vhost_user_loopback.h"
+
+
+/* Set a file status flag on an open descriptor */
+int fcntl_setfl(int fd, int flag)
+{
+ int flags;
+
+ flags = fcntl(fd, F_GETFL);
+ if (flags == -1) {
+ return -errno;
+ }
+
+ if (fcntl(fd, F_SETFL, flags | flag) == -1) {
+ return -errno;
+ }
+
+ return 0;
+}
+
+void qemu_set_cloexec(int fd)
+{
+    int f;
+    f = fcntl(fd, F_GETFD);
+    assert(f != -1);
+    f = fcntl(fd, F_SETFD, f | FD_CLOEXEC);
+    assert(f != -1);
+}
+
+/*
+ * Creates a pipe with FD_CLOEXEC set on both file descriptors
+ */
+int qemu_pipe(int pipefd[2])
+{
+ int ret;
+
+#ifdef CONFIG_PIPE2
+ ret = pipe2(pipefd, O_CLOEXEC);
+ if (ret != -1 || errno != ENOSYS) {
+ return ret;
+ }
+#endif
+ ret = pipe(pipefd);
+ if (ret == 0) {
+ qemu_set_cloexec(pipefd[0]);
+ qemu_set_cloexec(pipefd[1]);
+ }
+
+ return ret;
+}
+
+int event_notifier_get_fd(const EventNotifier *e)
+{
+ return e->rfd;
+}
+
+int event_notifier_get_wfd(const EventNotifier *e)
+{
+ return e->wfd;
+}
+
+int event_notifier_set(EventNotifier *e)
+{
+ static const uint64_t value = 1;
+ ssize_t ret;
+
+ if (!e->initialized) {
+ return -1;
+ }
+
+ do {
+ ret = write(e->wfd, &value, sizeof(value));
+ } while (ret < 0 && errno == EINTR);
+
+ /* EAGAIN is fine, a read must be pending. */
+ if (ret < 0 && errno != EAGAIN) {
+ return -errno;
+ }
+ return 0;
+}
+
+int event_notifier_init(EventNotifier *e, int active)
+{
+ int fds[2];
+ int ret;
+
+ ret = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+
+ if (ret >= 0) {
+ e->rfd = e->wfd = ret;
+ } else {
+ if (errno != ENOSYS) {
+ return -errno;
+ }
+ if (qemu_pipe(fds) < 0) {
+ return -errno;
+ }
+ ret = fcntl_setfl(fds[0], O_NONBLOCK);
+ if (ret < 0) {
+ ret = -errno;
+ goto fail;
+ }
+ ret = fcntl_setfl(fds[1], O_NONBLOCK);
+ if (ret < 0) {
+ ret = -errno;
+ goto fail;
+ }
+ e->rfd = fds[0];
+ e->wfd = fds[1];
+ }
+ e->initialized = true;
+ if (active) {
+ event_notifier_set(e);
+ }
+ return 0;
+
+fail:
+ close(fds[0]);
+ close(fds[1]);
+ return ret;
+}
+
+bool ioeventfd_enabled(void)
+{
+ /*
+ * TODO: Delete if not needed:
+ * return !kvm_enabled() || kvm_eventfds_enabled();
+ */
+ return 1;
+}
+
+int event_notifier_test_and_clear(EventNotifier *e)
+{
+ int value;
+ ssize_t len;
+ char buffer[512];
+
+ if (!e->initialized) {
+ return 0;
+ }
+
+ /* Drain the notify pipe. For eventfd, only 8 bytes will be read. */
+ value = 0;
+ do {
+ len = read(e->rfd, buffer, sizeof(buffer));
+ value |= (len > 0);
+ } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
+
+ return value;
+}
diff --git a/event_notifier.h b/event_notifier.h
new file mode 100644
index 0000000..718f784
--- /dev/null
+++ b/event_notifier.h
@@ -0,0 +1,54 @@
+/*
+ * Based on event_notifier.h of QEMU project
+ *
+ * Copyright Red Hat, Inc. 2010
+ *
+ * Authors:
+ * Michael S. Tsirkin <mst@redhat.com>
+ *
+ * Copyright 2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+#ifndef EVENT_NOT_H
+#define EVENT_NOT_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <poll.h>
+#include <pthread.h>
+
+typedef struct EventNotifier {
+ int rfd;
+ int wfd;
+ bool initialized;
+} EventNotifier;
+
+
+int fcntl_setfl(int fd, int flag);
+void qemu_set_cloexec(int fd);
+int qemu_pipe(int pipefd[2]);
+int event_notifier_get_fd(const EventNotifier *e);
+int event_notifier_get_wfd(const EventNotifier *e);
+int event_notifier_set(EventNotifier *e);
+int event_notifier_init(EventNotifier *e, int active);
+bool ioeventfd_enabled(void);
+int event_notifier_test_and_clear(EventNotifier *e);
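+
+/*
+ * Illustrative usage sketch (not part of the original API): create a
+ * notifier, signal it, then consume the pending event:
+ *
+ *     EventNotifier e;
+ *     if (event_notifier_init(&e, 0) == 0) {
+ *         event_notifier_set(&e);
+ *         (void)event_notifier_test_and_clear(&e);
+ *     }
+ */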
+
+
+#endif /* EVENT_NOT_H */
diff --git a/linux/virtio_gpio.h b/linux/virtio_gpio.h
new file mode 100644
index 0000000..d4b29d9
--- /dev/null
+++ b/linux/virtio_gpio.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _LINUX_VIRTIO_GPIO_H
+#define _LINUX_VIRTIO_GPIO_H
+
+#include <linux/types.h>
+
+/* Virtio GPIO Feature bits */
+#define VIRTIO_GPIO_F_IRQ 0
+
+/* Virtio GPIO request types */
+#define VIRTIO_GPIO_MSG_GET_NAMES 0x0001
+#define VIRTIO_GPIO_MSG_GET_DIRECTION 0x0002
+#define VIRTIO_GPIO_MSG_SET_DIRECTION 0x0003
+#define VIRTIO_GPIO_MSG_GET_VALUE 0x0004
+#define VIRTIO_GPIO_MSG_SET_VALUE 0x0005
+#define VIRTIO_GPIO_MSG_IRQ_TYPE 0x0006
+
+/* Possible values of the status field */
+#define VIRTIO_GPIO_STATUS_OK 0x0
+#define VIRTIO_GPIO_STATUS_ERR 0x1
+
+/* Direction types */
+#define VIRTIO_GPIO_DIRECTION_NONE 0x00
+#define VIRTIO_GPIO_DIRECTION_OUT 0x01
+#define VIRTIO_GPIO_DIRECTION_IN 0x02
+
+/* Virtio GPIO IRQ types */
+#define VIRTIO_GPIO_IRQ_TYPE_NONE 0x00
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING 0x01
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
+
+struct virtio_gpio_config {
+ __le16 ngpio;
+ __u8 padding[2];
+ __le32 gpio_names_size;
+};
+
+/* Virtio GPIO Request / Response */
+struct virtio_gpio_request {
+ __le16 type;
+ __le16 gpio;
+ __le32 value;
+};
+
+struct virtio_gpio_response {
+ __u8 status;
+ __u8 value;
+};
+
+struct virtio_gpio_response_get_names {
+ __u8 status;
+ __u8 value[];
+};
+
+/* Virtio GPIO IRQ Request / Response */
+struct virtio_gpio_irq_request {
+ __le16 gpio;
+};
+
+struct virtio_gpio_irq_response {
+ __u8 status;
+};
+
+/* Possible values of the interrupt status field */
+#define VIRTIO_GPIO_IRQ_STATUS_INVALID 0x0
+#define VIRTIO_GPIO_IRQ_STATUS_VALID 0x1
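+
+/*
+ * Illustrative sketch (not part of the original header): a request that
+ * drives GPIO line 3 high, assuming little-endian conversion helpers
+ * such as cpu_to_le16()/cpu_to_le32() are available:
+ *
+ *     struct virtio_gpio_request req = {
+ *         .type  = cpu_to_le16(VIRTIO_GPIO_MSG_SET_VALUE),
+ *         .gpio  = cpu_to_le16(3),
+ *         .value = cpu_to_le32(1),
+ *     };
+ */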
+
+#endif /* _LINUX_VIRTIO_GPIO_H */
diff --git a/linux/virtio_snd.h b/linux/virtio_snd.h
new file mode 100644
index 0000000..dfe4954
--- /dev/null
+++ b/linux/virtio_snd.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (C) 2021 OpenSynergy GmbH
+ */
+#ifndef VIRTIO_SND_IF_H
+#define VIRTIO_SND_IF_H
+
+#include <linux/virtio_types.h>
+
+/*******************************************************************************
+ * CONFIGURATION SPACE
+ */
+struct virtio_snd_config {
+ /* # of available physical jacks */
+ __le32 jacks;
+ /* # of available PCM streams */
+ __le32 streams;
+ /* # of available channel maps */
+ __le32 chmaps;
+};
+
+enum {
+ /* device virtqueue indexes */
+ VIRTIO_SND_VQ_CONTROL = 0,
+ VIRTIO_SND_VQ_EVENT,
+ VIRTIO_SND_VQ_TX,
+ VIRTIO_SND_VQ_RX,
+ /* # of device virtqueues */
+ VIRTIO_SND_VQ_MAX
+};
+
+/*******************************************************************************
+ * COMMON DEFINITIONS
+ */
+
+/* supported dataflow directions */
+enum {
+ VIRTIO_SND_D_OUTPUT = 0,
+ VIRTIO_SND_D_INPUT
+};
+
+enum {
+ /* jack control request types */
+ VIRTIO_SND_R_JACK_INFO = 1,
+ VIRTIO_SND_R_JACK_REMAP,
+
+ /* PCM control request types */
+ VIRTIO_SND_R_PCM_INFO = 0x0100,
+ VIRTIO_SND_R_PCM_SET_PARAMS,
+ VIRTIO_SND_R_PCM_PREPARE,
+ VIRTIO_SND_R_PCM_RELEASE,
+ VIRTIO_SND_R_PCM_START,
+ VIRTIO_SND_R_PCM_STOP,
+
+ /* channel map control request types */
+ VIRTIO_SND_R_CHMAP_INFO = 0x0200,
+
+ /* jack event types */
+ VIRTIO_SND_EVT_JACK_CONNECTED = 0x1000,
+ VIRTIO_SND_EVT_JACK_DISCONNECTED,
+
+ /* PCM event types */
+ VIRTIO_SND_EVT_PCM_PERIOD_ELAPSED = 0x1100,
+ VIRTIO_SND_EVT_PCM_XRUN,
+
+ /* common status codes */
+ VIRTIO_SND_S_OK = 0x8000,
+ VIRTIO_SND_S_BAD_MSG,
+ VIRTIO_SND_S_NOT_SUPP,
+ VIRTIO_SND_S_IO_ERR
+};
+
+/* common header */
+struct virtio_snd_hdr {
+ __le32 code;
+};
+
+/* event notification */
+struct virtio_snd_event {
+ /* VIRTIO_SND_EVT_XXX */
+ struct virtio_snd_hdr hdr;
+ /* optional event data */
+ __le32 data;
+};
+
+/* common control request to query an item information */
+struct virtio_snd_query_info {
+ /* VIRTIO_SND_R_XXX_INFO */
+ struct virtio_snd_hdr hdr;
+ /* item start identifier */
+ __le32 start_id;
+ /* item count to query */
+ __le32 count;
+ /* item information size in bytes */
+ __le32 size;
+};
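+
+/*
+ * Illustrative sketch (not part of the original header): a request that
+ * queries information for the first 'n' jacks, assuming a little-endian
+ * helper cpu_to_le32() is available:
+ *
+ *     struct virtio_snd_query_info req = {
+ *         .hdr.code = cpu_to_le32(VIRTIO_SND_R_JACK_INFO),
+ *         .start_id = cpu_to_le32(0),
+ *         .count    = cpu_to_le32(n),
+ *         .size     = cpu_to_le32(sizeof(struct virtio_snd_jack_info)),
+ *     };
+ */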
+
+/* common item information header */
+struct virtio_snd_info {
+ /* function group node id (High Definition Audio Specification 7.1.2) */
+ __le32 hda_fn_nid;
+};
+
+/*******************************************************************************
+ * JACK CONTROL MESSAGES
+ */
+struct virtio_snd_jack_hdr {
+ /* VIRTIO_SND_R_JACK_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::jacks - 1 */
+ __le32 jack_id;
+};
+
+/* supported jack features */
+enum {
+ VIRTIO_SND_JACK_F_REMAP = 0
+};
+
+struct virtio_snd_jack_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* supported feature bit map (1 << VIRTIO_SND_JACK_F_XXX) */
+ __le32 features;
+ /* pin configuration (High Definition Audio Specification 7.3.3.31) */
+ __le32 hda_reg_defconf;
+ /* pin capabilities (High Definition Audio Specification 7.3.4.9) */
+ __le32 hda_reg_caps;
+ /* current jack connection status (0: disconnected, 1: connected) */
+ __u8 connected;
+
+ __u8 padding[7];
+};
+
+/* jack remapping control request */
+struct virtio_snd_jack_remap {
+ /* .code = VIRTIO_SND_R_JACK_REMAP */
+ struct virtio_snd_jack_hdr hdr;
+ /* selected association number */
+ __le32 association;
+ /* selected sequence number */
+ __le32 sequence;
+};
+
+/*******************************************************************************
+ * PCM CONTROL MESSAGES
+ */
+struct virtio_snd_pcm_hdr {
+ /* VIRTIO_SND_R_PCM_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::streams - 1 */
+ __le32 stream_id;
+};
+
+/* supported PCM stream features */
+enum {
+ VIRTIO_SND_PCM_F_SHMEM_HOST = 0,
+ VIRTIO_SND_PCM_F_SHMEM_GUEST,
+ VIRTIO_SND_PCM_F_MSG_POLLING,
+ VIRTIO_SND_PCM_F_EVT_SHMEM_PERIODS,
+ VIRTIO_SND_PCM_F_EVT_XRUNS
+};
+
+/* supported PCM sample formats */
+enum {
+ /* analog formats (width / physical width) */
+ VIRTIO_SND_PCM_FMT_IMA_ADPCM = 0, /* 4 / 4 bits */
+ VIRTIO_SND_PCM_FMT_MU_LAW, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_A_LAW, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_S8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_U8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_S16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_U16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_S18_3, /* 18 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U18_3, /* 18 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S20_3, /* 20 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U20_3, /* 20 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S24_3, /* 24 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U24_3, /* 24 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S20, /* 20 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U20, /* 20 / 32 bits */
+ VIRTIO_SND_PCM_FMT_S24, /* 24 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U24, /* 24 / 32 bits */
+ VIRTIO_SND_PCM_FMT_S32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_FLOAT, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_FLOAT64, /* 64 / 64 bits */
+ /* digital formats (width / physical width) */
+ VIRTIO_SND_PCM_FMT_DSD_U8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_DSD_U16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_DSD_U32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_IEC958_SUBFRAME /* 32 / 32 bits */
+};
+
+/* supported PCM frame rates */
+enum {
+ VIRTIO_SND_PCM_RATE_5512 = 0,
+ VIRTIO_SND_PCM_RATE_8000,
+ VIRTIO_SND_PCM_RATE_11025,
+ VIRTIO_SND_PCM_RATE_16000,
+ VIRTIO_SND_PCM_RATE_22050,
+ VIRTIO_SND_PCM_RATE_32000,
+ VIRTIO_SND_PCM_RATE_44100,
+ VIRTIO_SND_PCM_RATE_48000,
+ VIRTIO_SND_PCM_RATE_64000,
+ VIRTIO_SND_PCM_RATE_88200,
+ VIRTIO_SND_PCM_RATE_96000,
+ VIRTIO_SND_PCM_RATE_176400,
+ VIRTIO_SND_PCM_RATE_192000,
+ VIRTIO_SND_PCM_RATE_384000
+};
+
+struct virtio_snd_pcm_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* supported feature bit map (1 << VIRTIO_SND_PCM_F_XXX) */
+ __le32 features;
+ /* supported sample format bit map (1 << VIRTIO_SND_PCM_FMT_XXX) */
+ __le64 formats;
+ /* supported frame rate bit map (1 << VIRTIO_SND_PCM_RATE_XXX) */
+ __le64 rates;
+ /* dataflow direction (VIRTIO_SND_D_XXX) */
+ __u8 direction;
+ /* minimum # of supported channels */
+ __u8 channels_min;
+ /* maximum # of supported channels */
+ __u8 channels_max;
+
+ __u8 padding[5];
+};
+
+/* set PCM stream format */
+struct virtio_snd_pcm_set_params {
+ /* .code = VIRTIO_SND_R_PCM_SET_PARAMS */
+ struct virtio_snd_pcm_hdr hdr;
+ /* size of the hardware buffer */
+ __le32 buffer_bytes;
+ /* size of the hardware period */
+ __le32 period_bytes;
+ /* selected feature bit map (1 << VIRTIO_SND_PCM_F_XXX) */
+ __le32 features;
+ /* selected # of channels */
+ __u8 channels;
+ /* selected sample format (VIRTIO_SND_PCM_FMT_XXX) */
+ __u8 format;
+ /* selected frame rate (VIRTIO_SND_PCM_RATE_XXX) */
+ __u8 rate;
+
+ __u8 padding;
+};
+
+/*******************************************************************************
+ * PCM I/O MESSAGES
+ */
+
+/* I/O request header */
+struct virtio_snd_pcm_xfer {
+ /* 0 ... virtio_snd_config::streams - 1 */
+ __le32 stream_id;
+};
+
+/* I/O request status */
+struct virtio_snd_pcm_status {
+ /* VIRTIO_SND_S_XXX */
+ __le32 status;
+ /* current device latency */
+ __le32 latency_bytes;
+};
+
+/*******************************************************************************
+ * CHANNEL MAP CONTROL MESSAGES
+ */
+struct virtio_snd_chmap_hdr {
+ /* VIRTIO_SND_R_CHMAP_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::chmaps - 1 */
+ __le32 chmap_id;
+};
+
+/* standard channel position definition */
+enum {
+ VIRTIO_SND_CHMAP_NONE = 0, /* undefined */
+ VIRTIO_SND_CHMAP_NA, /* silent */
+ VIRTIO_SND_CHMAP_MONO, /* mono stream */
+ VIRTIO_SND_CHMAP_FL, /* front left */
+ VIRTIO_SND_CHMAP_FR, /* front right */
+ VIRTIO_SND_CHMAP_RL, /* rear left */
+ VIRTIO_SND_CHMAP_RR, /* rear right */
+ VIRTIO_SND_CHMAP_FC, /* front center */
+ VIRTIO_SND_CHMAP_LFE, /* low frequency (LFE) */
+ VIRTIO_SND_CHMAP_SL, /* side left */
+ VIRTIO_SND_CHMAP_SR, /* side right */
+ VIRTIO_SND_CHMAP_RC, /* rear center */
+ VIRTIO_SND_CHMAP_FLC, /* front left center */
+ VIRTIO_SND_CHMAP_FRC, /* front right center */
+ VIRTIO_SND_CHMAP_RLC, /* rear left center */
+ VIRTIO_SND_CHMAP_RRC, /* rear right center */
+ VIRTIO_SND_CHMAP_FLW, /* front left wide */
+ VIRTIO_SND_CHMAP_FRW, /* front right wide */
+ VIRTIO_SND_CHMAP_FLH, /* front left high */
+ VIRTIO_SND_CHMAP_FCH, /* front center high */
+ VIRTIO_SND_CHMAP_FRH, /* front right high */
+ VIRTIO_SND_CHMAP_TC, /* top center */
+ VIRTIO_SND_CHMAP_TFL, /* top front left */
+ VIRTIO_SND_CHMAP_TFR, /* top front right */
+ VIRTIO_SND_CHMAP_TFC, /* top front center */
+ VIRTIO_SND_CHMAP_TRL, /* top rear left */
+ VIRTIO_SND_CHMAP_TRR, /* top rear right */
+ VIRTIO_SND_CHMAP_TRC, /* top rear center */
+ VIRTIO_SND_CHMAP_TFLC, /* top front left center */
+ VIRTIO_SND_CHMAP_TFRC, /* top front right center */
+ VIRTIO_SND_CHMAP_TSL, /* top side left */
+ VIRTIO_SND_CHMAP_TSR, /* top side right */
+ VIRTIO_SND_CHMAP_LLFE, /* left LFE */
+ VIRTIO_SND_CHMAP_RLFE, /* right LFE */
+ VIRTIO_SND_CHMAP_BC, /* bottom center */
+ VIRTIO_SND_CHMAP_BLC, /* bottom left center */
+ VIRTIO_SND_CHMAP_BRC /* bottom right center */
+};
+
+/* maximum possible number of channels */
+#define VIRTIO_SND_CHMAP_MAX_SIZE 18
+
+struct virtio_snd_chmap_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* dataflow direction (VIRTIO_SND_D_XXX) */
+ __u8 direction;
+ /* # of valid channel position values */
+ __u8 channels;
+ /* channel position values (VIRTIO_SND_CHMAP_XXX) */
+ __u8 positions[VIRTIO_SND_CHMAP_MAX_SIZE];
+};
+
+#endif /* VIRTIO_SND_IF_H */
diff --git a/queue.h b/queue.h
new file mode 100644
index 0000000..e029e7b
--- /dev/null
+++ b/queue.h
@@ -0,0 +1,576 @@
+/* $NetBSD: queue.h,v 1.52 2009/04/20 09:56:08 mschuett Exp $ */
+
+/*
+ * QEMU version: Copy from netbsd, removed debug code, removed some of
+ * the implementations. Left in singly-linked lists, lists, simple
+ * queues, and tail queues.
+ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef QEMU_SYS_QUEUE_H
+#define QEMU_SYS_QUEUE_H
+
+/*
+ * This file defines four types of data structures: singly-linked lists,
+ * lists, simple queues, and tail queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The
+ * elements are singly linked for minimum space and pointer manipulation
+ * overhead at the expense of O(n) removal for arbitrary elements. New
+ * elements can be added to the list after an existing element or at the
+ * head of the list. Elements being removed from the head of the list
+ * should use the explicit macro for this purpose for optimum
+ * efficiency. A singly-linked list may only be traversed in the forward
+ * direction. Singly-linked lists are ideal for applications with large
+ * datasets and few or no removals or for implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
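+
+/*
+ * Illustrative sketch (not part of the original header), assuming a
+ * hypothetical 'struct entry' carrying an int, built with the QLIST
+ * macros defined below:
+ *
+ *     struct entry { int value; QLIST_ENTRY(entry) node; };
+ *     QLIST_HEAD(entry_head, entry) head = QLIST_HEAD_INITIALIZER(head);
+ *
+ *     struct entry *e = calloc(1, sizeof(*e));
+ *     e->value = 42;
+ *     QLIST_INSERT_HEAD(&head, e, node);
+ *     QLIST_FOREACH(e, &head, node)
+ *         printf("%d\n", e->value);
+ */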
+
+/*
+ * List definitions.
+ */
+#define QLIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define QLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define QLIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+#define QLIST_INIT(head) do { \
+ (head)->lh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_SWAP(dstlist, srclist, field) do { \
+ void *tmplist; \
+ tmplist = (srclist)->lh_first; \
+ (srclist)->lh_first = (dstlist)->lh_first; \
+ if ((srclist)->lh_first != NULL) { \
+ (srclist)->lh_first->field.le_prev = &(srclist)->lh_first; \
+ } \
+ (dstlist)->lh_first = tmplist; \
+ if ((dstlist)->lh_first != NULL) { \
+ (dstlist)->lh_first->field.le_prev = &(dstlist)->lh_first; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+ (elm)->field.le_next = NULL; \
+ (elm)->field.le_prev = NULL; \
+} while (/*CONSTCOND*/0)
+
+/*
+ * Like QLIST_REMOVE() but safe to call when elm is not in a list
+ */
+#define QLIST_SAFE_REMOVE(elm, field) do { \
+ if ((elm)->field.le_prev != NULL) { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+ (elm)->field.le_next = NULL; \
+ (elm)->field.le_prev = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+/* Is elm in a list? */
+#define QLIST_IS_INSERTED(elm, field) ((elm)->field.le_prev != NULL)
+
+#define QLIST_FOREACH(var, head, field) \
+ for ((var) = ((head)->lh_first); \
+ (var); \
+ (var) = ((var)->field.le_next))
+
+#define QLIST_FOREACH_SAFE(var, head, field, next_var) \
+ for ((var) = ((head)->lh_first); \
+ (var) && ((next_var) = ((var)->field.le_next), 1); \
+ (var) = (next_var))
+
+/*
+ * List access methods.
+ */
+#define QLIST_EMPTY(head) ((head)->lh_first == NULL)
+#define QLIST_FIRST(head) ((head)->lh_first)
+#define QLIST_NEXT(elm, field) ((elm)->field.le_next)
+
+
+/*
+ * Singly-linked List definitions.
+ */
+#define QSLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define QSLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define QSLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define QSLIST_INIT(head) do { \
+ (head)->slh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_HEAD_ATOMIC(head, elm, field) do { \
+ typeof(elm) save_sle_next; \
+ do { \
+ save_sle_next = (elm)->field.sle_next = (head)->slh_first; \
+ } while (qatomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) !=\
+ save_sle_next); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_MOVE_ATOMIC(dest, src) do { \
+ (dest)->slh_first = qatomic_xchg(&(src)->slh_first, NULL); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE_HEAD(head, field) do { \
+ typeof((head)->slh_first) elm = (head)->slh_first; \
+ (head)->slh_first = elm->field.sle_next; \
+ elm->field.sle_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE_AFTER(slistelm, field) do { \
+ typeof(slistelm) next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = next->field.sle_next; \
+ next->field.sle_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+ QSLIST_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->slh_first; \
+ while (curelm->field.sle_next != (elm)) \
+ curelm = curelm->field.sle_next; \
+ curelm->field.sle_next = curelm->field.sle_next->field.sle_next; \
+ (elm)->field.sle_next = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_FOREACH(var, head, field) \
+ for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
+
+#define QSLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = QSLIST_FIRST((head)); \
+ (var) && ((tvar) = QSLIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+/*
+ * Singly-linked List access methods.
+ */
+#define QSLIST_EMPTY(head) ((head)->slh_first == NULL)
+#define QSLIST_FIRST(head) ((head)->slh_first)
+#define QSLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+
+/*
+ * Simple queue definitions.
+ */
+#define QSIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define QSIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define QSIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue functions.
+ */
+#define QSIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_REMOVE_HEAD(head, field) do { \
+ typeof((head)->sqh_first) elm = (head)->sqh_first; \
+ if (((head)->sqh_first = elm->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+ elm->field.sqe_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_SPLIT_AFTER(head, elm, field, removed) do { \
+ QSIMPLEQ_INIT(removed); \
+ if (((removed)->sqh_first = (head)->sqh_first) != NULL) { \
+ if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) { \
+ (head)->sqh_last = &(head)->sqh_first; \
+ } \
+ (removed)->sqh_last = &(elm)->field.sqe_next; \
+ (elm)->field.sqe_next = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_REMOVE(head, elm, type, field) do { \
+ if ((head)->sqh_first == (elm)) { \
+ QSIMPLEQ_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->sqh_first; \
+ while (curelm->field.sqe_next != (elm)) \
+ curelm = curelm->field.sqe_next; \
+ if ((curelm->field.sqe_next = \
+ curelm->field.sqe_next->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(curelm)->field.sqe_next; \
+ (elm)->field.sqe_next = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->sqh_first); \
+ (var); \
+ (var) = ((var)->field.sqe_next))
+
+#define QSIMPLEQ_FOREACH_SAFE(var, head, field, next) \
+ for ((var) = ((head)->sqh_first); \
+ (var) && ((next = ((var)->field.sqe_next)), 1); \
+ (var) = (next))
+
+#define QSIMPLEQ_CONCAT(head1, head2) do { \
+ if (!QSIMPLEQ_EMPTY((head2))) { \
+ *(head1)->sqh_last = (head2)->sqh_first; \
+ (head1)->sqh_last = (head2)->sqh_last; \
+ QSIMPLEQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_PREPEND(head1, head2) do { \
+ if (!QSIMPLEQ_EMPTY((head2))) { \
+ *(head2)->sqh_last = (head1)->sqh_first; \
+ (head1)->sqh_first = (head2)->sqh_first; \
+ QSIMPLEQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_LAST(head, type, field) \
+ (QSIMPLEQ_EMPTY((head)) ? \
+ NULL : \
+ ((struct type *)(void *) \
+ ((char *)((head)->sqh_last) - offsetof(struct type, field))))
+
+/*
+ * Simple queue access methods.
+ */
+#define QSIMPLEQ_EMPTY_ATOMIC(head) \
+ (qatomic_read(&((head)->sqh_first)) == NULL)
+#define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
+#define QSIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+typedef struct QTailQLink {
+ void *tql_next;
+ struct QTailQLink *tql_prev;
+} QTailQLink;
+
+/*
+ * Tail queue definitions. The union acts as a poor man's template, as if
+ * it were QTailQLink<type>.
+ */
+#define QTAILQ_HEAD(name, type) \
+union name { \
+ struct type *tqh_first; /* first element */ \
+ QTailQLink tqh_circ; /* link for circular backwards list */ \
+}
+
+#define QTAILQ_HEAD_INITIALIZER(head) \
+ { .tqh_circ = { NULL, &(head).tqh_circ } }
+
+#define QTAILQ_ENTRY(type) \
+union { \
+ struct type *tqe_next; /* next element */ \
+ QTailQLink tqe_circ; /* link for circular backwards list */ \
+}
+
+/*
+ * Tail queue functions.
+ */
+#define QTAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_circ.tql_prev = &(head)->tqh_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_circ.tql_prev = \
+ &(elm)->field.tqe_circ; \
+ else \
+ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \
+ (head)->tqh_circ.tql_prev->tql_next = (elm); \
+ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_circ.tql_prev = \
+ &(elm)->field.tqe_circ; \
+ else \
+ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ (listelm)->field.tqe_circ.tql_prev->tql_next = (elm); \
+ (listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_circ.tql_prev = \
+ (elm)->field.tqe_circ.tql_prev; \
+ else \
+ (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
+ (elm)->field.tqe_circ.tql_prev->tql_next = (elm)->field.tqe_next; \
+ (elm)->field.tqe_circ.tql_prev = NULL; \
+ (elm)->field.tqe_circ.tql_next = NULL; \
+ (elm)->field.tqe_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+/* remove @left, @right and all elements in between from @head */
+#define QTAILQ_REMOVE_SEVERAL(head, left, right, field) do { \
+ if (((right)->field.tqe_next) != NULL) \
+ (right)->field.tqe_next->field.tqe_circ.tql_prev = \
+ (left)->field.tqe_circ.tql_prev; \
+ else \
+ (head)->tqh_circ.tql_prev = (left)->field.tqe_circ.tql_prev; \
+ (left)->field.tqe_circ.tql_prev->tql_next = (right)->field.tqe_next; \
+ } while (/*CONSTCOND*/0)
+
+#define QTAILQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->tqh_first); \
+ (var); \
+ (var) = ((var)->field.tqe_next))
+
+#define QTAILQ_FOREACH_SAFE(var, head, field, next_var) \
+ for ((var) = ((head)->tqh_first); \
+ (var) && ((next_var) = ((var)->field.tqe_next), 1); \
+ (var) = (next_var))
+
+#define QTAILQ_FOREACH_REVERSE(var, head, field) \
+ for ((var) = QTAILQ_LAST(head); \
+ (var); \
+ (var) = QTAILQ_PREV(var, field))
+
+#define QTAILQ_FOREACH_REVERSE_SAFE(var, head, field, prev_var) \
+ for ((var) = QTAILQ_LAST(head); \
+ (var) && ((prev_var) = QTAILQ_PREV(var, field), 1); \
+ (var) = (prev_var))
+
+/*
+ * Tail queue access methods.
+ */
+#define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+#define QTAILQ_FIRST(head) ((head)->tqh_first)
+#define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define QTAILQ_IN_USE(elm, field) ((elm)->field.tqe_circ.tql_prev != NULL)
+
+#define QTAILQ_LINK_PREV(link) \
+ ((link).tql_prev->tql_prev->tql_next)
+#define QTAILQ_LAST(head) \
+ ((typeof((head)->tqh_first)) QTAILQ_LINK_PREV((head)->tqh_circ))
+#define QTAILQ_PREV(elm, field) \
+ ((typeof((elm)->field.tqe_next)) QTAILQ_LINK_PREV((elm)->field.tqe_circ))
+
+#define field_at_offset(base, offset, type) \
+ ((type *) (((char *) (base)) + (offset)))
+
+/*
+ * Raw access of elements of a tail queue head. Offsets are all zero
+ * because it's a union.
+ */
+#define QTAILQ_RAW_FIRST(head) \
+ field_at_offset(head, 0, void *)
+#define QTAILQ_RAW_TQH_CIRC(head) \
+ field_at_offset(head, 0, QTailQLink)
+
+/*
+ * Raw access of elements of a tail entry
+ */
+#define QTAILQ_RAW_NEXT(elm, entry) \
+ field_at_offset(elm, entry, void *)
+#define QTAILQ_RAW_TQE_CIRC(elm, entry) \
+ field_at_offset(elm, entry, QTailQLink)
+/*
+ * Tail queue traversal using pointer arithmetic.
+ */
+#define QTAILQ_RAW_FOREACH(elm, head, entry) \
+ for ((elm) = *QTAILQ_RAW_FIRST(head); \
+ (elm); \
+ (elm) = *QTAILQ_RAW_NEXT(elm, entry))
+/*
+ * Tail queue insertion using pointer arithmetic.
+ */
+#define QTAILQ_RAW_INSERT_TAIL(head, elm, entry) do { \
+ *QTAILQ_RAW_NEXT(elm, entry) = NULL; \
+ QTAILQ_RAW_TQE_CIRC(elm, entry)->tql_prev = QTAILQ_RAW_TQH_CIRC(head)->tql_prev; \
+ QTAILQ_RAW_TQH_CIRC(head)->tql_prev->tql_next = (elm); \
+ QTAILQ_RAW_TQH_CIRC(head)->tql_prev = QTAILQ_RAW_TQE_CIRC(elm, entry); \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_RAW_FIRST(head) \
+ field_at_offset(head, 0, void *)
+
+#define QLIST_RAW_NEXT(elm, entry) \
+ field_at_offset(elm, entry, void *)
+
+#define QLIST_RAW_PREVIOUS(elm, entry) \
+ field_at_offset(elm, entry + sizeof(void *), void *)
+
+#define QLIST_RAW_FOREACH(elm, head, entry) \
+ for ((elm) = *QLIST_RAW_FIRST(head); \
+ (elm); \
+ (elm) = *QLIST_RAW_NEXT(elm, entry))
+
+#define QLIST_RAW_INSERT_AFTER(head, prev, elem, entry) do { \
+ *QLIST_RAW_NEXT(prev, entry) = elem; \
+ *QLIST_RAW_PREVIOUS(elem, entry) = QLIST_RAW_NEXT(prev, entry); \
+ *QLIST_RAW_NEXT(elem, entry) = NULL; \
+} while (0)
+
+#define QLIST_RAW_INSERT_HEAD(head, elm, entry) do { \
+ void *first = *QLIST_RAW_FIRST(head); \
+ *QLIST_RAW_FIRST(head) = elm; \
+ *QLIST_RAW_PREVIOUS(elm, entry) = QLIST_RAW_FIRST(head); \
+ if (first) { \
+ *QLIST_RAW_NEXT(elm, entry) = first; \
+ *QLIST_RAW_PREVIOUS(first, entry) = QLIST_RAW_NEXT(elm, entry); \
+ } else { \
+ *QLIST_RAW_NEXT(elm, entry) = NULL; \
+ } \
+} while (0)
+
+#endif /* QEMU_SYS_QUEUE_H */
diff --git a/vhost_loopback.c b/vhost_loopback.c
new file mode 100644
index 0000000..af725e3
--- /dev/null
+++ b/vhost_loopback.c
@@ -0,0 +1,469 @@
+/*
+ * Based on vhost.c of QEMU project;
+ *
+ * Copyright Red Hat, Inc. 2010
+ *
+ * Authors:
+ * Michael S. Tsirkin <mst@redhat.com>
+ *
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <sys/param.h>
+#include <assert.h>
+
+/* For socket */
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+/* Project header files */
+#include "virtio_loopback.h"
+#include "vhost_user_loopback.h"
+#include "event_notifier.h"
+
+/* vhost headers */
+#include "vhost_loopback.h"
+
+#ifdef DEBUG
+#define DBG(...) printf("vhost-loopback: " __VA_ARGS__)
+#else
+#define DBG(...)
+#endif /* DEBUG */
+
+/*
+ * Stop processing guest IO notifications in qemu.
+ * Start processing them in vhost in kernel.
+ */
+int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
+{
+ int i, r, e;
+
+ /*
+ * We will pass the notifiers to the kernel, make sure that QEMU
+ * doesn't interfere.
+ */
+
+ /* TODO: Check if this is still useful */
+ r = virtio_device_grab_ioeventfd(vdev);
+ if (r < 0) {
+ DBG("binding does not support host notifiers\n");
+ goto fail;
+ }
+
+
+ for (i = 0; i < hdev->nvqs; ++i) {
+ r = virtio_bus_set_host_notifier(vdev->vbus, hdev->vq_index + i,
+ true);
+ if (r < 0) {
+ DBG("vhost VQ %d notifier binding failed: %d", i, r);
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ DBG("Fail vhost_dev_enable_notifiers\n");
+ return r;
+}
+
+/* TODO: This needs to be checked if it's still needed */
+static int vhost_dev_has_iommu(struct vhost_dev *dev)
+{
+ VirtIODevice *vdev = dev->vdev;
+
+ /*
+ * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
+ * incremental memory mapping API via IOTLB API. For platform that
+ * does not have IOMMU, there's no need to enable this feature
+ * which may cause unnecessary IOTLB miss/update transactions.
+ */
+ return virtio_bus_device_iommu_enabled(vdev) &&
+ virtio_has_feature(vdev->host_features, VIRTIO_F_IOMMU_PLATFORM);
+}
+
+static int vhost_dev_set_features(struct vhost_dev *dev,
+ bool enable_log)
+{
+ uint64_t features = dev->acked_features;
+ int r;
+
+ if (enable_log) {
+ features |= 0x1ULL << VHOST_F_LOG_ALL;
+ }
+
+ /* TODO: check if this is needed */
+ if (!vhost_dev_has_iommu(dev)) {
+ features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM);
+ }
+
+ r = vhost_user_set_features(dev, features);
+ if (r < 0) {
+ DBG("vhost_set_features failed\n");
+ goto out;
+ }
+
+out:
+ return r;
+}
+
+static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq,
+ unsigned idx, bool enable_log)
+{
+ struct vhost_vring_addr addr;
+ int r;
+
+ memset(&addr, 0, sizeof(struct vhost_vring_addr));
+
+ addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
+ addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
+ addr.used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
+
+ DBG("Print physical addresses of vrings:\n");
+ DBG("\tvq->desc_phys: 0x%llx\n", vq->desc_phys);
+ DBG("\tvq->avail_phys: 0x%llx\n", vq->avail_phys);
+ DBG("\tvq->used_phys: 0x%llx\n", vq->used_phys);
+
+ addr.index = idx;
+ addr.log_guest_addr = vq->used_phys;
+ addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
+
+ r = vhost_user_set_vring_addr(dev, &addr);
+ if (r < 0) {
+ DBG("vhost_set_vring_addr failed\n");
+ }
+ return r;
+}
+
+uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
+ uint64_t features)
+{
+ const int *bit = feature_bits;
+ while (*bit != VHOST_INVALID_FEATURE_BIT) {
+ uint64_t bit_mask = (1ULL << *bit);
+ if (!(hdev->features & bit_mask)) {
+ features &= ~bit_mask;
+ }
+ bit++;
+ }
+ return features;
+}
+
+void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
+ uint64_t features)
+{
+ const int *bit = feature_bits;
+ while (*bit != VHOST_INVALID_FEATURE_BIT) {
+ uint64_t bit_mask = (1ULL << *bit);
+ if (features & bit_mask) {
+ hdev->acked_features |= bit_mask;
+ }
+ bit++;
+ }
+}
+
+
+
+/* Mask/unmask events from this vq. */
+void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
+ bool mask)
+{
+ struct VirtQueue *vvq = virtio_get_queue(vdev, n);
+ int r, index = n - hdev->vq_index;
+ struct vhost_vring_file file;
+
+ if (mask) {
+ file.fd = event_notifier_get_wfd(&hdev->vqs[index].masked_notifier);
+ } else {
+ file.fd = event_notifier_get_wfd(virtio_queue_get_guest_notifier(vvq));
+ }
+
+ file.index = vhost_user_get_vq_index(hdev, n);
+
+ r = vhost_user_set_vring_call(&file);
+ if (r < 0) {
+ DBG("vhost_set_vring_call failed\n");
+ }
+}
+
+static int vhost_virtqueue_start(struct vhost_dev *dev,
+ struct VirtIODevice *vdev,
+ struct vhost_virtqueue *vq,
+ unsigned idx)
+{
+ VirtioBus *vbus = vdev->vbus;
+ uint64_t s, l, a;
+ int r;
+
+ int vhost_vq_index = vhost_user_get_vq_index(dev, idx);
+ struct vhost_vring_file file = {
+ .index = vhost_vq_index
+ };
+ struct vhost_vring_state state = {
+ .index = vhost_vq_index
+ };
+ struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
+
+ a = virtio_queue_get_desc_addr(vdev, idx);
+ if (a == 0) {
+ /* Queue might not be ready for start */
+ DBG("Error: Queue might not be ready for start\n");
+ return 0;
+ }
+
+ vq->num = state.num = virtio_queue_get_num(vdev, idx);
+
+ r = vhost_user_set_vring_num(dev, &state);
+ if (r) {
+ DBG("vhost_set_vring_num failed\n");
+ return r;
+ }
+
+ state.num = virtio_queue_get_last_avail_idx(vdev, idx);
+ r = vhost_user_set_vring_base(dev, &state);
+ if (r) {
+ DBG("vhost_set_vring_base failed\n");
+ return r;
+ }
+
+ vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
+ vq->desc_phys = vring_phys_addrs[idx] << 12;
+ vq->desc = (void *)virtio_queue_get_desc_addr(vdev, idx);
+ if (!vq->desc || l != s) {
+ DBG("Error : vq->desc = a\n");
+ r = -ENOMEM;
+ return r;
+ }
+
+ vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
+ vq->avail_phys = vq->desc_phys + virtio_queue_get_avail_addr(vdev, idx)
+ - virtio_queue_get_desc_addr(vdev, idx);
+ vq->avail = (void *)virtio_queue_get_avail_addr(vdev, idx);
+ if (!vq->avail || l != s) {
+ DBG("Error : vq->avail = a\n");
+ r = -ENOMEM;
+ return r;
+ }
+
+ vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
+ vq->used_phys = a = vq->avail_phys + virtio_queue_get_used_addr(vdev, idx)
+ - virtio_queue_get_avail_addr(vdev, idx);
+ vq->used = (void *)virtio_queue_get_used_addr(vdev, idx);
+ if (!vq->used || l != s) {
+ DBG("Error : vq->used = a\n");
+ r = -ENOMEM;
+ return r;
+ }
+
+ r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
+ if (r < 0) {
+ DBG("Fail vhost_virtqueue_set_addr\n");
+ return r;
+ }
+
+ /* The next line has to be disabled for rng */
+ /* Clear and discard previous events if any. */
+ event_notifier_test_and_clear(virtio_queue_get_host_notifier(vvq));
+
+ file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
+ r = vhost_user_set_vring_kick(&file);
+ if (r) {
+ DBG("vhost_set_vring_kick failed\n");
+ return r;
+ }
+
+ /* Clear and discard previous events if any. */
+ event_notifier_test_and_clear(&vq->masked_notifier);
+
+ /*
+ * Init vring in unmasked state, unless guest_notifier_mask
+ * will do it later.
+ */
+ if (!vdev->use_guest_notifier_mask) {
+ /* TODO: check and handle errors. */
+ vhost_virtqueue_mask(dev, vdev, idx, false);
+ }
+
+ return 0;
+}
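+/*
+ * Note on the address math in vhost_virtqueue_start(): vring_phys_addrs[idx]
+ * holds the guest page frame number of the descriptor table, so desc_phys is
+ * that PFN shifted by the 4 KiB page size (12 bits). Because avail and used
+ * live in the same contiguous vring allocation, their physical addresses are
+ * derived by adding virtual-address offsets to desc_phys, e.g.:
+ *
+ * avail_phys = desc_phys + (avail_vaddr - desc_vaddr);
+ */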
+
+void update_mem_table(VirtIODevice *vdev)
+{
+ print_mem_table(vdev->vhdev);
+ vhost_commit_vqs(vdev->vhdev);
+ print_mem_table(vdev->vhdev);
+ (void)vhost_user_set_mem_table(vdev->vhdev);
+}
+
+static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable)
+{
+ DBG("vhost_dev_set_vring_enable not yet implemented\n");
+
+ /*
+ * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not
+ * been negotiated, the rings start directly in the enabled state, and
+ * .vhost_set_vring_enable callback will fail since
+ * VHOST_USER_SET_VRING_ENABLE is not supported.
+ */
+ if (!virtio_has_feature(hdev->backend_features,
+ VHOST_USER_F_PROTOCOL_FEATURES)) {
+ DBG("Does not have VHOST_USER_F_PROTOCOL_FEATURES\n");
+ return 0;
+ }
+
+ return vhost_user_set_vring_enable(hdev, enable);
+}
+
+/* Host notifiers must be enabled at this point. */
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
+{
+ int i, r;
+
+ hdev->started = true;
+ hdev->vdev = vdev;
+
+ r = vhost_dev_set_features(hdev, hdev->log_enabled);
+ if (r < 0) {
+ return r;
+ }
+
+ /* TODO: check if this is needed */
+ if (vhost_dev_has_iommu(hdev)) {
+ DBG("memory_listener_register?\n");
+ }
+
+ vhost_commit_mem_regions(hdev);
+
+ for (i = 0; i < hdev->nvqs; ++i) {
+ r = vhost_virtqueue_start(hdev,
+ vdev,
+ hdev->vqs + i,
+ hdev->vq_index + i);
+ if (r < 0) {
+ DBG("Fail vhost_virtqueue_start\n");
+ return r;
+ }
+ }
+
+ if (vrings) {
+ r = vhost_dev_set_vring_enable(hdev, true);
+ if (r) {
+ DBG("Fail vhost_dev_set_vring_enable\n");
+ return r;
+ }
+ }
+
+ r = vhost_user_dev_start(hdev, true);
+ if (r) {
+ DBG("Fail vhost_dev_set_vring_enable\n");
+ return r;
+ }
+
+ return 0;
+}
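+/*
+ * For reference, vhost_dev_start() follows the usual vhost-user bring-up
+ * order: negotiate features, commit the memory table, configure and start
+ * each virtqueue, optionally enable the rings (only when
+ * VHOST_USER_F_PROTOCOL_FEATURES was negotiated), and finally tell the
+ * backend the device is running.
+ */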
+
+
+int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
+ uint32_t config_len)
+{
+ return vhost_user_get_config(hdev, config, config_len);
+}
+
+int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
+ uint32_t offset, uint32_t size, uint32_t flags)
+{
+ return vhost_user_set_config(hdev, data, offset, size, flags);
+}
+
+void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
+ const VhostDevConfigOps *ops)
+{
+ hdev->config_ops = ops;
+}
+
+int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev)
+{
+ int r;
+
+ /*
+ * TODO: Check if we need that
+ * if (hdev->vhost_ops->vhost_get_inflight_fd == NULL ||
+ * hdev->vhost_ops->vhost_set_inflight_fd == NULL) {
+ * return 0;
+ * }
+ */
+
+ hdev->vdev = vdev;
+
+ r = vhost_dev_set_features(hdev, hdev->log_enabled);
+ if (r < 0) {
+ DBG("vhost_dev_prepare_inflight failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
+int vhost_dev_set_inflight(struct vhost_dev *dev,
+ struct vhost_inflight *inflight)
+{
+ int r;
+
+ if (inflight->addr) {
+ r = vhost_user_set_inflight_fd(dev, inflight);
+ if (r) {
+ DBG("vhost_set_inflight_fd failed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
+ struct vhost_inflight *inflight)
+{
+ int r;
+
+ r = vhost_user_get_inflight_fd(dev, queue_size, inflight);
+ if (r) {
+ DBG("vhost_get_inflight_fd failed\n");
+ return -1;
+ }
+
+ return 0;
+}
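+/*
+ * Sketch of the typical inflight lifecycle (vhost_user_blk_start() in
+ * vhost_user_blk.c follows this order; "hdev" and "inflight" stand for the
+ * caller's own structures):
+ *
+ * vhost_dev_prepare_inflight(hdev, vdev);
+ * if (!inflight->addr)
+ * vhost_dev_get_inflight(hdev, queue_size, inflight);
+ * vhost_dev_set_inflight(hdev, inflight);
+ */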
+
diff --git a/vhost_loopback.h b/vhost_loopback.h
new file mode 100644
index 0000000..198a5af
--- /dev/null
+++ b/vhost_loopback.h
@@ -0,0 +1,80 @@
+/*
+ * Based on vhost.h of QEMU project
+ *
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef LOOPBACK_VHOST_H
+#define LOOPBACK_VHOST_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <poll.h>
+#include <pthread.h>
+#include "vhost_user_loopback.h"
+#include "virtio_loopback.h"
+
+#define VHOST_INVALID_FEATURE_BIT (0xff)
+#define VHOST_QUEUE_NUM_CONFIG_INR 0
+
+int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
+void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev,
+ int n, bool mask);
+int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
+ uint32_t config_len);
+int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
+ uint32_t offset, uint32_t size, uint32_t flags);
+uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
+ uint64_t features);
+void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
+ uint64_t features);
+
+typedef struct VhostDevConfigOps VhostDevConfigOps;
+
+/**
+ * vhost_dev_set_config_notifier() - register VhostDevConfigOps
+ * @dev: common vhost_dev structure
+ * @ops: notifier ops
+ *
+ * If the device is expected to change configuration, a notifier can be
+ * set up to handle the case.
+ */
+void vhost_dev_set_config_notifier(struct vhost_dev *dev,
+ const VhostDevConfigOps *ops);
+int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
+
+int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
+ struct vhost_inflight *inflight);
+
+int vhost_dev_set_inflight(struct vhost_dev *dev,
+ struct vhost_inflight *inflight);
+
+void update_mem_table(VirtIODevice *vdev);
+
+
+struct vhost_inflight {
+ int fd;
+ void *addr;
+ uint64_t size;
+ uint64_t offset;
+ uint16_t queue_size;
+};
+
+#endif /* LOOPBACK_VHOST_H */
diff --git a/vhost_user_blk.c b/vhost_user_blk.c
new file mode 100644
index 0000000..d182a74
--- /dev/null
+++ b/vhost_user_blk.c
@@ -0,0 +1,520 @@
+/*
+ * Based on vhost-user-blk.c of QEMU project
+ *
+ * Copyright(C) 2017 Intel Corporation.
+ *
+ * Authors:
+ * Changpeng Liu <changpeng.liu@intel.com>
+ *
+ * Largely based on the "vhost-user-scsi.c" and "vhost-scsi.c" implemented by:
+ * Felipe Franciosi <felipe@nutanix.com>
+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ * Nicholas Bellinger <nab@risingtidesystems.com>
+ *
+ * Copyright (c) 2022-2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+#include <sys/param.h>
+
+/* Project header files */
+#include "vhost_user_blk.h"
+
+#ifdef DEBUG
+#define DBG(...) printf("vhost-user-blk: " __VA_ARGS__)
+#else
+#define DBG(...)
+#endif /* DEBUG */
+
+
+#define REALIZE_CONNECTION_RETRIES 3
+static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
+ uint64_t features);
+
+static int vhost_user_blk_start(VirtIODevice *vdev)
+{
+ VHostUserBlk *s = vdev->vhublk;
+ VirtioBus *k = vdev->vbus;
+ int i, ret;
+
+ DBG("vhost_user_blk_start\n");
+
+ if (!k->set_guest_notifiers) {
+ DBG("binding does not support guest notifiers\n");
+ return -1;
+ }
+
+ ret = vhost_dev_enable_notifiers(s->vhost_dev, vdev);
+ if (ret < 0) {
+ DBG("Error enabling host notifiers\n");
+ return ret;
+ }
+
+ ret = k->set_guest_notifiers(k->vdev, s->vhost_dev->nvqs, true);
+ if (ret < 0) {
+ DBG("Error enabling host notifier\n");
+ return ret;
+ }
+
+ s->vhost_dev->acked_features = vdev->guest_features;
+ DBG("acked_features: 0x%lx\n", vdev->guest_features);
+
+ /* FIXME: We might not need this */
+ ret = vhost_dev_prepare_inflight(s->vhost_dev, vdev);
+ if (ret < 0) {
+ DBG("Error setting inflight format\n");
+ return ret;
+ }
+
+ if (!s->inflight->addr) {
+ ret = vhost_dev_get_inflight(s->vhost_dev, s->queue_size, s->inflight);
+ if (ret < 0) {
+ DBG("Error getting inflight\n");
+ return ret;
+ }
+ }
+
+ ret = vhost_dev_set_inflight(s->vhost_dev, s->inflight);
+ if (ret < 0) {
+ DBG("Error setting inflight\n");
+ return ret;
+ }
+
+ DBG("After vhost_dev_set_inflight\n");
+
+
+ ret = vhost_dev_start(s->vhost_dev, vdev, false);
+ if (ret < 0) {
+ DBG("Error starting vhost\n");
+ return ret;
+ }
+
+ s->started_vu = true;
+
+ DBG("vhost_virtqueue_mask\n");
+ /*
+ * guest_notifier_mask/pending not used yet, so just unmask
+ * everything here. virtio-pci will do the right thing by
+ * enabling/disabling irqfd.
+ */
+ for (i = 0; i < s->vhost_dev->nvqs; i++) {
+ vhost_virtqueue_mask(s->vhost_dev, vdev, i, false);
+ }
+
+ DBG("vhost_user_blk_start return successfully: %d\n", ret);
+ return ret;
+
+}
+
+static void vhost_user_blk_stop(VirtIODevice *vdev)
+{
+ DBG("Not yet implemented\n");
+}
+
+static int vhost_user_blk_handle_config_change(struct vhost_dev *dev)
+{
+ int ret;
+ struct virtio_blk_config blkcfg;
+ VHostUserBlk *s = dev->vdev->vhublk;
+
+ DBG("vhost_user_blk_handle_config_change(...)\n");
+
+ ret = vhost_dev_get_config(dev, (uint8_t *)&blkcfg,
+ sizeof(struct virtio_blk_config));
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* valid for resize only */
+ if (blkcfg.capacity != s->blkcfg.capacity) {
+ DBG("blkcfg.capacity != s->blkcfg.capacity\n");
+ s->blkcfg.capacity = blkcfg.capacity;
+ memcpy(dev->vdev->config, &s->blkcfg, sizeof(struct virtio_blk_config));
+ DBG("To virtio_notify_config\n");
+ virtio_notify_config(dev->vdev);
+ }
+
+ return 0;
+}
+
+
+const VhostDevConfigOps blk_ops = {
+ .vhost_dev_config_notifier = vhost_user_blk_handle_config_change,
+};
+
+
+static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
+ uint64_t features)
+{
+ VHostUserBlk *s = vdev->vhublk;
+
+ DBG("vhost_user_blk_get_features()\n");
+
+ /* Turn on pre-defined features */
+ virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
+ virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
+ virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
+ virtio_add_feature(&features, VIRTIO_BLK_F_FLUSH);
+ virtio_add_feature(&features, VIRTIO_BLK_F_DISCARD);
+ virtio_add_feature(&features, VIRTIO_BLK_F_WRITE_ZEROES);
+ virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
+ virtio_add_feature(&features, VIRTIO_BLK_F_RO);
+ /*
+ * Note: VIRTIO_BLK_F_BLK_SIZE and VIRTIO_BLK_F_RO are already
+ * advertised above; VIRTIO_BLK_F_RO makes the block device
+ * read-only, so drop it above if writable access is needed.
+ */
+
+ if (s->config_wce) {
+ DBG("Add config feature\n");
+ virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE);
+ }
+
+ if (s->num_queues > 1) {
+ virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
+ }
+
+ return vhost_user_get_features(&features);
+}
+
+static int vhost_user_blk_connect(VirtIODevice *vdev)
+{
+ VHostUserBlk *s = vdev->vhublk;
+ int ret = 0;
+
+ DBG("vhost_user_blk_connect(...)\n");
+
+ if (s->connected) {
+ DBG("s->connected\n");
+ return 0;
+ }
+ s->connected = true;
+ s->vhost_dev->num_queues = s->num_queues;
+ s->vhost_dev->nvqs = s->num_queues;
+ s->vhost_dev->vqs = s->vhost_vqs;
+ s->vhost_dev->vq_index = 0;
+ s->vhost_dev->backend_features = 0;
+
+ vhost_dev_set_config_notifier(s->vhost_dev, &blk_ops);
+
+ vhost_dev_init(s->vhost_dev);
+
+ /* Pass the new obtained features */
+ global_vdev->host_features = s->vhost_dev->features;
+
+ /*
+ * The next line disables VIRTIO_RING_F_INDIRECT_DESC:
+ * global_vdev->host_features &= ~(1ULL << VIRTIO_RING_F_INDIRECT_DESC);
+ */
+
+ DBG("After init global_vdev->host_features: 0x%lx\n",
+ global_vdev->host_features);
+
+ /* Restore vhost state */
+ if (virtio_device_started(vdev, vdev->status)) {
+ ret = vhost_user_blk_start(vdev);
+ if (ret < 0) {
+ DBG("vhost_user_blk_start failed\n");
+ return ret;
+ }
+ }
+
+ DBG("vhost_user_blk_connect return successfully!\n");
+
+ return 0;
+}
+
+static void vhost_user_blk_disconnect(VirtIODevice *dev)
+{
+ DBG("vhost_user_blk_disconnect not yet implemented\n");
+}
+
+static void vhost_user_blk_chr_closed_bh(void *opaque)
+{
+ DBG("vhost_user_blk_chr_closed_bh not yet implemented\n");
+}
+
+static void vhost_user_blk_event(void *opaque)
+{
+ DBG("vhost_user_blk_event not yet implemented");
+}
+
+static int vhost_user_blk_realize_connect(VHostUserBlk *s)
+{
+ int ret;
+
+ DBG("vhost_user_blk_realize_connect(...)\n");
+ s->connected = false;
+
+ DBG("s->vdev: 0x%lx\n", (uint64_t)s->parent);
+ DBG("global_vdev: 0x%lx\n", (uint64_t)global_vdev);
+ ret = vhost_user_blk_connect(s->parent);
+ if (ret < 0) {
+ DBG("vhost_user_blk_connect failed\n");
+ return ret;
+ }
+ DBG("s->connected: %d\n", s->connected);
+
+ ret = vhost_dev_get_config(s->vhost_dev, (uint8_t *)&s->blkcfg,
+ sizeof(struct virtio_blk_config));
+ if (ret < 0) {
+ DBG("vhost_dev_get_config failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+
+static void vhost_user_blk_device_unrealize(VirtIODevice *vdev)
+{
+ DBG("vhost_user_blk_device_unrealize not yet implemented\n");
+}
+
+static void vhost_user_blk_reset(VirtIODevice *vdev)
+{
+ DBG("vhost_user_blk_reset not yet implemented\n");
+}
+
+static void vhost_user_blk_set_config(VirtIODevice *vdev,
+ const uint8_t *config);
+
+
+static void vhost_user_blk_update_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VHostUserBlk *s = vdev->vhublk;
+
+ DBG("vhost_user_blk_update_config(...)\n");
+
+ /* Our num_queues overrides the device backend */
+ memcpy(&s->blkcfg.num_queues, &s->num_queues, sizeof(uint16_t));
+
+ memcpy(config, &s->blkcfg, sizeof(struct virtio_blk_config));
+}
+
+static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
+{
+ VHostUserBlk *s = vdev->vhublk;
+ struct virtio_blk_config *blkcfg = (struct virtio_blk_config *)config;
+ int ret;
+
+ DBG("vhost_user_blk_set_config(...)\n");
+
+ if (blkcfg->wce == s->blkcfg.wce) {
+ DBG("blkcfg->wce == s->blkcfg.wce\n");
+ return;
+ }
+
+ ret = vhost_dev_set_config(s->vhost_dev, &blkcfg->wce,
+ offsetof(struct virtio_blk_config, wce),
+ sizeof(blkcfg->wce),
+ VHOST_SET_CONFIG_TYPE_MASTER);
+ if (ret) {
+ DBG("set device config space failed\n");
+ return;
+ }
+
+ s->blkcfg.wce = blkcfg->wce;
+}
+
+
+static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status)
+{
+ VHostUserBlk *s = vdev->vhublk;
+ /* Just for testing: bool should_start = true; */
+ bool should_start = virtio_device_started(vdev, status);
+ int ret;
+
+ DBG("vhost_user_blk_set_status (...)\n");
+
+ /* TODO: Remove if not needed */
+ if (!s->connected) {
+ DBG("Not connected!\n");
+ return;
+ }
+
+ DBG("should_start == %d\n", should_start);
+ if (s->vhost_dev->started == should_start) {
+ DBG("s->dev->started == should_start\n");
+ return;
+ }
+
+ if (should_start) {
+ ret = vhost_user_blk_start(vdev);
+ if (ret < 0) {
+ DBG("vhost_user_blk_start returned error\n");
+ }
+ } else {
+ DBG("Call vhost_user_blk_stop (not yet in place)\n");
+ /* TODO: vhost_user_blk_stop(vdev); */
+ }
+
+ DBG("vhost_user_blk_set_status return successfully\n");
+}
+
+static void print_config_blk(uint8_t *config_data)
+{
+ struct virtio_blk_config *config_strct =
+ (struct virtio_blk_config *)config_data;
+
+ DBG("uint64_t capacity: %llu\n", config_strct->capacity);
+ DBG("uint32_t size_max: %u\n", config_strct->size_max);
+ DBG("uint32_t seg_max: %u\n", config_strct->seg_max);
+
+ DBG("virtio_blk_geometry:\n");
+ DBG(" uint16_t cylinders: %u\n",
+ config_strct->geometry.cylinders);
+ DBG(" uint8_t heads: %u\n",
+ config_strct->geometry.heads);
+ DBG(" uint8_t sectors: %u\n",
+ config_strct->geometry.sectors);
+
+ DBG("uint32_t blk_size: %u\n", config_strct->blk_size);
+ DBG("uint8_t physical_block_exp: %u\n",
+ config_strct->physical_block_exp);
+ DBG("uint8_t alignment_offset: %u\n",
+ config_strct->alignment_offset);
+ DBG("uint16_t min_io_size: %u\n", config_strct->min_io_size);
+ DBG("uint32_t opt_io_size: %u\n", config_strct->opt_io_size);
+ DBG("uint8_t wce: %u\n", config_strct->wce);
+ DBG("uint8_t unused: %u\n", config_strct->unused);
+ DBG("uint16_t num_queues: %u\n", config_strct->num_queues);
+ DBG("uint32_t max_discard_sectors: %u\n",
+ config_strct->max_discard_sectors);
+ DBG("uint32_t max_discard_seg: %u\n", config_strct->max_discard_seg);
+ DBG("uint32_t discard_sector_alignment: %u\n",
+ config_strct->discard_sector_alignment);
+ DBG("uint32_t max_write_zeroes_sectors: %u\n",
+ config_strct->max_write_zeroes_sectors);
+ DBG("uint32_t max_write_zeroes_seg: %u\n",
+ config_strct->max_write_zeroes_seg);
+ DBG("uint8_t write_zeroes_may_unmap: %u\n",
+ config_strct->write_zeroes_may_unmap);
+ DBG("uint8_t unused1[3]: %u\n", config_strct->unused1[0]);
+ DBG("uint8_t unused1[3]: %u\n", config_strct->unused1[1]);
+ DBG("uint8_t unused1[3]: %u\n", config_strct->unused1[2]);
+}
+
+static void virtio_dev_class_init(VirtIODevice *vdev)
+{
+ DBG("virtio_dev_class_init\n");
+
+ vdev->vdev_class = (VirtioDeviceClass *)malloc(sizeof(VirtioDeviceClass));
+ vdev->vdev_class->parent = vdev;
+ vdev->vdev_class->realize = vhost_user_blk_realize;
+ vdev->vdev_class->unrealize = vhost_user_blk_device_unrealize;
+ vdev->vdev_class->get_config = vhost_user_blk_update_config;
+ vdev->vdev_class->set_config = vhost_user_blk_set_config;
+ vdev->vdev_class->get_features = vhost_user_blk_get_features;
+ vdev->vdev_class->set_status = vhost_user_blk_set_status;
+ vdev->vdev_class->reset = vhost_user_blk_reset;
+ vdev->vdev_class->update_mem_table = update_mem_table;
+ vdev->vdev_class->print_config = print_config_blk;
+}
+
+void vhost_user_blk_init(VirtIODevice *vdev)
+{
+
+ DBG("vhost_user_blk_init\n");
+
+ VHostUserBlk *vhublk = (VHostUserBlk *)malloc(sizeof(VHostUserBlk));
+ vdev->vhublk = vhublk;
+ vdev->nvqs = &dev->nvqs;
+ vhublk->parent = vdev;
+ vhublk->virtqs = vdev->vqs;
+ vhublk->vhost_dev = dev;
+
+ virtio_dev_class_init(vdev);
+ virtio_loopback_bus_init(vdev->vbus);
+}
+
+static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+{
+ /*
+ * Not normally called; it's the daemon that handles the queue;
+ * however virtio's cleanup path can call this.
+ */
+ DBG("vhost_user_blk_handle_output not yet implemented\n");
+}
+
+void vhost_user_blk_realize(int queue_num, int queue_size)
+{
+ int retries;
+ int i, ret;
+
+ DBG("vhost_user_blk_realize\n");
+
+ /* This needs to be added */
+ proxy = (VirtIOMMIOProxy *)malloc(sizeof(VirtIOMMIOProxy));
+ *proxy = (VirtIOMMIOProxy) {
+ .legacy = 1,
+ };
+
+ /* VIRTIO_ID_BLOCK is 2, check virtio_ids.h in linux */
+ virtio_dev_init(global_vdev, "virtio-blk", 2,
+ sizeof(struct virtio_blk_config));
+
+ vhost_user_blk_init(global_vdev);
+
+ global_vdev->vhublk->config_wce = 1;
+
+ global_vdev->vhublk->num_queues = queue_num;
+
+ global_vdev->vhublk->queue_size = queue_size;
+
+ /* NOTE: global_vdev->vqs == vhublk->virtqs */
+ global_vdev->vqs = (VirtQueue **)malloc(sizeof(VirtQueue *)
+ * global_vdev->vhublk->num_queues);
+ for (i = 0; i < global_vdev->vhublk->num_queues; i++) {
+ global_vdev->vqs[i] = virtio_add_queue(global_vdev,
+ global_vdev->vhublk->queue_size,
+ vhost_user_blk_handle_output);
+ }
+
+ global_vdev->vhublk->inflight = (struct vhost_inflight *)malloc(
+ sizeof(struct vhost_inflight));
+ global_vdev->vhublk->vhost_vqs = (struct vhost_virtqueue *)malloc(
+ sizeof(struct vhost_virtqueue) *
+ global_vdev->vhublk->num_queues);
+
+ retries = REALIZE_CONNECTION_RETRIES;
+
+ do {
+ ret = vhost_user_blk_realize_connect(global_vdev->vhublk);
+ } while (ret < 0 && retries--);
+
+ if (ret < 0) {
+ DBG("vhost_user_blk_realize_connect: -EPROTO\n");
+ }
+
+ DBG("final global_vdev->host_features: 0x%lx\n",
+ global_vdev->host_features);
+
+ print_config_blk((uint8_t *)(&global_vdev->vhublk->blkcfg));
+}
+
diff --git a/vhost_user_blk.h b/vhost_user_blk.h
new file mode 100644
index 0000000..d5857c8
--- /dev/null
+++ b/vhost_user_blk.h
@@ -0,0 +1,59 @@
+/*
+ * Based on vhost-user-blk.h of QEMU project
+ *
+ * Copyright(C) 2017 Intel Corporation.
+ *
+ * Authors:
+ * Changpeng Liu <changpeng.liu@intel.com>
+ *
+ *
+ * Copyright (c) 2022-2023 Virtual Open Systems SAS.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef VHOST_USER_BLK
+#define VHOST_USER_BLK
+
+#include "vhost_loopback.h"
+#include "vhost_user_loopback.h"
+#include "virtio_loopback.h"
+#include <linux/virtio_blk.h>
+
+#define TYPE_VHOST_USER_BLK "vhost-user-blk"
+
+#define VHOST_USER_BLK_AUTO_NUM_QUEUES UINT16_MAX
+
+struct VHostUserBlk {
+ VirtIODevice *parent;
+ struct vhost_virtqueue *vhost_vq;
+ struct vhost_dev *vhost_dev;
+ VirtQueue *req_vq;
+ VirtQueue **virtqs;
+ uint16_t num_queues;
+ uint32_t queue_size;
+ uint32_t config_wce;
+ struct vhost_inflight *inflight;
+ struct vhost_virtqueue *vhost_vqs;
+ struct virtio_blk_config blkcfg;
+ bool connected;
+ bool started_vu;
+};
+
+void vhost_user_blk_realize(int queue_num, int queue_size);
+
+#endif /* VHOST_USER_BLK */
diff --git a/vhost_user_gpio.c b/vhost_user_gpio.c
new file mode 100644
index 0000000..6049bd5
--- /dev/null
+++ b/vhost_user_gpio.c
@@ -0,0 +1,381 @@
+/*
+ * Based on vhost-user-gpio.c of QEMU project
+ *
+ * Copyright (c) 2022 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * Copyright (c) 2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+#include <sys/param.h>
+#include <errno.h>
+
+/* Project header files */
+#include "vhost_user_gpio.h"
+
+#ifdef DEBUG
+#define DBG(...) printf("vhost-user-gpio: " __VA_ARGS__)
+#else
+#define DBG(...)
+#endif /* DEBUG */
+
+#define REALIZE_CONNECTION_RETRIES 3
+#define VHOST_NVQS 2
+
+static const int feature_bits[] = {
+ VIRTIO_F_VERSION_1,
+ VIRTIO_F_NOTIFY_ON_EMPTY,
+ VIRTIO_RING_F_INDIRECT_DESC,
+ VIRTIO_RING_F_EVENT_IDX,
+ VIRTIO_GPIO_F_IRQ,
+ VIRTIO_F_RING_RESET,
+ VHOST_INVALID_FEATURE_BIT
+};
+
+static void vu_gpio_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VHostUserGPIO *gpio = vdev->vhugpio;
+
+ DBG("vu_gpio_get_config()\n");
+ memcpy(config, &gpio->config, sizeof(gpio->config));
+}
+
+static int vu_gpio_config_notifier(struct vhost_dev *dev)
+{
+ VHostUserGPIO *gpio = dev->vdev->vhugpio;
+
+ DBG("vu_gpio_config_notifier\n");
+
+ memcpy(dev->vdev->config, &gpio->config, sizeof(gpio->config));
+ virtio_notify_config(dev->vdev);
+
+ return 0;
+}
+
+const VhostDevConfigOps gpio_ops = {
+ .vhost_dev_config_notifier = vu_gpio_config_notifier,
+};
+
+static int vu_gpio_start(VirtIODevice *vdev)
+{
+ VirtioBus *k = vdev->vbus;
+ VHostUserGPIO *gpio = vdev->vhugpio;
+ int ret, i;
+
+ DBG("vu_gpio_start()\n");
+
+ if (!k->set_guest_notifiers) {
+ DBG("binding does not support guest notifiers");
+ return -ENOSYS;
+ }
+
+ ret = vhost_dev_enable_notifiers(gpio->vhost_dev, vdev);
+ if (ret < 0) {
+ DBG("Error enabling host notifiers: %d", ret);
+ return ret;
+ }
+
+ ret = k->set_guest_notifiers(k->vdev, gpio->vhost_dev->nvqs, true);
+ if (ret < 0) {
+ DBG("Error binding guest notifier: %d", ret);
+ goto out_with_err_host_notifiers;
+ }
+
+ vhost_ack_features(gpio->vhost_dev, feature_bits, vdev->guest_features);
+
+ ret = vhost_dev_start(gpio->vhost_dev, vdev, true);
+ if (ret < 0) {
+ DBG("Error starting vhost-user-gpio: %d", ret);
+ goto out_with_err_guest_notifiers;
+ }
+ gpio->started_vu = true;
+
+ for (i = 0; i < gpio->vhost_dev->nvqs; i++) {
+ vhost_virtqueue_mask(gpio->vhost_dev, vdev, i, false);
+ }
+
+ /*
+ * TODO: check if the following is needed:
+ * ret = gpio->vhost_dev->vhost_ops->vhost_set_vring_enable(gpio->vhost_dev,
+ * true);
+ */
+
+ return 0;
+
+out_with_err_guest_notifiers:
+ k->set_guest_notifiers(k->vdev, gpio->vhost_dev->nvqs, false);
+out_with_err_host_notifiers:
+ /*
+ * TODO: implement the following functions:
+ * vhost_dev_disable_notifiers(&gpio->vhost_dev, vdev);
+ */
+
+ return ret;
+}
+
+static void vu_gpio_stop(VirtIODevice *vdev)
+{
+ DBG("vu_gpio_stop() not yet implemented\n");
+}
+
+static void vu_gpio_set_status(VirtIODevice *vdev, uint8_t status)
+{
+ VHostUserGPIO *gpio = vdev->vhugpio;
+ bool should_start = virtio_device_started(vdev, status);
+
+ DBG("vu_gpio_set_status()\n");
+
+ if (!gpio->connected) {
+ return;
+ }
+
+printf("should_start: %d\n", should_start);
+ if (gpio->vhost_dev->started) {
+ return;
+ }
+
+ if (should_start) {
+ if (vu_gpio_start(vdev)) {
+ DBG("vu_gpio_start() failed\n");
+ }
+ } else {
+ vu_gpio_stop(vdev);
+ }
+}
+
+static uint64_t vu_gpio_get_features(VirtIODevice *vdev, uint64_t features)
+{
+ VHostUserGPIO *gpio = vdev->vhugpio;
+
+ DBG("vu_gpio_get_features()\n");
+ return vhost_get_features(gpio->vhost_dev, feature_bits, features);
+}
+
+static void vu_gpio_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+{
+ /*
+ * Not normally called; it's the daemon that handles the queue;
+ * however virtio's cleanup path can call this.
+ */
+ DBG("vu_gpio_handle_output not yet implemented\n");
+}
+
+static void vu_gpio_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
+{
+ VHostUserGPIO *gpio = vdev->vhugpio;
+
+ DBG("vu_gpio_guest_notifier_mask() not yet implemented\n");
+
+ vhost_virtqueue_mask(gpio->vhost_dev, vdev, idx, mask);
+}
+
+static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserGPIO *gpio)
+{
+ DBG("do_vhost_user_cleanup() not yet implemented\n");
+}
+
+static int vu_gpio_connect(VirtIODevice *vdev)
+{
+ VHostUserGPIO *gpio = vdev->vhugpio;
+ int ret;
+
+ DBG("vu_gpio_connect()\n");
+
+ if (gpio->connected) {
+ return 0;
+ }
+ gpio->connected = true;
+
+ vhost_dev_set_config_notifier(gpio->vhost_dev, &gpio_ops);
+ /*
+ * TODO: Investigate if the following is needed
+ * gpio->vhost_user.supports_config = true;
+ */
+
+ gpio->vhost_dev->nvqs = VHOST_NVQS;
+ gpio->vhost_dev->vqs = gpio->vhost_vqs;
+
+ vhost_dev_init(gpio->vhost_dev);
+ /*
+ * TODO: Add error handling
+ * if (ret < 0) {
+ * return ret;
+ * }
+ */
+
+ /* restore vhost state */
+ if (virtio_device_started(vdev, vdev->status)) {
+ vu_gpio_start(vdev);
+ }
+
+ return 0;
+}
+
+static int vu_gpio_realize_connect(VHostUserGPIO *gpio)
+{
+ int ret;
+
+ DBG("vu_gpio_realize_connect()\n");
+
+ ret = vu_gpio_connect(gpio->parent);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = vhost_dev_get_config(gpio->vhost_dev, (uint8_t *)&gpio->config,
+ sizeof(gpio->config));
+
+ if (ret < 0) {
+ DBG("vhost-user-gpio: get config failed\n");
+ /*
+ * TODO: Add cleanup function
+ * vhost_dev_cleanup(vhost_dev);
+ */
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vu_gpio_device_unrealize(VirtIODevice *vdev)
+{
+ DBG("vu_gpio_device_unrealize() not yet implemented\n");
+}
+
+static void print_config_gpio(uint8_t *config_data)
+{
+ struct virtio_gpio_config *config =
+ (struct virtio_gpio_config *)config_data;
+
+ DBG("ngpio: %hu\n", config->ngpio);
+ DBG("gpio_names_size: %u\n", config->gpio_names_size);
+}
+
+static void vu_gpio_class_init(VirtIODevice *vdev)
+{
+ DBG("vu_gpio_class_init()\n");
+
+ vdev->vdev_class = (VirtioDeviceClass *)malloc(sizeof(VirtioDeviceClass));
+ if (!vdev->vdev_class) {
+ DBG("vdev_class memory allocation failed\n");
+ return;
+ }
+ vdev->vdev_class->realize = vu_gpio_device_realize;
+ vdev->vdev_class->unrealize = vu_gpio_device_unrealize;
+ vdev->vdev_class->get_features = vu_gpio_get_features;
+ vdev->vdev_class->get_config = vu_gpio_get_config;
+ vdev->vdev_class->set_status = vu_gpio_set_status;
+ vdev->vdev_class->guest_notifier_mask = vu_gpio_guest_notifier_mask;
+}
+
+void vu_gpio_init(VirtIODevice *vdev)
+{
+ DBG("vu_gpio_init()\n");
+
+ VHostUserGPIO *vhugpio = (VHostUserGPIO *)malloc(sizeof(VHostUserGPIO));
+ if (!vhugpio) {
+ DBG("vhugpio memory allocation failed\n");
+ goto out;
+ }
+
+ vdev->vhugpio = vhugpio;
+ vdev->nvqs = &dev->nvqs;
+ vhugpio->parent = vdev;
+ vhugpio->vhost_dev = dev;
+
+ vu_gpio_class_init(vdev);
+ virtio_loopback_bus_init(vdev->vbus);
+
+out:
+ return;
+}
+
+/* TODO: Add queue_num, queue_size as parameters */
+void vu_gpio_device_realize()
+{
+ int retries, ret;
+ int i;
+
+ DBG("vu_gpio_device_realize()\n");
+
+ /* This needs to be added */
+ proxy = (VirtIOMMIOProxy *)malloc(sizeof(VirtIOMMIOProxy));
+ if (!proxy) {
+ DBG("proxy memory allocation failed\n");
+ goto out_with_error;
+ }
+
+ *proxy = (VirtIOMMIOProxy) {
+ .legacy = 1,
+ };
+
+ /* VIRTIO_ID_GPIO is 41, check virtio_ids.h in linux */
+ virtio_dev_init(global_vdev, "virtio-gpio", 41,
+ sizeof(struct virtio_gpio_config));
+
+ vu_gpio_init(global_vdev);
+ if (!global_vdev->vhugpio) {
+ DBG("vhugpio memory allocation failed\n");
+ goto out_with_proxy;
+ }
+
+ global_vdev->vhugpio->command_vq = virtio_add_queue(global_vdev, 64,
+ vu_gpio_handle_output);
+ global_vdev->vhugpio->interrupt_vq = virtio_add_queue(global_vdev, 64,
+ vu_gpio_handle_output);
+
+ global_vdev->vhugpio->vhost_vqs = (struct vhost_virtqueue *)
+ malloc(sizeof(struct vhost_virtqueue) * VHOST_NVQS);
+ if (!global_vdev->vhugpio->vhost_vqs) {
+ DBG("vhost_vqs memory allocation failed\n");
+ goto out_with_dev;
+ }
+
+ global_vdev->vhugpio->connected = false;
+
+ retries = REALIZE_CONNECTION_RETRIES;
+
+ do {
+ ret = vu_gpio_realize_connect(global_vdev->vhugpio);
+ } while (ret < 0 && retries--);
+
+ if (ret < 0) {
+ DBG("vu_gpio_realize_connect(): -EPROTO\n");
+ do_vhost_user_cleanup(global_vdev, global_vdev->vhugpio);
+ }
+
+ print_config_gpio((uint8_t *)(&global_vdev->vhugpio->config));
+ DBG("(realize completed)\n");
+
+ return;
+
+ /* TODO: Fix the following considering also do_vhost_user_cleanup() */
+out_with_cmd_vq:
+ /* free(global_vdev->vhugpio->command_vq); */
+out_with_dev:
+ free(global_vdev->vhugpio);
+out_with_proxy:
+ free(proxy);
+out_with_error:
+ DBG("Realize funciton return error\n");
+ return;
+}
diff --git a/vhost_user_gpio.h b/vhost_user_gpio.h
new file mode 100644
index 0000000..4477ba7
--- /dev/null
+++ b/vhost_user_gpio.h
@@ -0,0 +1,37 @@
+/*
+ * Based on virtio-gpio.h of QEMU project
+ *
+ * Copyright (c) 2023 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef VHOST_USER_GPIO
+#define VHOST_USER_GPIO
+
+#include "vhost_loopback.h"
+#include "vhost_user_loopback.h"
+#include "virtio_loopback.h"
+#include <linux/virtio_gpio.h>
+#include "queue.h"
+#include <sys/mman.h>
+
+#define TYPE_VHOST_USER_GPIO "vhost-user-gpio-device"
+#define VIRTIO_GPIO_F_IRQ 0
+
+struct VHostUserGPIO {
+ VirtIODevice *parent;
+ struct virtio_gpio_config config;
+ struct vhost_virtqueue *vhost_vqs;
+ struct vhost_dev *vhost_dev;
+ VirtQueue *command_vq;
+ VirtQueue *interrupt_vq;
+ bool connected;
+ bool started_vu;
+};
+
+void vu_gpio_device_realize();
+
+#endif /* VHOST_USER_GPIO */
diff --git a/vhost_user_input.c b/vhost_user_input.c
new file mode 100644
index 0000000..9cb2164
--- /dev/null
+++ b/vhost_user_input.c
@@ -0,0 +1,235 @@
+/*
+ * Based on vhost-user-input.c of QEMU project
+ *
+ * Copyright (c) 2022-2023 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+#include <sys/param.h>
+
+/* Project header files */
+#include "vhost_user_input.h"
+
+#ifdef DEBUG
+#define DBG(...) printf("vhost-user-input: " __VA_ARGS__)
+#else
+#define DBG(...)
+#endif /* DEBUG */
+
+
+static int vhost_input_config_change(struct vhost_dev *dev)
+{
+ DBG("vhost-user-input: unhandled backend config change\n");
+ return -1;
+}
+
+const VhostDevConfigOps config_ops = {
+ .vhost_dev_config_notifier = vhost_input_config_change,
+};
+
+static void vhost_input_change_active(VirtIOInput *vinput)
+{
+ DBG("vhost_input_change_active(...)\n");
+
+ VhostUserInput *vhuinput = global_vdev->vhuinput;
+
+ if (vinput->active) {
+ vhost_user_backend_start(global_vdev);
+ } else {
+ vhost_user_backend_stop(global_vdev);
+ }
+}
+
+static void vhost_input_get_config(VirtIODevice *vdev, uint8_t *config_data)
+{
+ DBG("vhost_input_get_config(...)\n");
+
+ VirtIOInput *vinput = vdev->vinput;
+ VhostUserInput *vhi = vdev->vhuinput;
+ int ret;
+
+ memset(config_data, 0, vinput->cfg_size);
+
+ ret = vhost_dev_get_config(vhi->vhost_dev, config_data, vinput->cfg_size);
+ if (ret) {
+ DBG("vhost_input_get_config failed\n");
+ return;
+ }
+}
+
+static void vhost_input_set_config(VirtIODevice *vdev,
+ const uint8_t *config_data)
+{
+ DBG("vhost_input_set_config(...)\n");
+
+ VhostUserInput *vhi = vdev->vhuinput;
+ int ret;
+
+ ret = vhost_dev_set_config(vhi->vhost_dev, config_data,
+ 0, sizeof(virtio_input_config),
+ VHOST_SET_CONFIG_TYPE_MASTER);
+ if (ret) {
+ DBG("vhost_input_set_config failed\n");
+ return;
+ }
+
+ virtio_notify_config(vdev);
+}
+
+static struct vhost_dev *vhost_input_get_vhost(VirtIODevice *vdev)
+{
+ DBG("vhost_input_get_vhost(...)\n");
+
+ return vdev->vhuinput->vhost_dev;
+}
+
+static void print_config_input(uint8_t *config_data)
+{
+ struct virtio_input_config *config_strct =
+ (struct virtio_input_config *)config_data;
+ DBG("print_config_input: Not yet implemented\n");
+}
+
+static void vhost_input_class_init(VirtIODevice *vdev)
+{
+ DBG("vhost_input_class_init(...)\n");
+
+
+ /* Comment out the following lines to get the local config */
+ vdev->vdev_class->get_config = vhost_input_get_config;
+ vdev->vdev_class->set_config = vhost_input_set_config;
+ vdev->vdev_class->get_vhost = vhost_input_get_vhost;
+ vdev->vhuinput->vdev_input->input_class->realize = vhost_user_input_realize;
+ vdev->vhuinput->vdev_input->input_class->change_active =
+ vhost_input_change_active;
+ vdev->vdev_class->update_mem_table = update_mem_table;
+ vdev->vdev_class->print_config = print_config_input;
+}
+
+
+void vhost_user_input_init(VirtIODevice *vdev)
+{
+
+ DBG("vhost_user_input_init(...)\n");
+
+ struct VirtIOInputClass *input_class = (struct VirtIOInputClass *)malloc(
+ sizeof(struct VirtIOInputClass));
+ VirtIOInput *vinput = (VirtIOInput *)malloc(sizeof(VirtIOInput));
+ VhostUserInput *vhuinput = (VhostUserInput *)malloc(sizeof(VhostUserInput));
+
+ vdev->vinput = vinput;
+ vdev->vinput->input_class = input_class;
+
+ vdev->vhuinput = vhuinput;
+ vdev->nvqs = &dev->nvqs;
+ vhuinput->vdev = vdev;
+ vhuinput->vhost_dev = dev;
+ vhuinput->vdev_input = vinput;
+
+ /*
+ * Call first the virtio_input class init to set up
+ * the basic functionality.
+ */
+ virtio_input_class_init(vdev);
+
+ /* Then call the vhost_user class init */
+ vhost_input_class_init(vdev);
+
+ /* finally initialize the bus */
+ virtio_loopback_bus_init(vdev->vbus);
+}
+
+
+void vhost_user_input_realize()
+{
+ int nvqs = 2; /* qemu choice: 2 */
+
+ DBG("vhost_user_input_realize()\n");
+
+ vhost_dev_set_config_notifier(global_vdev->vhuinput->vhost_dev,
+ &config_ops);
+
+ global_vdev->vhuinput->vdev_input->cfg_size =
+ sizeof_field(virtio_input_config, u);
+
+ global_vdev->vhuinput->vhost_dev->vq_index = 0;
+ global_vdev->vhuinput->vhost_dev->backend_features = 0;
+ global_vdev->vhuinput->vhost_dev->num_queues = nvqs;
+
+
+ global_vdev->vq = (struct VirtQueue *)malloc(
+ sizeof(struct VirtQueue) * nvqs);
+
+ global_vdev->vhuinput->vhost_dev->nvqs = nvqs;
+ global_vdev->vhuinput->vhost_dev->vqs = (struct vhost_virtqueue *)malloc(
+ sizeof(struct vhost_virtqueue) * nvqs);
+ vhost_dev_init(global_vdev->vhuinput->vhost_dev);
+
+ /* Pass the new obtained features */
+ global_vdev->host_features = global_vdev->vhuinput->vhost_dev->features;
+}
+
+void vhost_user_backend_start(VirtIODevice *vdev)
+{
+ VirtioBus *k = vdev->vbus;
+ int ret, i;
+
+ DBG("vhost_user_backend_start(...)\n");
+
+ if (vdev->started) {
+ DBG("Device has already been started!\n");
+ return;
+ }
+
+ if (!k->set_guest_notifiers) {
+ DBG("binding does not support guest notifiers\n");
+ return;
+ }
+
+ ret = vhost_dev_enable_notifiers(vdev->vhuinput->vhost_dev, vdev);
+ if (ret < 0) {
+ DBG("vhost_dev_enable_notifiers failed!\n");
+ return;
+ }
+
+ DBG("k->set_guest_notifiers, nvqs: %d\n", vdev->vhuinput->vhost_dev->nvqs);
+ ret = k->set_guest_notifiers(vdev, vdev->vhuinput->vhost_dev->nvqs, true);
+ if (ret < 0) {
+ DBG("Error binding guest notifier\n");
+ }
+
+ vdev->vhuinput->vhost_dev->acked_features = vdev->guest_features;
+ ret = vhost_dev_start(vdev->vhuinput->vhost_dev, vdev, false);
+ if (ret < 0) {
+ DBG("Error start vhost dev\n");
+ return;
+ }
+
+ /*
+ * guest_notifier_mask/pending not used yet, so just unmask
+ * everything here. virtio-pci will do the right thing by
+ * enabling/disabling irqfd.
+ */
+ for (i = 0; i < vdev->vhuinput->vhost_dev->nvqs; i++) {
+ vhost_virtqueue_mask(vdev->vhuinput->vhost_dev, vdev,
+ vdev->vhuinput->vhost_dev->vq_index + i, false);
+ }
+
+ vdev->started = true;
+ return;
+
+}
+
+void vhost_user_backend_stop(VirtIODevice *vdev)
+{
+ DBG("vhost_user_backend_stop() not yet implemented\n");
+}
diff --git a/vhost_user_input.h b/vhost_user_input.h
new file mode 100644
index 0000000..741006e
--- /dev/null
+++ b/vhost_user_input.h
@@ -0,0 +1,185 @@
+/*
+ * Based on virtio-input.h of QEMU project
+ *
+ * Copyright (c) 2022-2023 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef VHOST_USER_INPUT
+#define VHOST_USER_INPUT
+
+#include "vhost_loopback.h"
+#include "vhost_user_loopback.h"
+#include "virtio_loopback.h"
+#include <linux/virtio_input.h>
+#include "queue.h"
+#include <sys/mman.h>
+
+/* ----------------------------------------------------------------- */
+/* virtio input protocol */
+
+typedef struct virtio_input_absinfo virtio_input_absinfo;
+typedef struct virtio_input_config virtio_input_config;
+typedef struct virtio_input_event virtio_input_event;
+
+/* ----------------------------------------------------------------- */
+/* qemu internals */
+
+#define TYPE_VIRTIO_INPUT "virtio-input-device"
+#define TYPE_VIRTIO_INPUT_HID "virtio-input-hid-device"
+#define TYPE_VIRTIO_KEYBOARD "virtio-keyboard-device"
+#define TYPE_VIRTIO_MOUSE "virtio-mouse-device"
+#define TYPE_VIRTIO_TABLET "virtio-tablet-device"
+
+#define TYPE_VIRTIO_INPUT_HOST "virtio-input-host-device"
+
+#define TYPE_VHOST_USER_INPUT "vhost-user-input"
+
+typedef struct VirtIOInputConfig {
+ virtio_input_config config;
+ QTAILQ_ENTRY(VirtIOInputConfig) node;
+} VirtIOInputConfig;
+
+struct VirtIOInputClass;
+
+typedef struct VirtIOInput {
+ VirtIODevice *parent_dev;
+ struct VirtIOInputClass *input_class;
+ uint8_t cfg_select;
+ uint8_t cfg_subsel;
+ uint32_t cfg_size;
+ QTAILQ_HEAD(, VirtIOInputConfig) cfg_list;
+ VirtQueue *evt, *sts;
+ char *serial;
+ struct {
+ virtio_input_event event;
+ VirtQueueElement *elem;
+ } *queue;
+ uint32_t qindex, qsize;
+ bool active;
+
+} VirtIOInput;
+
+typedef struct VirtIOInputClass {
+ VirtioDeviceClass *parent_class;
+ void (*realize)();
+ void (*unrealize)(VirtIODevice *dev);
+ void (*change_active)(VirtIOInput *vinput);
+ void (*handle_status)(VirtIOInput *vinput, virtio_input_event *event);
+} VirtIOInputClass;
+
+struct VirtIOInputHID {
+ VirtIOInput parent_obj;
+ char *display;
+ uint32_t head;
+ int ledstate;
+ bool wheel_axis;
+};
+
+struct VirtIOInputHost {
+ VirtIOInput parent_obj;
+ char *evdev;
+ int fd;
+};
+
+typedef struct VhostUserInput {
+ VirtIOInput *vdev_input;
+ struct vhost_dev *vhost_dev;
+ VirtIODevice *vdev;
+ bool started;
+ bool completed;
+} VhostUserInput;
+
+#define VIRTIO_ID_NAME_KEYBOARD "QEMU Virtio Keyboard"
+#define BUS_VIRTUAL 0x06
+
+
+/*
+ * Event types
+ */
+
+#define EV_SYN 0x00
+#define EV_KEY 0x01
+#define EV_REL 0x02
+#define EV_ABS 0x03
+#define EV_MSC 0x04
+#define EV_SW 0x05
+#define EV_LED 0x11
+#define EV_SND 0x12
+#define EV_REP 0x14
+#define EV_FF 0x15
+#define EV_PWR 0x16
+#define EV_FF_STATUS 0x17
+#define EV_MAX 0x1f
+#define EV_CNT (EV_MAX + 1)
+
+/*
+ * LEDs
+ */
+
+#define LED_NUML 0x00
+#define LED_CAPSL 0x01
+#define LED_SCROLLL 0x02
+#define LED_COMPOSE 0x03
+#define LED_KANA 0x04
+#define LED_SLEEP 0x05
+#define LED_SUSPEND 0x06
+#define LED_MUTE 0x07
+#define LED_MISC 0x08
+#define LED_MAIL 0x09
+#define LED_CHARGING 0x0a
+#define LED_MAX 0x0f
+#define LED_CNT (LED_MAX + 1)
+
+/*
+ * Keys and buttons
+ *
+ * Most of the keys/buttons are modeled after USB HUT 1.12
+ * (see http://www.usb.org/developers/hidpage).
+ * Abbreviations in the comments:
+ * AC - Application Control
+ * AL - Application Launch Button
+ * SC - System Control
+ */
+#define KEY_G 34
+
+static struct virtio_input_config virtio_keyboard_config[] = {
+ {
+ .select = VIRTIO_INPUT_CFG_ID_NAME,
+ .size = sizeof(VIRTIO_ID_NAME_KEYBOARD),
+ .u.string = VIRTIO_ID_NAME_KEYBOARD,
+ },{
+ .select = VIRTIO_INPUT_CFG_ID_DEVIDS,
+ .size = sizeof(struct virtio_input_devids),
+ .u.ids = {
+ .bustype = (BUS_VIRTUAL),
+ .vendor = (0x0627), /* same we use for usb hid devices */
+ .product = (0x0001),
+ .version = (0x0001),
+ },
+ },{
+ .select = VIRTIO_INPUT_CFG_EV_BITS,
+ .subsel = EV_KEY,
+ .size = 1,
+ .u.bitmap = {
+ KEY_G,
+ },
+ },
+ {}, /* End of list */
+};
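+/*
+ * For reference: the guest drives virtio-input config reads by writing
+ * "select" and "subsel" first. With the table above,
+ * select=VIRTIO_INPUT_CFG_ID_NAME returns the "QEMU Virtio Keyboard"
+ * string, while select=VIRTIO_INPUT_CFG_EV_BITS with subsel=EV_KEY is
+ * intended to advertise the KEY_G key.
+ */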
+
+void vhost_user_backend_start(VirtIODevice *vdev);
+void vhost_user_backend_stop(VirtIODevice *vdev);
+
+void virtio_input_init_config(VirtIOInput *vinput,
+ virtio_input_config *config);
+void virtio_input_class_init(VirtIODevice *vdev);
+void virtio_input_device_realize();
+
+void vhost_user_input_init(VirtIODevice *vdev);
+void vhost_user_input_realize();
+
+#endif /* VHOST_USER_INPUT */
diff --git a/vhost_user_loopback.c b/vhost_user_loopback.c
new file mode 100644
index 0000000..85b9405
--- /dev/null
+++ b/vhost_user_loopback.c
@@ -0,0 +1,1782 @@
+/*
+ * Based on libvhost-user.c of QEMU project
+ *
+ * Copyright IBM, Corp. 2007
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Marc-André Lureau <mlureau@redhat.com>
+ * Victor Kaplansky <victork@redhat.com>
+ *
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <sys/param.h>
+#include <assert.h>
+
+/* For socket */
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+/* Project header files */
+#include "virtio_loopback.h"
+#include "vhost_user_loopback.h"
+#include "vhost_loopback.h"
+#include "event_notifier.h"
+
+#ifdef DEBUG
+#define DBG(...) printf("vhost-user-loopback: " __VA_ARGS__)
+#else
+#define DBG(...)
+#endif /* DEBUG */
+
+
+bool vhost_user_one_time_request(VhostUserRequest request)
+{
+ switch (request) {
+ case VHOST_USER_SET_OWNER:
+ case VHOST_USER_RESET_OWNER:
+ case VHOST_USER_SET_MEM_TABLE:
+ case VHOST_USER_GET_QUEUE_NUM:
+ case VHOST_USER_NET_SET_MTU:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+void vmsg_close_fds(VhostUserMsg *vmsg)
+{
+ int i;
+
+ for (i = 0; i < vmsg->fd_num; i++) {
+ close(vmsg->fds[i]);
+ }
+}
+
+
+bool vu_message_write(int conn_fd, VhostUserMsg *vmsg)
+{
+ int rc;
+ uint8_t *p = (uint8_t *)vmsg;
+ size_t fdsize;
+ char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
+ struct iovec iov = {
+ .iov_base = (char *)vmsg,
+ .iov_len = VHOST_USER_HDR_SIZE,
+ };
+
+ struct msghdr msg = {
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ .msg_control = control,
+ };
+ struct cmsghdr *cmsg;
+
+ if (vhost_user_one_time_request(vmsg->request) && dev->vq_index != 0) {
+ vmsg->flags &= ~VHOST_USER_NEED_REPLY_MASK;
+ return 0;
+ }
+
+ memset(control, 0, sizeof(control));
+ if (vmsg->fd_num > 0) {
+ fdsize = vmsg->fd_num * sizeof(int);
+ msg.msg_controllen = CMSG_SPACE(fdsize);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_len = CMSG_LEN(fdsize);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
+ } else {
+ msg.msg_controllen = 0;
+ }
+
+ do {
+ rc = sendmsg(conn_fd, &msg, 0);
+ } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
+
+ if (vmsg->size) {
+ do {
+ if (vmsg->data) {
+ rc = write(conn_fd, vmsg->data, vmsg->size);
+ } else {
+ rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
+ }
+ } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
+ }
+
+ if (rc <= 0) {
+ DBG("Error while writing\n");
+ return false;
+ }
+
+ return true;
+}
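+/*
+ * Wire-format note: every vhost-user message starts with the fixed
+ * VHOST_USER_HDR_SIZE header (request, flags, size) followed by "size"
+ * bytes of payload; file descriptors travel out-of-band as SCM_RIGHTS
+ * ancillary data on the Unix socket, which is what the cmsg handling
+ * above implements.
+ */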
+
+
+bool vu_message_read(int conn_fd, VhostUserMsg *vmsg)
+{
+ char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
+ struct iovec iov = {
+ .iov_base = (char *)vmsg,
+ .iov_len = VHOST_USER_HDR_SIZE,
+ };
+ struct msghdr msg = {
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ .msg_control = control,
+ .msg_controllen = sizeof(control),
+ };
+ size_t fd_size;
+ struct cmsghdr *cmsg;
+ int rc;
+
+ do {
+ rc = recvmsg(conn_fd, &msg, 0);
+ } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
+
+ if (rc < 0) {
+ DBG("Error while recvmsg\n");
+ return false;
+ }
+
+ vmsg->fd_num = 0;
+ for (cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg != NULL;
+ cmsg = CMSG_NXTHDR(&msg, cmsg))
+ {
+ if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
+ fd_size = cmsg->cmsg_len - CMSG_LEN(0);
+ vmsg->fd_num = fd_size / sizeof(int);
+ memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
+ break;
+ }
+ }
+
+ if (vmsg->size > sizeof(vmsg->payload)) {
+ DBG(
+ "Error: too big message request: %d, size: vmsg->size: %u, "
+ "while sizeof(vmsg->payload) = %zu\n",
+ vmsg->request, vmsg->size, sizeof(vmsg->payload));
+ goto fail;
+ }
+
+ if (vmsg->size) {
+ do {
+ rc = read(conn_fd, &vmsg->payload, vmsg->size);
+ } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
+
+ if (rc <= 0) {
+ DBG("Error while reading\n");
+ goto fail;
+ }
+ }
+
+ return true;
+
+fail:
+ vmsg_close_fds(vmsg);
+
+ return false;
+}
+
+int vhost_user_set_owner(void)
+{
+ VhostUserMsg msg = {
+ .request = VHOST_USER_SET_OWNER,
+ .flags = VHOST_USER_VERSION,
+ };
+
+ return vu_message_write(client_sock, &msg);
+}
+
+int process_message_reply(const VhostUserMsg *msg)
+{
+ int ret;
+ VhostUserMsg msg_reply;
+
+ if ((msg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
+ DBG("Don't wait for any reply!\n");
+ return 0;
+ }
+
+ ret = vu_message_read(client_sock, &msg_reply);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (msg_reply.request != msg->request) {
+ DBG("Received unexpected msg type. "
+ "Expected %d received %d\n",
+ msg->request, msg_reply.request);
+ return -EPROTO;
+ }
+
+ return msg_reply.payload.u64 ? -EIO : 0;
+}
+
+int vhost_user_get_u64(int request, uint64_t *u64)
+{
+ int ret;
+ VhostUserMsg msg = {
+ .request = request,
+ .flags = VHOST_USER_VERSION,
+ };
+
+ print_vhost_user_messages(request);
+
+ if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
+ return 0;
+ }
+
+ ret = vu_message_write(client_sock, &msg);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = vu_message_read(client_sock, &msg);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (msg.request != request) {
+ DBG("Received unexpected msg type. Expected %d received %d\n",
+ request, msg.request);
+ return -EPROTO;
+ }
+
+ if (msg.size != sizeof(msg.payload.u64)) {
+ DBG("Received bad msg size.\n");
+ return -EPROTO;
+ }
+
+ *u64 = msg.payload.u64;
+ DBG("\tGet value: 0x%lx\n", msg.payload.u64);
+
+ return 0;
+}
+
+
+int vhost_user_get_features(uint64_t *features)
+{
+ if (vhost_user_get_u64(VHOST_USER_GET_FEATURES, features) < 0) {
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+int enforce_reply(const VhostUserMsg *msg)
+{
+ uint64_t dummy;
+
+ if (msg->flags & VHOST_USER_NEED_REPLY_MASK) {
+ return process_message_reply(msg);
+ }
+
+ /*
+ * We need to wait for a reply but the backend does not
+ * support replies for the command we just sent.
+ * Send VHOST_USER_GET_FEATURES which makes all backends
+ * send a reply.
+ */
+ return vhost_user_get_features(&dummy);
+}
+
+int vhost_user_set_u64(int request, uint64_t u64, bool wait_for_reply)
+{
+ VhostUserMsg msg = {
+ .request = request,
+ .flags = VHOST_USER_VERSION,
+ .payload.u64 = u64,
+ .size = sizeof(msg.payload.u64),
+ };
+ int ret;
+
+ print_vhost_user_messages(request);
+ DBG("\tSet value: 0x%lx\n", u64);
+
+ if (wait_for_reply) {
+ bool reply_supported = virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK);
+
+ if (reply_supported) {
+ msg.flags |= VHOST_USER_NEED_REPLY_MASK;
+ }
+ }
+
+ ret = vu_message_write(client_sock, &msg);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (wait_for_reply) {
+ return enforce_reply(&msg);
+ }
+
+ return 0;
+}
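+/*
+ * REPLY_ACK note: when the backend advertises
+ * VHOST_USER_PROTOCOL_F_REPLY_ACK, setting VHOST_USER_NEED_REPLY_MASK in
+ * the header makes it answer with a u64 status payload (zero on success).
+ * For backends without that feature, enforce_reply() above falls back to a
+ * VHOST_USER_GET_FEATURES round-trip, since GET_FEATURES always elicits a
+ * reply.
+ */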
+
+int vhost_user_set_features(struct vhost_dev *dev,
+ uint64_t features)
+{
+ /*
+ * wait for a reply if logging is enabled to make sure
+ * backend is actually logging changes
+ */
+ bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL);
+
+ (void) dev;
+
+ /* Pass hdev as parameter! */
+ DBG("vhost_user_set_features: 0x%lx\n", features | dev->backend_features);
+ return vhost_user_set_u64(VHOST_USER_SET_FEATURES,
+ features | dev->backend_features, log_enabled);
+}
+
+int vhost_user_set_protocol_features(uint64_t features)
+{
+ return vhost_user_set_u64(VHOST_USER_SET_PROTOCOL_FEATURES, features,
+ false);
+}
+
+int vhost_user_get_max_memslots(uint64_t *max_memslots)
+{
+ uint64_t backend_max_memslots;
+ int err;
+
+ err = vhost_user_get_u64(VHOST_USER_GET_MAX_MEM_SLOTS,
+ &backend_max_memslots);
+ if (err < 0) {
+ return err;
+ }
+
+ *max_memslots = backend_max_memslots;
+
+ return 0;
+}
+
+
+
+int vhost_setup_slave_channel(struct vhost_dev *dev)
+{
+ VhostUserMsg msg = {
+ .request = VHOST_USER_SET_SLAVE_REQ_FD,
+ .flags = VHOST_USER_VERSION,
+ };
+ int sv[2], ret = 0;
+ bool reply_supported = virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK);
+
+ if (!virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
+ return 0;
+ }
+
+ if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
+ int saved_errno = errno;
+ DBG("socketpair() failed\n");
+ return -saved_errno;
+ }
+
+ memcpy(msg.fds, &sv[1], sizeof(int));
+ msg.fd_num = 1;
+
+
+ /* FIXME: something missing here */
+
+
+ if (reply_supported) {
+ msg.flags |= VHOST_USER_NEED_REPLY_MASK;
+ }
+
+ ret = vu_message_write(client_sock, &msg);
+ if (!ret) {
+ DBG("Go out\n");
+ goto out;
+ }
+
+ if (reply_supported) {
+ ret = process_message_reply(&msg);
+ DBG("Reply is done!\n");
+ }
+
+out:
+ /* TODO: Close slave channel and fd in case of error */
+ /*
+ * close(sv[1]);
+ * if (ret) {
+ * close_slave_channel(u);
+ * }
+ */
+
+ return ret;
+}
+
+
+int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
+{
+ /*
+ * TODO: Add a assert to check the requested index
+ *
+ * assert(idx >= dev->vq_index && idx < dev->vq_index + (int)dev->nvqs);
+ */
+ return idx;
+}
+
+int vhost_set_vring_file(VhostUserRequest request,
+ struct vhost_vring_file *file)
+{
+ int fds[VHOST_USER_MAX_RAM_SLOTS];
+ size_t fd_num = 0;
+ VhostUserMsg msg = {
+ .request = request,
+ .flags = VHOST_USER_VERSION,
+ .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
+ .size = sizeof(msg.payload.u64),
+ };
+
+ if (ioeventfd_enabled() && file->fd > 0) {
+ fds[fd_num++] = file->fd;
+ } else {
+ msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
+ }
+
+ /*
+ * TODO: Check if we need to remove the VHOST_USER_NEED_REPLY_MASK flag
+ *
+ * msg.flags &= ~VHOST_USER_NEED_REPLY_MASK;
+ */
+
+ msg.fd_num = fd_num;
+ memcpy(msg.fds, &fds, fd_num * sizeof(int));
+
+ return !vu_message_write(client_sock, &msg);
+}
+
+int vhost_user_set_vring_kick(struct vhost_vring_file *file)
+{
+ DBG("Call vhost_user_set_vring_kick()\n");
+ return vhost_set_vring_file(VHOST_USER_SET_VRING_KICK, file);
+}
+
+int vhost_user_set_vring_call(struct vhost_vring_file *file)
+{
+ DBG("Call vhost_user_set_vring_call()\n");
+ return vhost_set_vring_file(VHOST_USER_SET_VRING_CALL, file);
+}
+
+static int vhost_set_vring(struct vhost_dev *dev,
+ unsigned long int request,
+ struct vhost_vring_state *ring)
+{
+ VhostUserMsg msg = {
+ .request = request,
+ .flags = VHOST_USER_VERSION,
+ .payload.state = *ring,
+ .size = sizeof(msg.payload.state),
+ };
+
+ return !vu_message_write(client_sock, &msg);
+}
+
+int vhost_user_set_vring_num(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
+}
+
+int vhost_user_set_vring_base(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
+}
+
+
+int vhost_user_set_vring_addr(struct vhost_dev *dev,
+ struct vhost_vring_addr *addr)
+{
+ int ret;
+ VhostUserMsg msg = {
+ .request = VHOST_USER_SET_VRING_ADDR,
+ .flags = VHOST_USER_VERSION,
+ .payload.addr = *addr,
+ .size = sizeof(msg.payload.addr),
+ };
+
+ bool reply_supported = virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK);
+
+ /*
+ * wait for a reply if logging is enabled to make sure
+ * backend is actually logging changes
+ */
+ bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG);
+
+ if (reply_supported && wait_for_reply) {
+ msg.flags |= VHOST_USER_NEED_REPLY_MASK;
+ }
+
+ ret = vu_message_write(client_sock, &msg);
+    if (!ret) {
+        DBG("Fail vhost_user_set_vring_addr\n");
+        return -1;
+    }
+
+ if (wait_for_reply) {
+ return enforce_reply(&msg);
+ }
+
+ return 0;
+}
+
+
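+/*
+ * Initialize a single vhost virtqueue: create its masked notifier and
+ * hand the notifier's eventfd to the backend as the vring call fd.
+ */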
+int vhost_virtqueue_init(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq, int n)
+{
+ int vhost_vq_index = (int)vhost_user_get_vq_index(dev, n);
+
+ struct vhost_vring_file file = {
+ .index = vhost_vq_index,
+ };
+
+ int r = event_notifier_init(&vq->masked_notifier, 0);
+ if (r < 0) {
+ return r;
+ }
+
+ file.fd = event_notifier_get_wfd(&vq->masked_notifier);
+
+ r = vhost_user_set_vring_call(&file);
+ if (r) {
+ DBG("vhost_set_vring_call failed\n");
+ return r;
+ }
+
+ vq->dev = dev;
+
+ return 0;
+}
+
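+/*
+ * Read the device config space from the backend. The reply must carry
+ * the same request type and a payload of exactly header + config_len
+ * bytes, otherwise the call fails.
+ */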
+int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
+ uint32_t config_len)
+{
+ int ret;
+ VhostUserMsg msg = {
+ .request = VHOST_USER_GET_CONFIG,
+ .flags = VHOST_USER_VERSION,
+ .size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
+ };
+
+ DBG("dev->protocol_features: 0x%lx\n", dev->protocol_features);
+ DBG("VHOST_USER_PROTOCOL_F_CONFIG: 0x%x\n", VHOST_USER_PROTOCOL_F_CONFIG);
+
+ if (!virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_CONFIG)) {
+ DBG("VHOST_USER_PROTOCOL_F_CONFIG not supported\n");
+ return -1;
+ }
+
+ msg.payload.config.offset = 0;
+ msg.payload.config.size = config_len;
+ ret = vu_message_write(client_sock, &msg);
+ DBG("vu_message_write return: %d\n", ret);
+    if (!ret) {
+ DBG("vhost_get_config failed\n");
+ return -1;
+ }
+
+ ret = vu_message_read(client_sock, &msg);
+    if (!ret) {
+ DBG("vhost_get_config failed\n");
+ return -1;
+ }
+
+ if (msg.request != VHOST_USER_GET_CONFIG) {
+ DBG("Received unexpected msg type. Expected %d received %d",
+ VHOST_USER_GET_CONFIG, msg.request);
+ return -1;
+ }
+
+ if (msg.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
+ DBG("Received bad msg size.\n");
+ return -1;
+ }
+
+ memcpy(config, msg.payload.config.region, config_len);
+
+ DBG("Received config: %u, config_len: %u\n", *config, config_len);
+
+ DBG("vhost_user_get_config return successfully\n");
+
+ return 0;
+}
+
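+/*
+ * Write (part of) the device config space to the backend. The data is
+ * copied into the message payload, so @size is bounded by
+ * VHOST_USER_MAX_CONFIG_SIZE.
+ */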
+int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
+ uint32_t offset, uint32_t size, uint32_t flags)
+{
+ int ret;
+ uint8_t *p;
+ bool reply_supported = virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK);
+
+ VhostUserMsg msg = {
+ .request = VHOST_USER_SET_CONFIG,
+ .flags = VHOST_USER_VERSION,
+ .size = VHOST_USER_CONFIG_HDR_SIZE + size,
+ };
+
+ if (!virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_CONFIG)) {
+ DBG("VHOST_USER_PROTOCOL_F_CONFIG not supported\n");
+ return -ENOTSUP;
+ }
+
+ if (reply_supported) {
+ msg.flags |= VHOST_USER_NEED_REPLY_MASK;
+ }
+
+ if (size > VHOST_USER_MAX_CONFIG_SIZE) {
+ return -EINVAL;
+ }
+
+    msg.payload.config.offset = offset;
+    msg.payload.config.size = size;
+    msg.payload.config.flags = flags;
+ p = msg.payload.config.region;
+ memcpy(p, data, size);
+
+ ret = vu_message_write(client_sock, &msg);
+ DBG("vu_message_write return: %d\n", ret);
+    if (!ret) {
+        return -1;
+    }
+
+    if (reply_supported) {
+        ret = process_message_reply(&msg);
+        DBG("Reply is done!\n");
+        return ret;
+    }
+
+ return 0;
+}
+
+
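+/*
+ * Ask the backend for its inflight I/O tracking region and mmap it.
+ * The backend returns the region's size and offset in the payload and
+ * the backing fd as ancillary data.
+ */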
+int vhost_user_get_inflight_fd(struct vhost_dev *dev,
+ uint16_t queue_size,
+ struct vhost_inflight *inflight)
+{
+ void *addr;
+ int fd;
+ int ret;
+ VhostUserMsg msg = {
+ .request = VHOST_USER_GET_INFLIGHT_FD,
+ .flags = VHOST_USER_VERSION,
+ .payload.inflight.num_queues = dev->nvqs,
+ .payload.inflight.queue_size = queue_size,
+ .size = sizeof(msg.payload.inflight),
+ };
+
+ DBG("vhost_user_get_inflight_fd\n");
+
+ if (!virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
+ return 0;
+ }
+
+ /* NOTE: This stays here as a reference */
+ ret = vu_message_write(client_sock, &msg);
+    if (!ret) {
+        DBG("vhost_user_get_inflight_fd\n\t->write error\n");
+        return -1;
+    }
+
+ /* NOTE: This stays here as a reference */
+ ret = vu_message_read(client_sock, &msg);
+    if (!ret) {
+        DBG("vhost_user_get_inflight_fd\n\t->read error\n");
+        return -1;
+    }
+
+ if (msg.request != VHOST_USER_GET_INFLIGHT_FD) {
+ DBG("Received unexpected msg type. "
+ "Expected %d received %d\n",
+ VHOST_USER_GET_INFLIGHT_FD, msg.request);
+ return -1;
+ }
+
+ if (msg.size != sizeof(msg.payload.inflight)) {
+ DBG("Received bad msg size.\n");
+ return -1;
+ }
+
+ if (!msg.payload.inflight.mmap_size) {
+ DBG("!msg.payload.inflight.mmap_size\n");
+ return 0;
+ }
+
+ /* FIXME: This needs to be checked */
+ memcpy(&fd, msg.fds, sizeof(int));
+ if (fd < 0) {
+ DBG("Failed to get mem fd\n");
+ return -1;
+ }
+
+ addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, msg.payload.inflight.mmap_offset);
+
+ if (addr == MAP_FAILED) {
+ DBG("Failed to mmap mem fd\n");
+ close(fd);
+ return -1;
+ }
+
+ inflight->addr = addr;
+ inflight->fd = fd;
+ inflight->size = msg.payload.inflight.mmap_size;
+ inflight->offset = msg.payload.inflight.mmap_offset;
+ inflight->queue_size = queue_size;
+
+ return 0;
+}
+
+
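+/* Hand a previously obtained inflight region back to the backend */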
+int vhost_user_set_inflight_fd(struct vhost_dev *dev,
+ struct vhost_inflight *inflight)
+{
+ VhostUserMsg msg = {
+ .request = VHOST_USER_SET_INFLIGHT_FD,
+ .flags = VHOST_USER_VERSION,
+ .payload.inflight.mmap_size = inflight->size,
+ .payload.inflight.mmap_offset = inflight->offset,
+ .payload.inflight.num_queues = dev->nvqs,
+ .payload.inflight.queue_size = inflight->queue_size,
+ .size = sizeof(msg.payload.inflight),
+ };
+
+ DBG("vhost_user_set_inflight_fd\n");
+
+ if (!virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
+ return 0;
+ }
+
+ msg.fd_num = 1;
+ memcpy(msg.fds, &inflight->fd, msg.fd_num * sizeof(int));
+
+    return !vu_message_write(client_sock, &msg); /* 0 on success, 1 on failure */
+}
+
+
+/* -------------------- Mem regions functions -------------------- */
+
+
+static MemoryRegion *vhost_user_get_mr_data(struct vhost_memory_region *reg,
+                                            ram_addr_t *offset, int *fd)
+{
+    /*
+     * In the loopback case there is no MemoryRegion backing the vhost
+     * region: every region is served through the loopback device fd.
+     * Return NULL instead of an uninitialized pointer; callers only
+     * dereference the result when track_ramblocks is set, which the
+     * adapter never does.
+     */
+    *offset = 0;
+    *fd = loopback_fd;
+
+    return NULL;
+}
+
+static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
+ struct vhost_memory_region *src,
+ uint64_t mmap_offset)
+{
+ assert(src != NULL && dst != NULL);
+ dst->userspace_addr = src->userspace_addr;
+ dst->memory_size = src->memory_size;
+ dst->guest_phys_addr = src->guest_phys_addr;
+ dst->mmap_offset = mmap_offset;
+}
+
+static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
+ struct vhost_dev *dev,
+ VhostUserMsg *msg,
+ int *fds, size_t *fd_num,
+ bool track_ramblocks)
+{
+ int i, fd;
+ ram_addr_t offset;
+ MemoryRegion *mr;
+ struct vhost_memory_region *reg;
+ VhostUserMemoryRegion region_buffer;
+
+ msg->request = VHOST_USER_SET_MEM_TABLE;
+
+ for (i = 0; i < dev->mem->nregions; ++i) {
+ reg = dev->mem->regions + i;
+
+ mr = vhost_user_get_mr_data(reg, &offset, &fd);
+ if (fd > 0) {
+ if (track_ramblocks) {
+ u->region_rb_offset[i] = offset;
+ u->region_rb[i] = mr->ram_block;
+ } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
+                DBG("Failed preparing vhost-user memory table msg: %zu\n",
+                    *fd_num);
+ return -1;
+ }
+ vhost_user_fill_msg_region(&region_buffer, reg, offset);
+ msg->payload.memory.regions[*fd_num] = region_buffer;
+ fds[(*fd_num)++] = fd;
+ } else if (track_ramblocks) {
+ u->region_rb_offset[i] = 0;
+ u->region_rb[i] = NULL;
+ }
+ }
+
+ msg->payload.memory.nregions = *fd_num;
+ if (!*fd_num) {
+ DBG("Failed initializing vhost-user memory map, "
+ "consider using -object memory-backend-file share=on\n");
+ return -1;
+ }
+
+ msg->size = sizeof(msg->payload.memory.nregions);
+ msg->size += sizeof(msg->payload.memory.padding);
+ msg->size += *fd_num * sizeof(VhostUserMemoryRegion);
+
+ return 1;
+}
+
+static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
+ struct vhost_memory_region *vdev_reg)
+{
+ return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
+ shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
+ shadow_reg->memory_size == vdev_reg->memory_size;
+}
+
+
+/* Sync the two region lists (device / adapter) */
+static void scrub_shadow_regions(struct vhost_dev *dev,
+ struct scrub_regions *add_reg,
+ int *nr_add_reg,
+ struct scrub_regions *rem_reg,
+ int *nr_rem_reg, uint64_t *shadow_pcb,
+ bool track_ramblocks)
+{
+ struct vhost_user *u = adev->vudev;
+ bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
+ struct vhost_memory_region *reg, *shadow_reg;
+ int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
+ ram_addr_t offset;
+ MemoryRegion *mr;
+ bool matching;
+
+ /*
+ * Find memory regions present in our shadow state which are not in
+ * the device's current memory state.
+ *
+ * Mark regions in both the shadow and device state as "found".
+ */
+ for (i = 0; i < u->num_shadow_regions; i++) {
+ shadow_reg = &u->shadow_regions[i];
+ matching = false;
+
+ for (j = 0; j < dev->mem->nregions; j++) {
+ reg = &dev->mem->regions[j];
+
+ mr = vhost_user_get_mr_data(reg, &offset, &fd);
+
+ if (reg_equal(shadow_reg, reg)) {
+ matching = true;
+ found[j] = true;
+ break;
+ }
+ }
+
+ /*
+ * If the region was not found in the current device memory state
+ * create an entry for it in the removed list.
+ */
+ if (!matching) {
+ rem_reg[rm_idx].region = shadow_reg;
+ rem_reg[rm_idx++].reg_idx = i;
+ }
+ }
+
+ /*
+ * For regions not marked "found", create entries in the added list.
+ *
+ * Note their indexes in the device memory state and the indexes of their
+ * file descriptors.
+ */
+
+ DBG("For regions not marked 'found', create entries in the added list\n");
+ DBG("dev->mem->nregions: %d\n", dev->mem->nregions);
+
+ for (i = 0; i < dev->mem->nregions; i++) {
+
+ reg = &dev->mem->regions[i];
+
+ mr = vhost_user_get_mr_data(reg, &offset, &fd);
+
+ /*
+ * If the region was in both the shadow and device state we don't
+ * need to send a VHOST_USER_ADD_MEM_REG message for it.
+ */
+ if (found[i]) {
+ continue;
+ }
+
+ add_reg[add_idx].region = reg;
+ add_reg[add_idx].reg_idx = i;
+ add_reg[add_idx++].fd_idx = fd_num;
+
+ }
+ *nr_rem_reg = rm_idx;
+ *nr_add_reg = add_idx;
+
+ return;
+}
+
+
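+/*
+ * Send one VHOST_USER_REM_MEM_REG message for each region that is still
+ * in the shadow table but no longer in the device memory state.
+ */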
+static int send_remove_regions(struct vhost_dev *dev,
+ struct scrub_regions *remove_reg,
+ int nr_rem_reg, VhostUserMsg *msg,
+ bool reply_supported)
+{
+ struct vhost_user *u = adev->vudev;
+ struct vhost_memory_region *shadow_reg;
+ int i, fd, shadow_reg_idx, ret;
+ ram_addr_t offset;
+ VhostUserMemoryRegion region_buffer;
+
+ /*
+ * The regions in remove_reg appear in the same order they do in the
+ * shadow table. Therefore we can minimize memory copies by iterating
+ * through remove_reg backwards.
+ */
+ for (i = nr_rem_reg - 1; i >= 0; i--) {
+ shadow_reg = remove_reg[i].region;
+ shadow_reg_idx = remove_reg[i].reg_idx;
+
+ DBG("Try to remove: 0x%llx\n", remove_reg[i].region->guest_phys_addr);
+
+ (void)vhost_user_get_mr_data(shadow_reg, &offset, &fd);
+
+ if (fd > 0) {
+ msg->request = VHOST_USER_REM_MEM_REG;
+ vhost_user_fill_msg_region(&region_buffer, shadow_reg, offset);
+ msg->payload.memreg.region = region_buffer;
+
+ msg->fd_num = 1;
+ memcpy(msg->fds, &loopback_fd, sizeof(int));
+
+ if (vu_message_write(client_sock, msg) < 0) {
+ return -1;
+ }
+
+ if (reply_supported) {
+ msg->flags |= VHOST_USER_NEED_REPLY_MASK;
+ ret = process_message_reply(msg);
+
+ /*
+ * TODO: For this release do not process the message:
+ * if (ret) {
+ * return ret;
+ * }
+ */
+ }
+ }
+
+ }
+
+ return 0;
+}
+
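+/*
+ * Send one VHOST_USER_ADD_MEM_REG message for each region that appears
+ * in the device memory state but not yet in the shadow table.
+ */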
+static int send_add_regions(struct vhost_dev *dev,
+ struct scrub_regions *add_reg, int nr_add_reg,
+ VhostUserMsg *msg, uint64_t *shadow_pcb,
+ bool reply_supported, bool track_ramblocks)
+{
+ struct vhost_user *u = adev->vudev;
+ int i, fd, ret, reg_idx, reg_fd_idx;
+ struct vhost_memory_region *reg;
+ MemoryRegion *mr;
+ ram_addr_t offset;
+ VhostUserMsg msg_reply;
+ VhostUserMemoryRegion region_buffer;
+
+ for (i = 0; i < nr_add_reg; i++) {
+ reg = add_reg[i].region;
+ reg_idx = add_reg[i].reg_idx;
+ reg_fd_idx = add_reg[i].fd_idx;
+
+ DBG("Try to add: 0x%llx\n", add_reg[i].region->guest_phys_addr);
+
+ mr = vhost_user_get_mr_data(reg, &offset, &fd);
+
+ if (fd > 0) {
+
+ msg->request = VHOST_USER_ADD_MEM_REG;
+ vhost_user_fill_msg_region(&region_buffer, reg, offset);
+ msg->payload.memreg.region = region_buffer;
+
+ msg->fd_num = 1;
+ memcpy(msg->fds, &loopback_fd, sizeof(int));
+
+ if (vu_message_write(client_sock, msg) < 0) {
+ DBG("send_add_regions -> write failed\n");
+ return -1;
+ }
+
+ if (reply_supported) {
+ msg->flags |= VHOST_USER_NEED_REPLY_MASK;
+ ret = process_message_reply(msg);
+
+ /*
+ * TODO: For this release do not process the message:
+ * if (ret) {
+ * return ret;
+ * }
+ */
+ }
+ } else if (track_ramblocks) {
+ u->region_rb_offset[reg_idx] = 0;
+ u->region_rb[reg_idx] = NULL;
+ }
+ }
+
+ return 0;
+}
+
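+/*
+ * Diff the shadow region table against the current device memory state,
+ * push the delta to the backend as REM/ADD_MEM_REG messages and then
+ * resync the shadow table.
+ */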
+static int vhost_user_add_remove_regions(struct vhost_dev *dev,
+ VhostUserMsg *msg,
+ bool reply_supported,
+ bool track_ramblocks)
+{
+ struct vhost_user *u = adev->vudev;
+ struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
+ struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
+ uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
+ int nr_add_reg, nr_rem_reg;
+
+ msg->size = sizeof(msg->payload.memreg);
+
+ /* Find the regions which need to be removed or added. */
+ scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
+ shadow_pcb, track_ramblocks);
+
+ if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
+ reply_supported) < 0)
+ {
+ DBG("send_remove_regions failed\n");
+ goto err;
+ }
+
+ if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg,
+ shadow_pcb, reply_supported, track_ramblocks) < 0)
+ {
+ DBG("send_add_regions failed\n");
+ goto err;
+ }
+
+
+ /* TODO: At this point we need to update the shadow list */
+ u->num_shadow_regions = dev->mem->nregions;
+ memcpy(u->shadow_regions, dev->mem->regions,
+ dev->mem->nregions * sizeof(struct vhost_memory_region));
+
+ return 0;
+
+err:
+ DBG("vhost_user_add_remove_regions failed\n");
+ return -1;
+}
+
+
+/* TODO: This function might be implemented in a later release */
+static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
+ bool reply_supported,
+ bool config_mem_slots)
+{
+ DBG("vhost_user_set_mem_table_postcopy(...) not yet implemented\n");
+ return 0;
+}
+
+
+/*
+ * TODO: This function is not yet fully optimized because it is not used
+ * in the current release. It will be completed or deleted in a later release.
+ */
+int vhost_user_set_mem_table(struct vhost_dev *dev)
+{
+ int fds[VHOST_MEMORY_BASELINE_NREGIONS];
+ size_t fd_num = 0;
+ bool reply_supported = virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK);
+ bool config_mem_slots =
+ virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);
+ int ret;
+ struct vhost_user *u = adev->vudev;
+ bool do_postcopy = false;
+
+ if (do_postcopy) {
+        /*
+         * Postcopy has enough differences that it's best done in its own
+         * version.
+         */
+ return vhost_user_set_mem_table_postcopy(dev, reply_supported,
+ config_mem_slots);
+ }
+
+ VhostUserMsg msg = {
+ .flags = VHOST_USER_VERSION,
+ };
+
+ if (reply_supported) {
+ msg.flags |= VHOST_USER_NEED_REPLY_MASK;
+ }
+
+ if (config_mem_slots) {
+        DBG("config_mem_slots is enabled\n");
+ if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
+ false) < 0) {
+ return -1;
+ }
+ } else {
+
+ if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
+ false) < 0) {
+ DBG("vhost_user_fill_set_mem_table_msg error\n");
+ return -1;
+ }
+
+ /* Update message parameters */
+        DBG("\nfd_num: %zu\n", fd_num);
+ msg.fd_num = fd_num;
+ memcpy(msg.fds, fds, fd_num * sizeof(int));
+
+ if (vu_message_write(client_sock, &msg) < 0) {
+ DBG("vhost_user_set_mem_table failed write msg\n");
+ return -1;
+ }
+
+ if (reply_supported) {
+ return process_message_reply(&msg);
+ }
+ }
+
+ return 0;
+}
+
+
+
+void print_mem_table(struct vhost_dev *dev)
+{
+ struct vhost_memory_region *cur_vmr;
+ int i;
+
+ DBG("print_mem_table:\n");
+
+ for (i = 0; i < dev->n_mem_sections; i++) {
+
+ cur_vmr = dev->mem->regions + i;
+ DBG("regions[%d]->guest_phys_addr: 0x%llx\n",
+ i, cur_vmr->guest_phys_addr);
+        DBG("regions[%d]->memory_size: 0x%llx\n",
+ i, cur_vmr->memory_size);
+ DBG("regions[%d]->userspace_addr: 0x%llx\n",
+ i, cur_vmr->userspace_addr);
+ DBG("regions[%d]->flags_padding: 0x%llx\n",
+ i, cur_vmr->flags_padding);
+
+ }
+}
+
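+/*
+ * Append one region (hpa/len) to dev->mem, growing the regions array by
+ * one slot; the userspace address is set equal to the physical one.
+ */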
+static void vhost_add_reg(struct vhost_dev *dev, uint64_t hpa, uint64_t len)
+{
+ size_t regions_size, old_regions_size;
+ struct vhost_memory *temp_mem;
+ struct vhost_memory_region *cur_vmr;
+
+ DBG("vhost_add_reg (hpa: 0x%lx, len: %lu)\n", hpa, len);
+
+ /* Rebuild the regions list from the new sections list */
+ regions_size = offsetof(struct vhost_memory, regions) +
+ (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
+ temp_mem = (struct vhost_memory *)malloc(regions_size);
+
+ /* Copy the old mem structure */
+ old_regions_size = offsetof(struct vhost_memory, regions) +
+ (dev->mem->nregions) * sizeof dev->mem->regions[0];
+ memcpy(temp_mem, dev->mem, old_regions_size);
+
+ /* Increase the regions' counter */
+ temp_mem->nregions = dev->mem->nregions + 1;
+ dev->n_mem_sections = temp_mem->nregions;
+
+ /* Clear the previous structure */
+ free(dev->mem);
+
+ /* Point to the new one */
+ dev->mem = temp_mem;
+
+ /* Init the new region */
+ cur_vmr = dev->mem->regions + (dev->mem->nregions - 1);
+ cur_vmr->guest_phys_addr = hpa;
+ cur_vmr->memory_size = len;
+ cur_vmr->userspace_addr = hpa;
+ cur_vmr->flags_padding = 0;
+}
+
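+/* Check whether [hpa, hpa + len) is already covered by a known region */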
+static bool find_reg(struct vhost_dev *dev, uint64_t hpa, uint64_t len)
+{
+ struct vhost_memory_region *cur_vmr;
+ int i;
+
+ DBG("Try to find hpa: 0x%lx\n", hpa);
+
+ for (i = dev->nvqs; i < dev->n_mem_sections; i++) {
+
+ cur_vmr = dev->mem->regions + i;
+ if ((hpa >= cur_vmr->guest_phys_addr) &&
+ ((hpa + len) <= (cur_vmr->guest_phys_addr
+ + cur_vmr->memory_size))) {
+            DBG("Found region with hpa: 0x%llx and len: %lld\n",
+ cur_vmr->guest_phys_addr, cur_vmr->memory_size);
+ return true;
+ }
+ }
+
+ DBG("Did not find region with hpa: 0x%lx\n", hpa);
+ return false;
+}
+
+int last_avail = -1;
+
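+/*
+ * Walk every descriptor of every vring and register the memory behind
+ * each scatter-gather buffer that is not yet covered by a region.
+ */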
+void find_add_new_reg(struct vhost_dev *dev)
+{
+ int sglist_elem_num;
+
+ DBG("Total nvqs: %d\n", dev->nvqs);
+ for (int i = 0; i < dev->nvqs; i++) {
+
+ VRing *vring = &dev->vdev->vq[i].vring;
+ uint64_t vring_num = vring->num;
+
+ DBG("For vq[%d]:\n", i);
+ DBG("vqs[%u] hpa 0x%lx\n", i, vring_phys_addrs[i]);
+        DBG("vq[%d].vring.num: %lu\n", i, vring_num);
+ DBG("We got avail buf: %d\n",
+ ((VRingAvail *)(dev->vdev->vq[i].vring.avail))->idx);
+
+ int avail_diff = ((VRingAvail *)(dev->vdev->vq[i].vring.avail))->idx
+ - last_avail;
+
+ for (int j = 0; j < vring_num; j++) {
+
+ uint64_t desc_addr = dev->vdev->vq[i].vring.desc;
+ VRingDesc desc_p = ((VRingDesc *)desc_addr)[j];
+ uint64_t sg_addr = desc_p.addr;
+ uint64_t sg_len = desc_p.len;
+
+ if (desc_p.addr == 0) {
+ sglist_elem_num = j;
+ DBG("We got avail buf: %d\n",
+ ((VRingAvail *)(dev->vdev->vq[i].vring.avail))->idx);
+ DBG("We got sglist_ele_num: %d\n", sglist_elem_num);
+ break;
+ }
+
+ DBG("desc[%u] 0x%lx\n", j, desc_addr);
+ DBG("desc[%u].addr 0x%lx\n", j, sg_addr);
+            DBG("desc[%u].len 0x%lx\n", j, sg_len);
+            DBG("desc[%u].flags 0x%x\n", j, desc_p.flags);
+
+ if (!find_reg(dev, sg_addr, sg_len)) {
+ vhost_add_reg(dev, sg_addr, sg_len);
+ }
+
+ }
+ DBG("We got avail buf: %d\n",
+ ((VRingAvail *)(dev->vdev->vq[i].vring.avail))->idx);
+
+ last_avail = ((VRingAvail *)(dev->vdev->vq[i].vring.avail))->idx;
+ sglist_elem_num = 3 * avail_diff;
+ }
+}
+
+void vhost_commit_init_vqs(struct vhost_dev *dev)
+{
+    size_t regions_size;
+    int i;
+
+ dev->n_mem_sections = dev->nvqs;
+
+ /* Rebuild the regions list from the new sections list */
+ regions_size = offsetof(struct vhost_memory, regions) +
+ dev->n_mem_sections * sizeof dev->mem->regions[0];
+ dev->mem = (struct vhost_memory *)malloc(regions_size);
+ dev->mem->nregions = dev->n_mem_sections;
+
+ for (i = 0; i < dev->nvqs; i++) {
+ struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
+
+ cur_vmr->guest_phys_addr = vring_phys_addrs[i] << PAGE_SHIFT;
+ cur_vmr->memory_size = get_vqs_max_size(global_vdev);
+ cur_vmr->userspace_addr = 0;
+ cur_vmr->flags_padding = 0;
+ }
+}
+
+void vhost_commit_vqs(struct vhost_dev *dev)
+{
+ free(dev->mem);
+ vhost_commit_init_vqs(dev);
+ find_add_new_reg(dev);
+}
+
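+/*
+ * Pre-populate the region table with VHOST_USER_MAX_RAM_SLOTS slots of
+ * 1GB starting at INIT_PA, then push the whole table to the backend.
+ */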
+void vhost_commit_mem_regions(struct vhost_dev *dev)
+{
+ uint64_t mmap_pa_req;
+ int i;
+
+ /* Create and add all ram memory regions */
+ for (i = 0; i < VHOST_USER_MAX_RAM_SLOTS; i++) {
+
+ /* Calculate new Physical Address */
+ mmap_pa_req = INIT_PA + i * 1 * OFFSET_1GB;
+
+ /* Add a new region */
+ vhost_add_reg(dev, mmap_pa_req, 1 * OFFSET_1GB);
+ }
+
+ /* Send new region */
+ if (vhost_user_set_mem_table(dev) < 0) {
+ DBG("vhost_user_set_mem_table -> Error\n");
+ exit(1);
+ }
+}
+
+/* -------------------- End of Mem regions functions -------------------- */
+
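+/*
+ * Negotiate with the vhost-user backend: base features, protocol
+ * features, queue and memory-slot limits and, for the first queue,
+ * the slave channel.
+ */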
+int vhost_user_backend_init(struct vhost_dev *vhdev)
+{
+ uint64_t features, protocol_features, ram_slots;
+ int err;
+
+ DBG("vhost_user_backend_init (...)\n");
+
+ err = vhost_user_get_features(&features);
+ if (err < 0) {
+ DBG("vhost_backend_init failed\n");
+ return err;
+ }
+
+ if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
+ vhdev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
+
+ err = vhost_user_get_u64(VHOST_USER_GET_PROTOCOL_FEATURES,
+ &protocol_features);
+ if (err < 0) {
+ DBG("vhost_backend_init failed\n");
+ return -EPROTO;
+ }
+
+ vhdev->protocol_features =
+ protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;
+
+ /*
+ * FIXME: Disable VHOST_USER_PROTOCOL_F_SLAVE_REQ for the moment
+ * vhdev->protocol_features &=
+ * ~(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ);
+ */
+
+ /* FIXME: Disable VHOST_USER_GET_INFLIGHT_FD for the moment */
+ vhdev->protocol_features &=
+ ~(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD);
+
+ if (!vhdev->config_ops ||
+ !vhdev->config_ops->vhost_dev_config_notifier) {
+ DBG("There is no config_ops or vhost_dev_config_notifier\n");
+ /* Don't acknowledge CONFIG feature if device doesn't support it */
+            vhdev->protocol_features &=
+                ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
+ } else if (!(protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) {
+ DBG("Device expects VHOST_USER_PROTOCOL_F_CONFIG "
+ "but backend does not support it.\n");
+ return -EINVAL;
+ }
+
+ err = vhost_user_set_protocol_features(vhdev->protocol_features);
+ if (err < 0) {
+ DBG("vhost_backend_init failed\n");
+ return -EPROTO;
+ }
+
+ /* query the max queues we support if backend supports Multiple Queue */
+ if (vhdev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
+ err = vhost_user_get_u64(VHOST_USER_GET_QUEUE_NUM,
+ &vhdev->max_queues);
+ if (err < 0) {
+ DBG("vhost_backend_init failed\n");
+ return -EPROTO;
+ }
+ } else {
+ vhdev->max_queues = 1;
+ }
+
+ if (vhdev->num_queues && vhdev->max_queues < vhdev->num_queues) {
+ DBG("The maximum number of queues supported by the "
+            "backend is %lu\n", vhdev->max_queues);
+ return -EINVAL;
+ }
+
+ if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
+ !(virtio_has_feature(vhdev->protocol_features,
+ VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
+ virtio_has_feature(vhdev->protocol_features,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
+ DBG("IOMMU support requires reply-ack and "
+ "slave-req protocol features.\n");
+ return -EINVAL;
+ }
+
+ /* get max memory regions if backend supports configurable RAM slots */
+ if (!virtio_has_feature(vhdev->protocol_features,
+ VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
+ vhdev->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
+ } else {
+ err = vhost_user_get_max_memslots(&ram_slots);
+ if (err < 0) {
+ DBG("vhost_backend_init failed\n");
+ return -EPROTO;
+ }
+
+ if (ram_slots < vhdev->memory_slots) {
+ DBG("The backend specified a max ram slots limit "
+                "of %lu, when the prior validated limit was "
+                "%lu. This limit should never decrease.\n", ram_slots,
+ vhdev->memory_slots);
+ return -EINVAL;
+ }
+
+ vhdev->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
+ }
+ }
+
+ if (vhdev->migration_blocker == NULL &&
+ !virtio_has_feature(vhdev->protocol_features,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
+ DBG("Migration disabled: vhost-user backend lacks "
+ "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.\n");
+ }
+
+ if (vhdev->vq_index == 0) {
+ err = vhost_setup_slave_channel(vhdev);
+ if (err < 0) {
+ DBG("vhost_backend_init failed\n");
+ return -EPROTO;
+ }
+ }
+
+ /*
+ * TODO: We might need to set up a postcopy_notifier in a future release:
+ *
+ * u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
+ * postcopy_add_notifier(&u->postcopy_notifier);
+ */
+
+ return 0;
+}
+
+/* TODO: Return an error code */
+void vhost_dev_init(struct vhost_dev *vhdev)
+{
+ uint64_t features;
+ int r, n_initialized_vqs = 0;
+ unsigned int i;
+
+ DBG("vhost_dev_init(...)\n");
+
+ /* Vhost conf */
+ vhdev->migration_blocker = NULL;
+
+ (void)vhost_user_backend_init(vhdev);
+
+ r = vhost_user_set_owner();
+ if (r < 0) {
+ DBG("vhost_set_owner failed\n");
+ }
+
+ r = vhost_user_get_features(&features);
+ if (r < 0) {
+ DBG("vhost_get_features failed\n");
+ }
+ DBG("Print vhost_dev_init->features: 0x%lx\n", features);
+
+
+ for (i = 0; i < vhdev->nvqs; ++i, ++n_initialized_vqs) {
+ r = vhost_virtqueue_init(vhdev, vhdev->vqs + i, vhdev->vq_index + i);
+ if (r < 0) {
+            DBG("Failed to initialize virtqueue %d\n", i);
+ }
+ }
+
+ vhdev->mem = (struct vhost_memory *)malloc(sizeof(struct vhost_memory));
+ vhdev->mem->nregions = 0;
+
+ vhdev->n_mem_sections = 0;
+ vhdev->mem_sections = NULL;
+ vhdev->log = NULL;
+ vhdev->log_size = 0;
+ vhdev->log_enabled = false;
+ vhdev->started = false;
+
+
+ /*
+ * TODO: busyloop == 0 in rng case, but we might need it for new devices:
+ *
+ * if (busyloop_timeout) {
+ * for (i = 0; i < dev->nvqs; ++i) {
+ * r = vhost_virtqueue_set_busyloop_timeout(dev, dev->vq_index + i,
+ * busyloop_timeout);
+ * if (r < 0) {
+ * DBG("Failed to set busyloop timeout\n");
+ * return -1;
+ * }
+ * }
+ * }
+ */
+
+ vhdev->features = features;
+    DBG("vhdev->backend_features 0x%lx\n", vhdev->backend_features);
+    DBG("vhdev->features 0x%lx\n", vhdev->features);
+}
+
+int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
+{
+ int i;
+    DBG("vhost_user_set_vring_enable(...)\n");
+
+ if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
+ DBG("Does not have VHOST_USER_F_PROTOCOL_FEATURES\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->nvqs; ++i) {
+ int ret;
+ struct vhost_vring_state state = {
+ .index = dev->vq_index + i,
+ .num = enable,
+ };
+
+ ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
+ if (ret < 0) {
+ /*
+ * Restoring the previous state is likely infeasible, as well as
+ * proceeding regardless the error, so just bail out and hope for
+ * the device-level recovery.
+ */
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status)
+{
+ return vhost_user_set_u64(VHOST_USER_SET_STATUS, status, false);
+}
+
+static int vhost_user_get_status(struct vhost_dev *dev, uint8_t *status)
+{
+ uint64_t value;
+ int ret;
+
+ ret = vhost_user_get_u64(VHOST_USER_GET_STATUS, &value);
+ if (ret < 0) {
+ return ret;
+ }
+ *status = value;
+
+ return 0;
+}
+
+static int vhost_user_add_status(struct vhost_dev *dev, uint8_t status)
+{
+ uint8_t s;
+ int ret;
+
+ ret = vhost_user_get_status(dev, &s);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if ((s & status) == status) {
+ return 0;
+ }
+ s |= status;
+
+ return vhost_user_set_status(dev, s);
+}
+
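+/*
+ * Drive the backend device status: on start, set ACKNOWLEDGE, DRIVER
+ * and DRIVER_OK via VHOST_USER_SET_STATUS, once for the last queue pair.
+ */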
+int vhost_user_dev_start(struct vhost_dev *dev, bool started)
+{
+ DBG("vhost_user_dev_start(...)\n");
+ if (!virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_STATUS)) {
+ DBG("VHOST_USER_PROTOCOL_F_STATUS not in features\n");
+ return 0;
+ }
+
+ /* Set device status only for last queue pair */
+ if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
+ return 0;
+ }
+
+ if (started) {
+ return vhost_user_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
+ VIRTIO_CONFIG_S_DRIVER |
+ VIRTIO_CONFIG_S_DRIVER_OK);
+ } else {
+ return 0;
+ }
+}
+
+void print_vhost_user_messages(int request)
+{
+ switch (request) {
+ case VHOST_USER_GET_FEATURES:
+ DBG("VHOST_USER_GET_FEATURES\n");
+ break;
+ case VHOST_USER_SET_FEATURES:
+ DBG("VHOST_USER_SET_FEATURES\n");
+ break;
+ case VHOST_USER_GET_PROTOCOL_FEATURES:
+ DBG("VHOST_USER_GET_PROTOCOL_FEATURES\n");
+ break;
+ case VHOST_USER_SET_PROTOCOL_FEATURES:
+ DBG("VHOST_USER_SET_PROTOCOL_FEATURES\n");
+ break;
+ case VHOST_USER_SET_OWNER:
+ DBG("VHOST_USER_SET_OWNER\n");
+ break;
+ case VHOST_USER_RESET_OWNER:
+ DBG("VHOST_USER_RESET_OWNER\n");
+ break;
+ case VHOST_USER_SET_MEM_TABLE:
+ DBG("VHOST_USER_SET_MEM_TABLE\n");
+ break;
+ case VHOST_USER_SET_LOG_BASE:
+ DBG("VHOST_USER_SET_LOG_BASE\n");
+ break;
+ case VHOST_USER_SET_LOG_FD:
+ DBG("VHOST_USER_SET_LOG_FD\n");
+ break;
+ case VHOST_USER_SET_VRING_NUM:
+ DBG("VHOST_USER_SET_VRING_NUM\n");
+ break;
+ case VHOST_USER_SET_VRING_ADDR:
+ DBG("VHOST_USER_SET_VRING_ADDR\n");
+ break;
+ case VHOST_USER_SET_VRING_BASE:
+ DBG("VHOST_USER_SET_VRING_BASE\n");
+ break;
+ case VHOST_USER_GET_VRING_BASE:
+ DBG("VHOST_USER_GET_VRING_BASE\n");
+ break;
+ case VHOST_USER_SET_VRING_KICK:
+ DBG("VHOST_USER_SET_VRING_KICK\n");
+ break;
+ case VHOST_USER_SET_VRING_CALL:
+ DBG("VHOST_USER_SET_VRING_CALL\n");
+ break;
+ case VHOST_USER_SET_VRING_ERR:
+ DBG("VHOST_USER_SET_VRING_ERR\n");
+ break;
+ case VHOST_USER_GET_QUEUE_NUM:
+ DBG("VHOST_USER_GET_QUEUE_NUM\n");
+ break;
+ case VHOST_USER_SET_VRING_ENABLE:
+ DBG("VHOST_USER_SET_VRING_ENABLE\n");
+ break;
+ case VHOST_USER_SET_SLAVE_REQ_FD:
+ DBG("VHOST_USER_SET_SLAVE_REQ_FD\n");
+ break;
+ case VHOST_USER_GET_CONFIG:
+ DBG("VHOST_USER_GET_CONFIG\n");
+ break;
+ case VHOST_USER_SET_CONFIG:
+ DBG("VHOST_USER_SET_CONFIG\n");
+ break;
+ case VHOST_USER_NONE:
+ DBG("VHOST_USER_NONE\n");
+ break;
+ case VHOST_USER_POSTCOPY_ADVISE:
+ DBG("VHOST_USER_POSTCOPY_ADVISE\n");
+ break;
+ case VHOST_USER_POSTCOPY_LISTEN:
+ DBG("VHOST_USER_POSTCOPY_LISTEN\n");
+ break;
+ case VHOST_USER_POSTCOPY_END:
+ DBG("VHOST_USER_POSTCOPY_END\n");
+ break;
+ case VHOST_USER_GET_INFLIGHT_FD:
+ DBG("VHOST_USER_GET_INFLIGHT_FD\n");
+ break;
+ case VHOST_USER_SET_INFLIGHT_FD:
+ DBG("VHOST_USER_SET_INFLIGHT_FD\n");
+ break;
+ case VHOST_USER_VRING_KICK:
+ DBG("VHOST_USER_VRING_KICK\n");
+ break;
+ case VHOST_USER_GET_MAX_MEM_SLOTS:
+ DBG("VHOST_USER_GET_MAX_MEM_SLOTS\n");
+ break;
+ case VHOST_USER_ADD_MEM_REG:
+ DBG("VHOST_USER_ADD_MEM_REG\n");
+ break;
+ case VHOST_USER_REM_MEM_REG:
+ DBG("VHOST_USER_REM_MEM_REG\n");
+ break;
+ default:
+ DBG("Unhandled request: %d\n", request);
+ }
+}
diff --git a/vhost_user_loopback.h b/vhost_user_loopback.h
new file mode 100644
index 0000000..8aa7e05
--- /dev/null
+++ b/vhost_user_loopback.h
@@ -0,0 +1,969 @@
+/*
+ * Based on libvhost-user.h of QEMU project
+ *
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Victor Kaplansky <victork@redhat.com>
+ * Marc-André Lureau <mlureau@redhat.com>
+ *
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef LOOPBACK_LIBVHOST_USER_H
+#define LOOPBACK_LIBVHOST_USER_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <poll.h>
+#include <linux/vhost.h>
+#include <pthread.h>
+#include "virtio_loopback.h"
+#include "queue.h"
+
+typedef struct adapter_dev {
+ struct vhost_dev *vdev;
+ struct vhost_user *vudev;
+ VirtIODevice *virtio_dev;
+ VirtioBus *vbus;
+} AdapterDev;
+
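+/*
+ * One entry per memory region scheduled to be added to or removed from
+ * the backend, with its index in the region table and in the fd array.
+ */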
+struct scrub_regions {
+ struct vhost_memory_region *region;
+ int reg_idx;
+ int fd_idx;
+};
+
+struct vhost_virtqueue {
+ int kick;
+ int call;
+ void *desc;
+ void *avail;
+ void *used;
+ int num;
+ unsigned long long desc_phys;
+ unsigned desc_size;
+ unsigned long long avail_phys;
+ unsigned avail_size;
+ unsigned long long used_phys;
+ unsigned used_size;
+ EventNotifier masked_notifier;
+ EventNotifier masked_config_notifier;
+ struct vhost_dev *dev;
+};
+
+typedef struct VhostDevConfigOps {
+ /* Vhost device config space changed callback */
+ int (*vhost_dev_config_notifier)(struct vhost_dev *dev);
+} VhostDevConfigOps;
+
+
+typedef struct MemoryRegion MemoryRegion;
+
+typedef struct MemoryRegionSection {
+ uint64_t size;
+ MemoryRegion *mr;
+ uint64_t offset_within_region;
+ uint64_t offset_within_address_space;
+ bool readonly;
+ bool nonvolatile;
+} MemoryRegionSection;
+
+struct vhost_dev {
+ VirtIODevice *vdev;
+ struct vhost_memory *mem;
+ int n_mem_sections;
+ MemoryRegionSection *mem_sections;
+ int n_tmp_sections;
+ MemoryRegionSection *tmp_sections;
+ struct vhost_virtqueue *vqs;
+ unsigned int nvqs;
+ /* the first virtqueue which would be used by this vhost dev */
+ int vq_index;
+ /* one past the last vq index for the virtio device (not vhost) */
+ int vq_index_end;
+ /* if non-zero, minimum required value for max_queues */
+ int num_queues;
+ uint64_t features;
+ uint64_t acked_features;
+ uint64_t backend_features;
+ uint64_t protocol_features;
+ uint64_t max_queues;
+ uint64_t backend_cap;
+ bool started;
+ bool log_enabled;
+ uint64_t log_size;
+ void *migration_blocker;
+ void *opaque;
+ struct vhost_log *log;
+ QLIST_ENTRY(vhost_dev) entry;
+ uint64_t memory_slots;
+ const VhostDevConfigOps *config_ops;
+};
+
+
+#define VHOST_USER_MAX_RAM_SLOTS 8
+
+typedef uint64_t ram_addr_t;
+typedef struct RAMBlock RAMBlock;
+
+typedef struct RAMBlock {
+ struct MemoryRegion *mr;
+ uint8_t *host;
+ uint8_t *colo_cache; /* For colo, VM's ram cache */
+ ram_addr_t offset;
+ ram_addr_t used_length;
+ ram_addr_t max_length;
+ void (*resized)(const char*, uint64_t length, void *host);
+ uint32_t flags;
+ /* Protected by iothread lock. */
+ char idstr[256];
+ /* RCU-enabled, writes protected by the ramlist lock */
+ int fd;
+ size_t page_size;
+ /* dirty bitmap used during migration */
+ unsigned long *bmap;
+ /* bitmap of already received pages in postcopy */
+ unsigned long *receivedmap;
+
+ /*
+ * bitmap to track already cleared dirty bitmap. When the bit is
+ * set, it means the corresponding memory chunk needs a log-clear.
+ * Set this up to non-NULL to enable the capability to postpone
+ * and split clearing of dirty bitmap on the remote node (e.g.,
+ * KVM). The bitmap will be set only when doing global sync.
+ *
+ * NOTE: this bitmap is different comparing to the other bitmaps
+ * in that one bit can represent multiple guest pages (which is
+ * decided by the `clear_bmap_shift' variable below). On
+ * destination side, this should always be NULL, and the variable
+ * `clear_bmap_shift' is meaningless.
+ */
+ unsigned long *clear_bmap;
+ uint8_t clear_bmap_shift;
+
+ /*
+ * RAM block length that corresponds to the used_length on the migration
+ * source (after RAM block sizes were synchronized). Especially, after
+ * starting to run the guest, used_length and postcopy_length can differ.
+ * Used to register/unregister uffd handlers and as the size of the received
+ * bitmap. Receiving any page beyond this length will bail out, as it
+ * could not have been valid on the source.
+ */
+ ram_addr_t postcopy_length;
+} RAMBlock;
+
+
+/*
+ * MemoryRegion:
+ *
+ * A struct representing a memory region.
+ */
+typedef struct MemoryRegion {
+ /* private: */
+
+ /* The following fields should fit in a cache line */
+ bool romd_mode;
+ bool ram;
+ bool subpage;
+ bool readonly; /* For RAM regions */
+ bool nonvolatile;
+ bool rom_device;
+ bool flush_coalesced_mmio;
+ uint8_t dirty_log_mask;
+ bool is_iommu;
+ RAMBlock *ram_block;
+
+ void *opaque;
+ MemoryRegion *container;
+ uint64_t size;
+ uint64_t addr;
+ void (*destructor)(MemoryRegion *mr);
+ uint64_t align;
+ bool terminates;
+ bool ram_device;
+ bool enabled;
+ bool warning_printed; /* For reservations */
+ uint8_t vga_logging_count;
+ MemoryRegion *alias;
+ uint64_t alias_offset;
+ int32_t priority;
+ QTAILQ_HEAD(, MemoryRegion) subregions;
+ QTAILQ_ENTRY(MemoryRegion) subregions_link;
+ const char *name;
+ unsigned ioeventfd_nb;
+} MemoryRegion;
+
+struct vhost_user {
+ struct vhost_dev *dev;
+
+ /* Shared between vhost devs of the same virtio device */
+
+ uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
+ /* Length of the region_rb and region_rb_offset arrays */
+ size_t region_rb_len;
+ /* RAMBlock associated with a given region */
+ RAMBlock **region_rb;
+ /*
+ * The offset from the start of the RAMBlock to the start of the
+ * vhost region.
+ */
+ ram_addr_t *region_rb_offset;
+
+ /* True once we've entered postcopy_listen */
+ bool postcopy_listen;
+
+ /* Our current regions */
+ int num_shadow_regions;
+ struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
+};
+
+/* Global variables */
+extern int client_sock;
+extern struct vhost_dev *dev;
+extern struct adapter_dev *adev;
+extern struct vhost_user *vudev;
+
+/* Based on qemu/hw/virtio/vhost-user.c */
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+#define VHOST_LOG_PAGE 4096
+#define VHOST_MEMORY_BASELINE_NREGIONS VHOST_USER_MAX_RAM_SLOTS
+
+/* The version of the protocol we support */
+#define VHOST_USER_VERSION (0x1)
+
+/*
+ * Size of the vhost-user message header: everything that precedes the
+ * payload union in VhostUserMsg.
+ */
+#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
+
+/*
+ * Maximum size of virtio device config space
+ */
+#define VHOST_USER_MAX_CONFIG_SIZE 256
+
+typedef enum VhostSetConfigType {
+ VHOST_SET_CONFIG_TYPE_MASTER = 0,
+ VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
+} VhostSetConfigType;
+
+enum VhostUserProtocolFeature {
+ VHOST_USER_PROTOCOL_F_MQ = 0,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
+ VHOST_USER_PROTOCOL_F_RARP = 2,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
+ VHOST_USER_PROTOCOL_F_NET_MTU = 4,
+ VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
+ VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
+ VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
+ VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
+ VHOST_USER_PROTOCOL_F_CONFIG = 9,
+ VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
+ VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
+ VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
+ VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
+ VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
+ VHOST_USER_PROTOCOL_F_STATUS = 16,
+ VHOST_USER_PROTOCOL_F_MAX
+};
+
+#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
+
+typedef enum VhostUserRequest {
+ VHOST_USER_NONE = 0,
+ VHOST_USER_GET_FEATURES = 1,
+ VHOST_USER_SET_FEATURES = 2,
+ VHOST_USER_SET_OWNER = 3,
+ VHOST_USER_RESET_OWNER = 4,
+ VHOST_USER_SET_MEM_TABLE = 5,
+ VHOST_USER_SET_LOG_BASE = 6,
+ VHOST_USER_SET_LOG_FD = 7,
+ VHOST_USER_SET_VRING_NUM = 8,
+ VHOST_USER_SET_VRING_ADDR = 9,
+ VHOST_USER_SET_VRING_BASE = 10,
+ VHOST_USER_GET_VRING_BASE = 11,
+ VHOST_USER_SET_VRING_KICK = 12,
+ VHOST_USER_SET_VRING_CALL = 13,
+ VHOST_USER_SET_VRING_ERR = 14,
+ VHOST_USER_GET_PROTOCOL_FEATURES = 15,
+ VHOST_USER_SET_PROTOCOL_FEATURES = 16,
+ VHOST_USER_GET_QUEUE_NUM = 17,
+ VHOST_USER_SET_VRING_ENABLE = 18,
+ VHOST_USER_SEND_RARP = 19,
+ VHOST_USER_NET_SET_MTU = 20,
+ VHOST_USER_SET_SLAVE_REQ_FD = 21,
+ VHOST_USER_IOTLB_MSG = 22,
+ VHOST_USER_SET_VRING_ENDIAN = 23,
+ VHOST_USER_GET_CONFIG = 24,
+ VHOST_USER_SET_CONFIG = 25,
+ VHOST_USER_CREATE_CRYPTO_SESSION = 26,
+ VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
+ VHOST_USER_POSTCOPY_ADVISE = 28,
+ VHOST_USER_POSTCOPY_LISTEN = 29,
+ VHOST_USER_POSTCOPY_END = 30,
+ VHOST_USER_GET_INFLIGHT_FD = 31,
+ VHOST_USER_SET_INFLIGHT_FD = 32,
+ VHOST_USER_GPU_SET_SOCKET = 33,
+ VHOST_USER_VRING_KICK = 35,
+ VHOST_USER_GET_MAX_MEM_SLOTS = 36,
+ VHOST_USER_ADD_MEM_REG = 37,
+ VHOST_USER_REM_MEM_REG = 38,
+ VHOST_USER_SET_STATUS = 39,
+ VHOST_USER_GET_STATUS = 40,
+ VHOST_USER_MAX
+} VhostUserRequest;
+
+typedef enum VhostUserSlaveRequest {
+ VHOST_USER_SLAVE_NONE = 0,
+ VHOST_USER_SLAVE_IOTLB_MSG = 1,
+ VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
+ VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
+ VHOST_USER_SLAVE_VRING_CALL = 4,
+ VHOST_USER_SLAVE_VRING_ERR = 5,
+ VHOST_USER_SLAVE_MAX
+} VhostUserSlaveRequest;
+
+typedef struct VhostUserMemoryRegion {
+ uint64_t guest_phys_addr;
+ uint64_t memory_size;
+ uint64_t userspace_addr;
+ uint64_t mmap_offset;
+} VhostUserMemoryRegion;
+
+#define VHOST_USER_MEM_REG_SIZE (sizeof(VhostUserMemoryRegion))
+
+typedef struct VhostUserMemory {
+ uint32_t nregions;
+ uint32_t padding;
+ VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
+} VhostUserMemory;
+
+typedef struct VhostUserMemRegMsg {
+ uint64_t padding;
+ VhostUserMemoryRegion region;
+} VhostUserMemRegMsg;
+
+typedef struct VhostUserLog {
+ uint64_t mmap_size;
+ uint64_t mmap_offset;
+} VhostUserLog;
+
+typedef struct VhostUserConfig {
+ uint32_t offset;
+ uint32_t size;
+ uint32_t flags;
+ uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
+} VhostUserConfig;
+
+static VhostUserConfig c __attribute__ ((unused));
+#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
+ + sizeof(c.size) \
+ + sizeof(c.flags))
+
+typedef struct VhostUserVringArea {
+ uint64_t u64;
+ uint64_t size;
+ uint64_t offset;
+} VhostUserVringArea;
+
+typedef struct VhostUserInflight {
+ uint64_t mmap_size;
+ uint64_t mmap_offset;
+ uint16_t num_queues;
+ uint16_t queue_size;
+} VhostUserInflight;
+
+#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
+# define VU_PACKED __attribute__((gcc_struct, packed))
+#else
+# define VU_PACKED __attribute__((packed))
+#endif
+
+typedef struct VhostUserMsg {
+ int request;
+
+#define VHOST_USER_VERSION_MASK (0x3)
+#define VHOST_USER_REPLY_MASK (0x1 << 2)
+#define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+
+ union {
+#define VHOST_USER_VRING_IDX_MASK (0xff)
+#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
+ uint64_t u64;
+ struct vhost_vring_state state;
+ struct vhost_vring_addr addr;
+ VhostUserMemory memory;
+ VhostUserMemRegMsg memreg;
+ VhostUserLog log;
+ VhostUserConfig config;
+ VhostUserVringArea area;
+ VhostUserInflight inflight;
+ } payload;
+
+ int fds[VHOST_MEMORY_BASELINE_NREGIONS];
+ int fd_num;
+ uint8_t *data;
+} VU_PACKED VhostUserMsg;
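+/*
+ * Note: only the request/flags/size header and the payload travel on the
+ * socket (see VHOST_USER_HDR_SIZE); fds, fd_num and data are local
+ * bookkeeping, the fds being passed as SCM_RIGHTS ancillary data.
+ */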
+
+typedef struct VuDevRegion {
+ /* Guest Physical address. */
+ uint64_t gpa;
+ /* Memory region size. */
+ uint64_t size;
+ /* QEMU virtual address (userspace). */
+ uint64_t qva;
+ /* Starting offset in our mmaped space. */
+ uint64_t mmap_offset;
+ /* Start address of mmaped space. */
+ uint64_t mmap_addr;
+} VuDevRegion;
+
+
+typedef struct VuDev VuDev;
+typedef uint64_t (*vu_get_features_cb) (VuDev *dev);
+typedef void (*vu_set_features_cb) (VuDev *dev, uint64_t features);
+typedef int (*vu_process_msg_cb) (VuDev *dev, VhostUserMsg *vmsg,
+ int *do_reply);
+typedef bool (*vu_read_msg_cb) (VuDev *dev, int sock, VhostUserMsg *vmsg);
+typedef void (*vu_queue_set_started_cb) (VuDev *dev, int qidx, bool started);
+typedef bool (*vu_queue_is_processed_in_order_cb) (VuDev *dev, int qidx);
+typedef int (*vu_get_config_cb) (VuDev *dev, uint8_t *config, uint32_t len);
+typedef int (*vu_set_config_cb) (VuDev *dev, const uint8_t *data,
+ uint32_t offset, uint32_t size,
+ uint32_t flags);
+
+typedef struct VuDevIface {
+ /* called by VHOST_USER_GET_FEATURES to get the features bitmask */
+ vu_get_features_cb get_features;
+ /* enable vhost implementation features */
+ vu_set_features_cb set_features;
+ /*
+ * get the protocol feature bitmask from the underlying vhost
+ * implementation
+ */
+ vu_get_features_cb get_protocol_features;
+ /* enable protocol features in the underlying vhost implementation. */
+ vu_set_features_cb set_protocol_features;
+ /* process_msg is called for each vhost-user message received */
+ /* skip libvhost-user processing if return value != 0 */
+ vu_process_msg_cb process_msg;
+ /* tells when queues can be processed */
+ vu_queue_set_started_cb queue_set_started;
+ /*
+ * If the queue is processed in order, in which case it will be
+ * resumed to vring.used->idx. This can help to support resuming
+ * on unmanaged exit/crash.
+ */
+ vu_queue_is_processed_in_order_cb queue_is_processed_in_order;
+ /* get the config space of the device */
+ vu_get_config_cb get_config;
+ /* set the config space of the device */
+ vu_set_config_cb set_config;
+} VuDevIface;
+
+typedef void (*vu_queue_handler_cb) (VuDev *dev, int qidx);
+
+typedef struct VuRing {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+ uint64_t log_guest_addr;
+ uint32_t flags;
+} VuRing;
+
+typedef struct VuDescStateSplit {
+ /*
+ * Indicate whether this descriptor is inflight or not.
+ * Only available for head-descriptor.
+ */
+ uint8_t inflight;
+
+ /* Padding */
+ uint8_t padding[5];
+
+ /*
+ * Maintain a list for the last batch of used descriptors.
+ * Only available when batching is used for submitting
+ */
+ uint16_t next;
+
+ /*
+ * Used to preserve the order of fetching available descriptors.
+ * Only available for head-descriptor.
+ */
+ uint64_t counter;
+} VuDescStateSplit;
+
+typedef struct VuVirtqInflight {
+ /* The feature flags of this region. Now it's initialized to 0. */
+ uint64_t features;
+
+ /*
+ * The version of this region. It's 1 currently.
+ * Zero value indicates a vm reset happened.
+ */
+ uint16_t version;
+
+ /*
+ * The size of VuDescStateSplit array. It's equal to the virtqueue
+ * size. Slave could get it from queue size field of VhostUserInflight.
+ */
+ uint16_t desc_num;
+
+ /*
+ * The head of list that track the last batch of used descriptors.
+ */
+ uint16_t last_batch_head;
+
+ /* Storing the idx value of used ring */
+ uint16_t used_idx;
+
+ /* Used to track the state of each descriptor in descriptor table */
+ VuDescStateSplit desc[];
+} VuVirtqInflight;
+
+typedef struct VuVirtqInflightDesc {
+ uint16_t index;
+ uint64_t counter;
+} VuVirtqInflightDesc;
+
+typedef struct VuVirtq {
+ VuRing vring;
+ VuVirtqInflight *inflight;
+ VuVirtqInflightDesc *resubmit_list;
+ uint16_t resubmit_num;
+ uint64_t counter;
+ /* Next head to pop */
+ uint16_t last_avail_idx;
+ /* Last avail_idx read from VQ. */
+ uint16_t shadow_avail_idx;
+ uint16_t used_idx;
+ /* Last used index value we have signalled on */
+ uint16_t signalled_used;
+ /* Last used index value we have signalled on */
+ bool signalled_used_valid;
+ /* Notification enabled? */
+ bool notification;
+ int inuse;
+ vu_queue_handler_cb handler;
+ int call_fd;
+ int kick_fd;
+ int err_fd;
+ unsigned int enable;
+ bool started;
+ /* Guest addresses of our ring */
+ struct vhost_vring_addr vra;
+} VuVirtq;
+
+enum VuWatchCondtion {
+ VU_WATCH_IN = POLLIN,
+ VU_WATCH_OUT = POLLOUT,
+ VU_WATCH_PRI = POLLPRI,
+ VU_WATCH_ERR = POLLERR,
+ VU_WATCH_HUP = POLLHUP,
+};
+
+typedef void (*vu_panic_cb) (VuDev *dev, const char *err);
+typedef void (*vu_watch_cb) (VuDev *dev, int condition, void *data);
+typedef void (*vu_set_watch_cb) (VuDev *dev, int fd, int condition,
+ vu_watch_cb cb, void *data);
+typedef void (*vu_remove_watch_cb) (VuDev *dev, int fd);
+
+typedef struct VuDevInflightInfo {
+ int fd;
+ void *addr;
+ uint64_t size;
+} VuDevInflightInfo;
+
+struct VuDev {
+ int sock;
+ uint32_t nregions;
+ VuDevRegion regions[VHOST_USER_MAX_RAM_SLOTS];
+ VuVirtq *vq;
+ VuDevInflightInfo inflight_info;
+ int log_call_fd;
+ /* Must be held while using slave_fd */
+ pthread_mutex_t slave_mutex;
+ int slave_fd;
+ uint64_t log_size;
+ uint8_t *log_table;
+ uint64_t features;
+ uint64_t protocol_features;
+ bool broken;
+ uint16_t max_queues;
+
+ /*
+ * @read_msg: custom method to read vhost-user message
+ *
+ * Read data from vhost_user socket fd and fill up
+ * the passed VhostUserMsg *vmsg struct.
+ *
+ * If reading fails, it should close the received set of file
+ * descriptors as socket message's auxiliary data.
+ *
+     * For the details, please refer to vu_message_read in libvhost-user.c,
+     * which will be used by default if no custom method is provided when
+     * calling vu_init.
+ *
+ * Returns: true if vhost-user message successfully received,
+ * otherwise return false.
+ *
+ */
+ vu_read_msg_cb read_msg;
+
+ /*
+ * @set_watch: add or update the given fd to the watch set,
+ * call cb when condition is met.
+ */
+ vu_set_watch_cb set_watch;
+
+ /* @remove_watch: remove the given fd from the watch set */
+ vu_remove_watch_cb remove_watch;
+
+ /*
+ * @panic: encountered an unrecoverable error, you may try to re-initialize
+ */
+ vu_panic_cb panic;
+ const VuDevIface *iface;
+
+ /* Postcopy data */
+ int postcopy_ufd;
+ bool postcopy_listening;
+};
+
+typedef struct VuVirtqElement {
+ unsigned int index;
+ unsigned int out_num;
+ unsigned int in_num;
+ struct iovec *in_sg;
+ struct iovec *out_sg;
+} VuVirtqElement;
+
+/**
+ * vu_init:
+ * @dev: a VuDev context
+ * @max_queues: maximum number of virtqueues
+ * @socket: the socket connected to vhost-user master
+ * @panic: a panic callback
+ * @set_watch: a set_watch callback
+ * @remove_watch: a remove_watch callback
+ * @iface: a VuDevIface structure with vhost-user device callbacks
+ *
+ * Initializes a VuDev vhost-user context.
+ *
+ * Returns: true on success, false on failure.
+ **/
+bool vu_init(VuDev *dev,
+ uint16_t max_queues,
+ int socket,
+ vu_panic_cb panic,
+ vu_read_msg_cb read_msg,
+ vu_set_watch_cb set_watch,
+ vu_remove_watch_cb remove_watch,
+ const VuDevIface *iface);
+
+
+/**
+ * vu_deinit:
+ * @dev: a VuDev context
+ *
+ * Cleans up the VuDev context
+ */
+void vu_deinit(VuDev *dev);
+
+/**
+ * vu_dispatch:
+ * @dev: a VuDev context
+ *
+ * Process one vhost-user message.
+ *
+ * Returns: true on success, false on failure.
+ */
+bool vu_dispatch(VuDev *dev);
+
+/**
+ * vu_gpa_to_va:
+ * @dev: a VuDev context
+ * @plen: guest memory size
+ * @guest_addr: guest address
+ *
+ * Translate a guest address to a pointer. Returns NULL on failure.
+ */
+void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr);
+
+/**
+ * vu_get_queue:
+ * @dev: a VuDev context
+ * @qidx: queue index
+ *
+ * Returns the queue number @qidx.
+ */
+VuVirtq *vu_get_queue(VuDev *dev, int qidx);
+
+/**
+ * vu_set_queue_handler:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @handler: the queue handler callback
+ *
+ * Set the queue handler. This function may be called several times
+ * for the same queue. If called with NULL @handler, the handler is
+ * removed.
+ */
+void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
+ vu_queue_handler_cb handler);
+
+/**
+ * vu_set_queue_host_notifier:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @fd: a file descriptor
+ * @size: host page size
+ * @offset: notifier offset in @fd file
+ *
+ * Set queue's host notifier. This function may be called several
+ * times for the same queue. If called with -1 @fd, the notifier
+ * is removed.
+ */
+bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
+ int size, int offset);
+
+/**
+ * vu_queue_set_notification:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @enable: state
+ *
+ * Set whether the queue notifies (via event index or interrupt)
+ */
+void vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable);
+
+/**
+ * vu_queue_enabled:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Returns: whether the queue is enabled.
+ */
+bool vu_queue_enabled(VuDev *dev, VuVirtq *vq);
+
+/**
+ * vu_queue_started:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Returns: whether the queue is started.
+ */
+bool vu_queue_started(const VuDev *dev, const VuVirtq *vq);
+
+/**
+ * vu_queue_empty:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Returns: true if the queue is empty or not ready.
+ */
+bool vu_queue_empty(VuDev *dev, VuVirtq *vq);
+
+/**
+ * vu_queue_notify:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Request to notify the queue via callfd (skipped if unnecessary)
+ */
+void vu_queue_notify(VuDev *dev, VuVirtq *vq);
+
+/**
+ * vu_queue_notify_sync:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Request to notify the queue via callfd (skipped if unnecessary)
+ * or sync message if possible.
+ */
+void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq);
+
+/**
+ * vu_queue_pop:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @sz: the size of struct to return (must be >= VuVirtqElement)
+ *
+ * Returns: a VuVirtqElement filled from the queue or NULL. The
+ * returned element must be free()-d by the caller.
+ */
+void *vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz);
+
+
+/**
+ * vu_queue_unpop:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @elem: The #VuVirtqElement
+ * @len: number of bytes written
+ *
+ * Pretend the most recent element wasn't popped from the virtqueue. The next
+ * call to vu_queue_pop() will refetch the element.
+ */
+void vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
+ size_t len);
+
+/**
+ * vu_queue_rewind:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @num: number of elements to push back
+ *
+ * Pretend that elements weren't popped from the virtqueue. The next
+ * virtqueue_pop() will refetch the oldest element.
+ *
+ * Returns: true on success, false if @num is greater than the number of in use
+ * elements.
+ */
+bool vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num);
+
+/**
+ * vu_queue_fill:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @elem: a VuVirtqElement
+ * @len: length in bytes to write
+ * @idx: optional offset for the used ring index (0 in general)
+ *
+ * Fill the used ring with @elem element.
+ */
+void vu_queue_fill(VuDev *dev, VuVirtq *vq,
+ const VuVirtqElement *elem,
+ unsigned int len, unsigned int idx);
+
+/**
+ * vu_queue_push:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @elem: a VuVirtqElement
+ * @len: length in bytes to write
+ *
+ * Helper that combines vu_queue_fill() with a vu_queue_flush().
+ */
+void vu_queue_push(VuDev *dev, VuVirtq *vq,
+ const VuVirtqElement *elem, unsigned int len);
+
+/**
+ * vu_queue_flush:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @num: number of elements to flush
+ *
+ * Mark the last number of elements as done (used.idx is updated by
+ * num elements).
+ */
+void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int num);
+
+/**
+ * vu_queue_get_avail_bytes:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @in_bytes: in bytes
+ * @out_bytes: out bytes
+ * @max_in_bytes: stop counting after max_in_bytes
+ * @max_out_bytes: stop counting after max_out_bytes
+ *
+ * Count the number of available bytes, up to max_in_bytes/max_out_bytes.
+ */
+void vu_queue_get_avail_bytes(VuDev *vdev, VuVirtq *vq, unsigned int *in_bytes,
+ unsigned int *out_bytes,
+ unsigned max_in_bytes, unsigned max_out_bytes);
+
+/**
+ * vu_queue_avail_bytes:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @in_bytes: expected in bytes
+ * @out_bytes: expected out bytes
+ *
+ * Returns: true if in_bytes <= in_total && out_bytes <= out_total
+ */
+bool vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
+ unsigned int out_bytes);
+
+
+bool vhost_user_one_time_request(VhostUserRequest request);
+void vmsg_close_fds(VhostUserMsg *vmsg);
+bool vu_message_write(int conn_fd, VhostUserMsg *vmsg);
+bool vu_message_read(int conn_fd, VhostUserMsg *vmsg);
+int vhost_user_set_owner(void);
+int process_message_reply(const VhostUserMsg *msg);
+int vhost_user_get_u64(int request, uint64_t *u64);
+int vhost_user_get_features(uint64_t *features);
+int enforce_reply(const VhostUserMsg *msg);
+int vhost_user_set_u64(int request, uint64_t u64, bool wait_for_reply);
+int vhost_user_set_protocol_features(uint64_t features);
+int vhost_user_get_max_memslots(uint64_t *max_memslots);
+int vhost_setup_slave_channel(struct vhost_dev *dev);
+int vhost_user_get_vq_index(struct vhost_dev *dev, int idx);
+int vhost_set_vring_file(VhostUserRequest request,
+ struct vhost_vring_file *file);
+int vhost_user_set_vring_kick(struct vhost_vring_file *file);
+int vhost_user_set_vring_call(struct vhost_vring_file *file);
+int vhost_virtqueue_init(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq, int n);
+void vhost_dev_init(struct vhost_dev *vhdev);
+int vhost_user_set_features(struct vhost_dev *dev,
+ uint64_t features);
+int vhost_user_set_mem_table(struct vhost_dev *dev);
+void vhost_user_share_fd(void);
+int vhost_user_set_vring_num(struct vhost_dev *dev,
+ struct vhost_vring_state *ring);
+int vhost_user_set_vring_base(struct vhost_dev *dev,
+ struct vhost_vring_state *ring);
+int vhost_user_set_vring_addr(struct vhost_dev *dev,
+ struct vhost_vring_addr *addr);
+int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
+ uint32_t config_len);
+int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
+ uint32_t offset, uint32_t size, uint32_t flags);
+int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable);
+int vhost_user_dev_start(struct vhost_dev *dev, bool started);
+
+void vhost_commit_init_vqs(struct vhost_dev *dev);
+void vhost_commit_mem_regions(struct vhost_dev *dev);
+void vhost_commit_vqs(struct vhost_dev *dev);
+void find_add_new_reg(struct vhost_dev *dev);
+void print_mem_table(struct vhost_dev *dev);
+void print_vhost_user_messages(int request);
+
+
+/* FIXME: This needs to move to a better place */
+struct vhost_inflight;
+int vhost_user_get_inflight_fd(struct vhost_dev *dev,
+ uint16_t queue_size,
+ struct vhost_inflight *inflight);
+int vhost_user_set_inflight_fd(struct vhost_dev *dev,
+ struct vhost_inflight *inflight);
+
+
+#endif /* LIBVHOST_USER_H */
diff --git a/vhost_user_rng.c b/vhost_user_rng.c
new file mode 100644
index 0000000..c727636
--- /dev/null
+++ b/vhost_user_rng.c
@@ -0,0 +1,201 @@
+/*
+ * Based on vhost-user-rng of QEMU project
+ *
+ * Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * Copyright (c) 2022-2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+#include <sys/param.h>
+
+/* Project header files */
+#include "vhost_loopback.h"
+#include "vhost_user_rng.h"
+
+#ifdef DEBUG
+#define DBG(...) printf("vhost-user-rng: " __VA_ARGS__)
+#else
+#define DBG(...)
+#endif /* DEBUG */
+
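+/*
+ * Bring-up sequence for the vhost-user backend, as implemented below:
+ * enable the host notifiers, bind the guest notifiers, forward the
+ * features acked by the driver, start the vhost device and finally
+ * unmask all queue interrupts.
+ */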
+static void vu_rng_start(VirtIODevice *vdev)
+{
+ VHostUserRNG *rng = vdev->vhrng;
+ VirtioBus *k = vdev->vbus;
+ int ret;
+ int i;
+
+ /* TODO: This might be deleted in future */
+ if (!k->set_guest_notifiers) {
+ DBG("binding does not support guest notifiers\n");
+ return;
+ }
+
+ ret = vhost_dev_enable_notifiers(rng->vhost_dev, vdev);
+ if (ret < 0) {
+ DBG("Error enabling host notifiers: %d\n", ret);
+ return;
+ }
+
+ ret = k->set_guest_notifiers(vdev, rng->vhost_dev->nvqs, true);
+ if (ret < 0) {
+ DBG("Error binding guest notifier: %d\n", ret);
+ return;
+ }
+
+ rng->vhost_dev->acked_features = vdev->guest_features;
+ DBG("rng->vhost_dev->acked_features: 0x%lx\n", vdev->guest_features);
+
+ ret = vhost_dev_start(rng->vhost_dev, vdev, true);
+ if (ret < 0) {
+ DBG("Error starting vhost-user-rng: %d\n", ret);
+ return;
+ }
+
+ /*
+ * guest_notifier_mask/pending not used yet, so just unmask
+ * everything here. virtio-pci will do the right thing by
+ * enabling/disabling irqfd.
+ */
+ for (i = 0; i < rng->vhost_dev->nvqs; i++) {
+ vhost_virtqueue_mask(rng->vhost_dev, vdev, i, false);
+ }
+
+ /* Wait a bit for the vrings to be set in vhost-user-device */
+ sleep(1);
+
+}
+
+/* TODO: We need to implement this function in a future release */
+static void vu_rng_stop(VirtIODevice *vdev)
+{
+ VHostUserRNG *rng = vdev->vhrng;
+}
+
+
+static uint64_t vu_rng_get_features(VirtIODevice *vdev,
+ uint64_t requested_features)
+{
+ /* No feature bits used yet */
+ return requested_features;
+}
+
+/* TODO: We need to implement this function in a future release */
+static void vu_rng_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
+{
+ VHostUserRNG *rng = vdev->vhrng;
+
+ /* vhost_virtqueue_mask(&rng->vhost_dev, vdev, idx, mask); */
+}
+
+/* TODO: We need to implement this function in a future release */
+static bool vu_rng_guest_notifier_pending(VirtIODevice *vdev, int idx)
+{
+ VHostUserRNG *rng = vdev->vhrng;
+
+ /* return vhost_virtqueue_pending(&rng->vhost_dev, idx); */
+    return true;
+}
+
+static void vu_rng_set_status(VirtIODevice *vdev, uint8_t status)
+{
+ VHostUserRNG *rng = vdev->vhrng;
+ bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK;
+
+ if (rng->vhost_dev->started == should_start) {
+ DBG("rng->vhost_dev->started != should_start\n");
+ return;
+ }
+
+ if (should_start) {
+ vu_rng_start(vdev);
+ } else {
+ DBG("vu_rng_stop(vdev)\n");
+ /* TODO: Add vu_rng_stop(vdev); when this function is implemented */
+ }
+}
+
+static void virtio_dev_class_init(VirtIODevice *vdev)
+{
+ vdev->vdev_class = (VirtioDeviceClass *)malloc(sizeof(VirtioDeviceClass));
+ vdev->vdev_class->parent = vdev;
+ vdev->vdev_class->set_status = vu_rng_set_status;
+ vdev->vdev_class->get_features = vu_rng_get_features;
+ vdev->vdev_class->guest_notifier_mask = vu_rng_guest_notifier_mask;
+ vdev->vdev_class->guest_notifier_pending = vu_rng_guest_notifier_pending;
+ vdev->vdev_class->update_mem_table = update_mem_table;
+}
+
+
+void vhost_user_rng_init(VirtIODevice *vdev)
+{
+ VHostUserRNG *vhrng = (VHostUserRNG *)malloc(sizeof(VHostUserRNG));
+ vdev->vhrng = vhrng;
+ vdev->nvqs = &dev->nvqs;
+ vhrng->parent = vdev;
+ vhrng->req_vq = vdev->vq;
+ vhrng->vhost_dev = dev;
+
+ virtio_dev_class_init(vdev);
+ virtio_loopback_bus_init(vdev->vbus);
+}
+
+static void vu_rng_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+{
+ /*
+ * Not normally called; it's the daemon that handles the queue;
+ * however virtio's cleanup path can call this.
+ */
+ DBG("vu_rng_handle_output\n");
+}
+
+
+void vhost_user_rng_realize(void)
+{
+    /* Initialize virtio_dev data structures */
+ virtio_dev_init(global_vdev, "virtio-rng", 4, 0);
+
+    /* This needs to be changed to a vhost-user-rng init */
+ vhost_user_rng_init(global_vdev);
+
+ global_vdev->vq = virtio_add_queue(global_vdev, 4, vu_rng_handle_output);
+
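+    /*
+     * Preliminary feature set; assuming the standard virtio bit layout,
+     * 0x39000000 sets bits 24, 27, 28 and 29, i.e. VIRTIO_F_NOTIFY_ON_EMPTY,
+     * VIRTIO_F_ANY_LAYOUT, VIRTIO_RING_F_INDIRECT_DESC and
+     * VIRTIO_RING_F_EVENT_IDX. It is overwritten with the backend's
+     * features at the end of this function.
+     */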
+ global_vdev->host_features = 0x39000000;
+
+ proxy = (VirtIOMMIOProxy *)malloc(sizeof(VirtIOMMIOProxy));
+ *proxy = (VirtIOMMIOProxy) {
+ .legacy = 1,
+ };
+
+ /* Virtqueues conf */
+ dev->nvqs = 1;
+ dev->vqs = (struct vhost_virtqueue *)malloc(dev->nvqs *
+ sizeof(struct vhost_virtqueue));
+
+    /* Initiate vhost-user communication */
+ vhost_dev_init(dev);
+
+ /* Write the final features */
+ global_vdev->host_features = dev->features;
+ DBG("dev->host_features: 0x%lx\n", dev->features);
+}
diff --git a/vhost_user_rng.h b/vhost_user_rng.h
new file mode 100644
index 0000000..69b5916
--- /dev/null
+++ b/vhost_user_rng.h
@@ -0,0 +1,44 @@
+/*
+ * Based on vhost-user-rng of QEMU project
+ *
+ * Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * Copyright (c) 2022-2023 Virtual Open Systems SAS.
+ *
+ * Author:
+ * Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef VHOST_USER_RNG
+#define VHOST_USER_RNG
+
+#include "vhost_loopback.h"
+#include "virtio_rng.h"
+#include "vhost_user_loopback.h"
+#include "virtio_loopback.h"
+
+typedef struct VHostUserRNG {
+ VirtIODevice *parent;
+ struct vhost_virtqueue *vhost_vq;
+ struct vhost_dev *vhost_dev;
+ VirtQueue *req_vq;
+ bool connected;
+} VHostUserRNG;
+
+void vhost_user_rng_realize(void);
+
+#endif /* VHOST_USER_RNG */
diff --git a/vhost_user_sound.c b/vhost_user_sound.c
new file mode 100644
index 0000000..1e4c5cf
--- /dev/null
+++ b/vhost_user_sound.c
@@ -0,0 +1,313 @@
+/*
+ * Based on vhost-user-sound.c of QEMU project
+ *
+ * Copyright 2020 Red Hat, Inc.
+ *
+ * Copyright (c) 2023 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ *
+ */
+
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+#include <sys/param.h>
+
+/* Project header files */
+#include "vhost_user_sound.h"
+
+#ifdef DEBUG
+#define DBG(...) printf("vhost-user-sound: " __VA_ARGS__)
+#else
+#define DBG(...)
+#endif /* DEBUG */
+
+/***************************** vhost-user-sound ******************************/
+
+/*
+ * Features supported by the vhost-user-sound frontend:
+ * VIRTIO_F_VERSION_1,
+ * VIRTIO_RING_F_INDIRECT_DESC,
+ * VIRTIO_RING_F_EVENT_IDX,
+ * VIRTIO_F_RING_RESET,
+ * VIRTIO_F_NOTIFY_ON_EMPTY,
+ * VHOST_INVALID_FEATURE_BIT
+ */
+static const int user_feature_bits[] = {
+ VIRTIO_F_VERSION_1,
+ VIRTIO_RING_F_INDIRECT_DESC,
+ VIRTIO_RING_F_EVENT_IDX,
+ VIRTIO_F_RING_RESET,
+ VIRTIO_F_NOTIFY_ON_EMPTY,
+ VHOST_INVALID_FEATURE_BIT
+};
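+
+/*
+ * vus_get_features() below hands this list to vhost_get_features(), which
+ * only lets the listed bits be negotiated with the vhost-user backend;
+ * VHOST_INVALID_FEATURE_BIT terminates the list.
+ */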
+
+static void vus_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VHostUserSound *snd = vdev->vhusnd;
+
+ memcpy(config, &snd->config, sizeof(struct virtio_snd_config));
+}
+
+
+static void vus_start(VirtIODevice *vdev)
+{
+ VHostUserSound *vhusnd = vdev->vhusnd;
+ VirtioBus *k = vdev->vbus;
+ int ret;
+ int i;
+
+ DBG("vus_start(...)\n");
+
+ if (!k->set_guest_notifiers) {
+ DBG("binding does not support guest notifiers\n");
+ return;
+ }
+
+ ret = vhost_dev_enable_notifiers(vhusnd->vhost_dev, vdev);
+ if (ret < 0) {
+ DBG("Error enabling host notifiers: %d\n", -ret);
+ return;
+ }
+
+ ret = k->set_guest_notifiers(k->vdev, vhusnd->vhost_dev->nvqs, true);
+ if (ret < 0) {
+ DBG("Error binding guest notifier: %d\n", -ret);
+ goto err_host_notifiers;
+ }
+
+ vhusnd->vhost_dev->acked_features = vdev->guest_features;
+
+ ret = vhost_dev_start(vhusnd->vhost_dev, vdev, true);
+ if (ret < 0) {
+ DBG("Error starting vhost: %d\n", -ret);
+ goto err_guest_notifiers;
+ }
+
+ /*
+ * guest_notifier_mask/pending not used yet, so just unmask
+ * everything here. virtio-pci will do the right thing by
+ * enabling/disabling irqfd.
+ */
+ for (i = 0; i < vhusnd->vhost_dev->nvqs; i++) {
+ vhost_virtqueue_mask(vhusnd->vhost_dev, vdev, i, false);
+ }
+
+ /* Wait a bit for the vrings to be set in vhost-user-device */
+ sleep(1);
+
+ return;
+
+err_guest_notifiers:
+err_host_notifiers:
+ DBG("vhu_start error\n");
+ return;
+}
+
+static void vus_stop(VirtIODevice *vdev)
+{
+ DBG("vus_stop: not yet implemented\n");
+}
+
+static void vus_set_status(VirtIODevice *vdev, uint8_t status)
+{
+ VHostUserSound *vhusnd = vdev->vhusnd;
+ bool should_start = virtio_device_started(vdev, status);
+ DBG("vus_set_status\n");
+
+ if (vhusnd->vhost_dev->started == should_start) {
+ DBG("snd->vhost_dev->started == should_start\n");
+ return;
+ }
+
+ if (should_start) {
+ vus_start(vdev);
+ } else {
+ vus_stop(vdev);
+ }
+}
+
+static uint64_t vus_get_features(VirtIODevice *vdev, uint64_t features)
+{
+ VHostUserSound *s = vdev->vhusnd;
+
+ DBG("vus_get_features()\n");
+
+ return vhost_get_features(s->vhost_dev, user_feature_bits, features);
+}
+
+static void vus_snd_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+{
+ /*
+ * Not normally called; it's the daemon that handles the queue;
+ * however virtio's cleanup path can call this.
+ */
+}
+
+/*
+ * TODO: Add it later
+ * static void vhost_sound_guest_notifier_mask(VirtIODevice *vdev, int idx,
+ * bool mask)
+ */
+
+/*
+ * TODO: Add it later
+ * static bool vhost_sound_guest_notifier_pending(VirtIODevice *vdev,
+ * int idx)
+ */
+
+static int vus_sound_config_change(struct vhost_dev *dev)
+{
+ VHostUserSound *vhusnd = dev->vdev->vhusnd;
+ DBG("vus_sound_config_change\n");
+
+ int ret = vhost_dev_get_config(dev, (uint8_t *)&vhusnd->config,
+ sizeof(struct virtio_snd_config));
+ if (ret < 0) {
+ DBG("vus_sound_config_change error\n");
+ return -1;
+ }
+
+ virtio_notify_config(dev->vdev);
+
+ return 0;
+}
+
+const VhostDevConfigOps snd_config_ops = {
+ .vhost_dev_config_notifier = vus_sound_config_change,
+};
+
+static void vhost_user_snd_init(VirtIODevice *vdev);
+
+void vus_device_realize()
+{
+ VirtIODevice *vdev = global_vdev;
+ int ret;
+
+ DBG("vus_device_realize\n");
+
+ /* This needs to be added */
+ proxy = (VirtIOMMIOProxy *)malloc(sizeof(VirtIOMMIOProxy));
+ *proxy = (VirtIOMMIOProxy) {
+ .legacy = 1,
+ };
+
+    /* VIRTIO_ID_SOUND is 25, see virtio_ids.h in linux */
+ virtio_dev_init(vdev, "virtio-sound", 25, sizeof(vdev->vhusnd->config));
+ vhost_user_snd_init(global_vdev);
+
+ /* add queues */
+ vdev->vhusnd->ctrl_vq = virtio_add_queue(vdev, 64, vus_snd_handle_output);
+ vdev->vhusnd->event_vq = virtio_add_queue(vdev, 64, vus_snd_handle_output);
+ vdev->vhusnd->tx_vq = virtio_add_queue(vdev, 64, vus_snd_handle_output);
+ vdev->vhusnd->rx_vq = virtio_add_queue(vdev, 64, vus_snd_handle_output);
+ vdev->vhusnd->vhost_dev->nvqs = 4;
+ vdev->vhusnd->num_queues = 4;
+ vdev->vhusnd->queue_size = 64;
+
+    /* NOTE: global_vdev->vqs == vhusnd->virtqs */
+ vdev->vqs = (VirtQueue **)malloc(sizeof(VirtQueue *)
+ * global_vdev->vhusnd->num_queues);
+ vdev->vqs[0] = vdev->vhusnd->ctrl_vq;
+ vdev->vqs[1] = vdev->vhusnd->event_vq;
+ vdev->vqs[2] = vdev->vhusnd->tx_vq;
+ vdev->vqs[3] = vdev->vhusnd->rx_vq;
+
+ vdev->vhusnd->vhost_vqs = (struct vhost_virtqueue *)malloc(
+ sizeof(struct vhost_virtqueue) *
+ vdev->vhusnd->num_queues);
+
+ /* Set up vhost device */
+ vdev->vhusnd->vhost_dev->num_queues = vdev->vhusnd->num_queues;
+ vdev->vhusnd->vhost_dev->nvqs = vdev->vhusnd->num_queues;
+ vdev->vhusnd->vhost_dev->vqs = vdev->vhusnd->vhost_vqs;
+ vdev->vhusnd->vhost_dev->vq_index = 0;
+ vdev->vhusnd->vhost_dev->backend_features = 0;
+
+ vhost_dev_set_config_notifier(vdev->vhusnd->vhost_dev, &snd_config_ops);
+
+ /* TODO: Add error handling */
+ vhost_dev_init(vdev->vhusnd->vhost_dev);
+
+ /* Pass the new obtained features */
+ global_vdev->host_features = vdev->vhusnd->vhost_dev->features;
+
+ ret = vhost_dev_get_config(vdev->vhusnd->vhost_dev,
+ (uint8_t *)&vdev->vhusnd->config,
+ sizeof(struct virtio_snd_config));
+ if (ret < 0) {
+ goto vhost_dev_init_failed;
+ }
+
+ vdev->vdev_class->print_config((uint8_t *)&vdev->vhusnd->config);
+
+ return;
+
+vhost_dev_init_failed:
+ DBG("vhost_dev_init_failed\n");
+ return;
+}
+
+static void vus_device_unrealize(VirtIODevice *vdev)
+{
+ DBG("vhost_user_blk_device_unrealize not yet implemented\n");
+}
+
+static struct vhost_dev *vus_get_vhost(VirtIODevice *vdev)
+{
+ VHostUserSound *vhusnd = vdev->vhusnd;
+ return vhusnd->vhost_dev;
+}
+
+static void print_config_snd(uint8_t *config_data)
+{
+ struct virtio_snd_config *config_strct =
+ (struct virtio_snd_config *)config_data;
+
+ DBG("print_config_snd:\n");
+
+ /* # of available physical jacks */
+ DBG("\tuint32_t jacks: %u\n", config_strct->jacks);
+ /* # of available PCM streams */
+ DBG("\tuint32_t streams: %u\n", config_strct->streams);
+ /* # of available channel maps */
+ DBG("\tuint32_t chmaps: %u\n", config_strct->chmaps);
+}
+
+static void virtio_dev_class_init(VirtIODevice *vdev)
+{
+ DBG("virtio_dev_class_init\n");
+
+ vdev->vdev_class = (VirtioDeviceClass *)malloc(sizeof(VirtioDeviceClass));
+ vdev->vdev_class->parent = vdev;
+ vdev->vdev_class->realize = vus_device_realize;
+ vdev->vdev_class->unrealize = vus_device_unrealize;
+ vdev->vdev_class->get_config = vus_get_config;
+ vdev->vdev_class->get_features = vus_get_features;
+ vdev->vdev_class->set_status = vus_set_status;
+ vdev->vdev_class->update_mem_table = update_mem_table;
+ vdev->vdev_class->print_config = print_config_snd;
+}
+
+static void vhost_user_snd_init(VirtIODevice *vdev)
+{
+
+ DBG("vhost_user_blk_init\n");
+
+ VHostUserSound *vhusnd = (VHostUserSound *)malloc(sizeof(VHostUserSound));
+ vdev->vhusnd = vhusnd;
+ vdev->nvqs = &vdev->vhdev->nvqs;
+ vhusnd->parent = vdev;
+ vhusnd->virtqs = vdev->vqs;
+ vhusnd->vhost_dev = vdev->vhdev;
+
+ virtio_dev_class_init(vdev);
+ virtio_loopback_bus_init(vdev->vbus);
+}
diff --git a/vhost_user_sound.h b/vhost_user_sound.h
new file mode 100644
index 0000000..c2dc73f
--- /dev/null
+++ b/vhost_user_sound.h
@@ -0,0 +1,40 @@
+/*
+ * Based on vhost-user-sound.h of QEMU project
+ *
+ * Copyright 2020 Red Hat, Inc.
+ *
+ * Copyright (c) 2023 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ *
+ */
+
+#ifndef VHOST_USER_SOUND
+#define VHOST_USER_SOUND
+
+#include "virtio_loopback.h"
+#include "vhost_loopback.h"
+#include "vhost_user_loopback.h"
+#include <linux/virtio_snd.h>
+
+typedef struct VHostUserSound {
+ /*< private >*/
+ VirtIODevice *parent;
+ struct vhost_virtqueue *vhost_vqs;
+ VirtQueue **virtqs;
+ uint16_t num_queues;
+ uint32_t queue_size;
+ struct virtio_snd_config config;
+ struct vhost_dev *vhost_dev;
+ VirtQueue *ctrl_vq;
+ VirtQueue *event_vq;
+ VirtQueue *tx_vq;
+ VirtQueue *rx_vq;
+ /*< public >*/
+} VHostUserSound;
+
+void vus_device_realize(void);
+
+#endif /* VHOST_USER_SOUND */
diff --git a/virtio_blk.h b/virtio_blk.h
new file mode 100644
index 0000000..75534ed
--- /dev/null
+++ b/virtio_blk.h
@@ -0,0 +1,95 @@
+/*
+ * Virtio Block Device
+ *
+ * Copyright IBM, Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_VIRTIO_BLK_H
+#define QEMU_VIRTIO_BLK_H
+
+#include "standard-headers/linux/virtio_blk.h"
+#include "hw/virtio/virtio.h"
+#include "hw/block/block.h"
+#include "sysemu/iothread.h"
+#include "sysemu/block-backend.h"
+#include "qom/object.h"
+
+#define TYPE_VIRTIO_BLK "virtio-blk-device"
+#define VIRTIO_BLK_AUTO_NUM_QUEUES UINT16_MAX
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK)
+
+/* This is the last element of the write scatter-gather list */
+struct virtio_blk_inhdr {
+ unsigned char status;
+};
+
+
+struct VirtIOBlkConf {
+ BlockConf conf;
+ IOThread *iothread;
+ char *serial;
+ uint32_t request_merging;
+ uint16_t num_queues;
+ uint16_t queue_size;
+ bool seg_max_adjust;
+ bool report_discard_granularity;
+ uint32_t max_discard_sectors;
+ uint32_t max_write_zeroes_sectors;
+ bool x_enable_wce_if_config_wce;
+};
+
+
+struct VirtIOBlockDataPlane;
+
+struct VirtIOBlockReq;
+struct VirtIOBlock {
+ VirtIODevice parent_obj;
+ BlockBackend *blk;
+ void *rq;
+ QEMUBH *bh;
+ VirtIOBlkConf conf;
+ unsigned short sector_mask;
+ bool original_wce;
+ VMChangeStateEntry *change;
+ bool dataplane_disabled;
+ bool dataplane_started;
+ struct VirtIOBlockDataPlane *dataplane;
+ uint64_t host_features;
+ size_t config_size;
+};
+
+typedef struct VirtIOBlockReq {
+ VirtQueueElement elem;
+ int64_t sector_num;
+ VirtIOBlock *dev;
+ VirtQueue *vq;
+ IOVDiscardUndo inhdr_undo;
+ IOVDiscardUndo outhdr_undo;
+ struct virtio_blk_inhdr *in;
+ struct virtio_blk_outhdr out;
+ QEMUIOVector qiov;
+ size_t in_len;
+ struct VirtIOBlockReq *next;
+ struct VirtIOBlockReq *mr_next;
+ BlockAcctCookie acct;
+} VirtIOBlockReq;
+
+#define VIRTIO_BLK_MAX_MERGE_REQS 32
+
+typedef struct MultiReqBuffer {
+ VirtIOBlockReq *reqs[VIRTIO_BLK_MAX_MERGE_REQS];
+ unsigned int num_reqs;
+ bool is_write;
+} MultiReqBuffer;
+
+bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
+void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh);
+
+#endif
diff --git a/virtio_input.c b/virtio_input.c
new file mode 100644
index 0000000..c0993ea
--- /dev/null
+++ b/virtio_input.c
@@ -0,0 +1,286 @@
+/*
+ * Based on virtio-input.h of QEMU project
+ *
+ * Copyright (c) 2022-2023 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+#include <sys/param.h>
+
+/* Project header files */
+#include "vhost_user_input.h"
+
+#ifdef DEBUG
+#define DBG(...) printf("virtio-input: " __VA_ARGS__)
+#else
+#define DBG(...)
+#endif /* DEBUG */
+
+#define VIRTIO_INPUT_VM_VERSION 1
+
+/* ----------------------------------------------------------------- */
+
+void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event)
+{
+ DBG("virtio_input_send() not yet implemeted\n");
+}
+
+static void virtio_input_handle_evt(VirtIODevice *vdev, VirtQueue *vq)
+{
+ DBG("virtio_input_handle_evt(...)\n");
+ /* nothing */
+}
+
+static void virtio_input_handle_sts(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOInputClass *vic = vdev->vinput->input_class;
+ VirtIOInput *vinput = vdev->vinput;
+ virtio_input_event event;
+ VirtQueueElement *elem;
+ int len;
+
+ DBG("virtio_input_handle_sts(...)\n");
+
+ for (;;) {
+ elem = virtqueue_pop(vinput->sts, sizeof(VirtQueueElement));
+ if (!elem) {
+ break;
+ }
+
+ memset(&event, 0, sizeof(event));
+ /* FIXME: add iov_to_buf func */
+ len = 1;
+ /*
+ * TODO: Will be added in a next release
+ * len = iov_to_buf(elem->out_sg, elem->out_num,
+ * 0, &event, sizeof(event));
+ */
+ if (vic->handle_status) {
+ vic->handle_status(vinput, &event);
+ }
+ virtqueue_push(vinput->sts, elem, len);
+        /* The element was malloc'ed by virtqueue_pop(), so free() it */
+        free(elem);
+ }
+ virtio_notify(vdev, vinput->sts);
+}
+
+virtio_input_config *virtio_input_find_config(VirtIOInput *vinput,
+ uint8_t select,
+ uint8_t subsel)
+{
+ DBG("virtio_input_find_config(...)\n");
+ VirtIOInputConfig *cfg;
+
+ QTAILQ_FOREACH(cfg, &vinput->cfg_list, node) {
+ if (select == cfg->config.select &&
+ subsel == cfg->config.subsel) {
+ return &cfg->config;
+ }
+ }
+ return NULL;
+}
+
+void virtio_input_add_config(VirtIOInput *vinput,
+ virtio_input_config *config)
+{
+ DBG("virtio_input_add_config(...)\n");
+ VirtIOInputConfig *cfg;
+
+ if (virtio_input_find_config(vinput, config->select, config->subsel)) {
+ /* should not happen */
+ DBG("Error duplicate config: %d/%d\n", config->select, config->subsel);
+ exit(1);
+ }
+
+ cfg = (VirtIOInputConfig *)malloc(sizeof(VirtIOInputConfig));
+ cfg->config = *config;
+
+ QTAILQ_INSERT_TAIL(&vinput->cfg_list, cfg, node);
+}
+
+void virtio_input_init_config(VirtIOInput *vinput,
+ virtio_input_config *config)
+{
+ DBG("virtio_input_init_config(...)\n");
+ int i = 0;
+
+ QTAILQ_INIT(&vinput->cfg_list);
+ while (config[i].select) {
+ virtio_input_add_config(vinput, config + i);
+ i++;
+ }
+}
+
+void virtio_input_idstr_config(VirtIOInput *vinput,
+ uint8_t select, const char *string)
+{
+ DBG("virtio_input_idstr_config(...)\n");
+ virtio_input_config id;
+
+ if (!string) {
+ return;
+ }
+ memset(&id, 0, sizeof(id));
+ id.select = select;
+ id.size = snprintf(id.u.string, sizeof(id.u.string), "%s", string);
+ virtio_input_add_config(vinput, &id);
+}
+
+static void virtio_input_get_config(VirtIODevice *vdev, uint8_t *config_data)
+{
+ DBG("virtio_input_get_config(...)\n");
+ VirtIOInput *vinput = vdev->vinput;
+ virtio_input_config *config;
+
+ config = virtio_input_find_config(vinput, vinput->cfg_select,
+ vinput->cfg_subsel);
+ if (config) {
+ memcpy(config_data, config, vinput->cfg_size);
+ } else {
+ memset(config_data, 0, vinput->cfg_size);
+ }
+}
+
+static void virtio_input_set_config(VirtIODevice *vdev,
+ const uint8_t *config_data)
+{
+ VirtIOInput *vinput = vdev->vinput;
+ virtio_input_config *config = (virtio_input_config *)config_data;
+
+ DBG("virtio_input_set_config(...)\n");
+
+ vinput->cfg_select = config->select;
+ vinput->cfg_subsel = config->subsel;
+ virtio_notify_config(vdev);
+}
+
+static uint64_t virtio_input_get_features(VirtIODevice *vdev, uint64_t f)
+{
+ DBG("virtio_input_get_features(...)\n");
+ return f;
+}
+
+static void virtio_input_set_status(VirtIODevice *vdev, uint8_t val)
+{
+ VirtIOInputClass *vic = vdev->vinput->input_class;
+ VirtIOInput *vinput = vdev->vinput;
+ bool should_start = virtio_device_started(vdev, val);
+
+ DBG("virtio_input_set_status(...): %u\n", val);
+
+ if (should_start) {
+ if (!vinput->active) {
+ vinput->active = true;
+ if (vic->change_active) {
+ vic->change_active(vinput);
+ }
+ }
+ }
+}
+
+static void virtio_input_reset(VirtIODevice *vdev)
+{
+ VirtIOInputClass *vic = vdev->vinput->input_class;
+ VirtIOInput *vinput = vdev->vinput;
+
+ DBG("virtio_input_reset(...)\n");
+
+ if (vinput->active) {
+ vinput->active = false;
+ if (vic->change_active) {
+ vic->change_active(vinput);
+ }
+ }
+}
+
+static int virtio_input_post_load(void *opaque, int version_id)
+{
+ VirtIOInput *vinput = global_vdev->vinput;
+ VirtIOInputClass *vic = global_vdev->vinput->input_class;
+ VirtIODevice *vdev = global_vdev;
+
+ DBG("virtio_input_post_load(...)\n");
+
+ vinput->active = vdev->status & VIRTIO_CONFIG_S_DRIVER_OK;
+ if (vic->change_active) {
+ vic->change_active(vinput);
+ }
+ return 0;
+}
+
+void virtio_input_device_realize()
+{
+ VirtIODevice *vdev = global_vdev;
+ struct VirtIOInputClass *vic = vdev->vinput->input_class;
+ VirtIOInput *vinput = vdev->vinput;
+ VirtIOInputConfig *cfg;
+
+ DBG("virtio_input_device_realize(...)\n");
+
+ /* This needs to be added */
+ proxy = (VirtIOMMIOProxy *)malloc(sizeof(VirtIOMMIOProxy));
+ *proxy = (VirtIOMMIOProxy) {
+ .legacy = 1,
+ };
+
+ if (vic->realize) {
+ vic->realize(vdev);
+ }
+
+ virtio_input_idstr_config(vinput, VIRTIO_INPUT_CFG_ID_SERIAL,
+ vinput->serial);
+
+ QTAILQ_FOREACH(cfg, &vinput->cfg_list, node) {
+ if (vinput->cfg_size < cfg->config.size) {
+ vinput->cfg_size = cfg->config.size;
+ }
+ }
+ vinput->cfg_size += 8;
+
+ virtio_input_init_config(vinput, virtio_keyboard_config);
+
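+    /* VIRTIO_ID_INPUT is 18, see virtio_ids.h in linux */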
+ virtio_dev_init(vdev, "virtio-input", 18, vinput->cfg_size);
+ vinput->evt = virtio_add_queue(vdev, 64, virtio_input_handle_evt);
+ vinput->sts = virtio_add_queue(vdev, 64, virtio_input_handle_sts);
+
+ /* FIXME: do we need that? */
+ memcpy(global_vdev->vq, vinput->evt, sizeof(VirtQueue));
+ memcpy(&global_vdev->vq[1], vinput->sts, sizeof(VirtQueue));
+
+ DBG("global_vdev->guest_features: 0x%lx\n", global_vdev->guest_features);
+}
+
+static void virtio_input_finalize(VirtIODevice *vdev)
+{
+ DBG("virtio_input_finalize not yet implemented");
+}
+
+static void virtio_input_device_unrealize(VirtIODevice *vdev)
+{
+ DBG("virtio_input_device_unrealize not yet implemented");
+}
+
+
+void virtio_input_class_init(VirtIODevice *vdev)
+{
+ vdev->vdev_class = (VirtioDeviceClass *)malloc(sizeof(VirtioDeviceClass));
+ vdev->vdev_class->parent = vdev;
+
+ DBG("virtio_input_class_init(...)\n");
+
+ vdev->vdev_class->realize = virtio_input_device_realize;
+ vdev->vdev_class->get_config = virtio_input_get_config;
+ vdev->vdev_class->set_config = virtio_input_set_config;
+ vdev->vdev_class->get_features = virtio_input_get_features;
+ vdev->vdev_class->set_status = virtio_input_set_status;
+ vdev->vdev_class->reset = virtio_input_reset;
+}
diff --git a/virtio_loopback.c b/virtio_loopback.c
new file mode 100644
index 0000000..e95c648
--- /dev/null
+++ b/virtio_loopback.c
@@ -0,0 +1,2041 @@
+/*
+ *
+ * Based on:
+ *
+ * 1) virtio.c of QEMU project
+ *
+ * Copyright IBM, Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ *
+ * 2) virtio-mmio.c of QEMU project
+ *
+ * Copyright (c) 2011 Linaro Limited
+ *
+ * Author:
+ * Peter Maydell <peter.maydell@linaro.org>
+ *
+ *
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <sys/param.h>
+
+/* For socket */
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+/* Project header files */
+#include "virtio_loopback.h"
+#include "virtio_rng.h"
+
+#include <stddef.h>
+#include <pthread.h>
+#include <limits.h>
+
+#ifdef DEBUG
+#define DBG(...) printf("virtio-loopback: " __VA_ARGS__)
+#else
+#define DBG(...)
+#endif /* DEBUG */
+
+/* Global variables */
+
+int s; /* To be deleted */
+int efd; /* Eventfd file descriptor */
+int efd_notify; /* Eventfd file descriptor */
+fd_set rfds;
+int fd;
+int loopback_fd;
+
+virtio_device_info_struct_t device_info;
+virtio_neg_t *address;
+
+VirtIOMMIOProxy *proxy;
+
+int eventfd_count;
+pthread_mutex_t interrupt_lock;
+
+void virtio_add_feature(uint64_t *features, unsigned int fbit)
+{
+ *features |= (1ULL << fbit);
+}
+
+bool virtio_has_feature(uint64_t features, unsigned int fbit)
+{
+ return !!(features & (1ULL << fbit));
+}
+
+static int virtio_validate_features(VirtIODevice *vdev)
+{
+ if (virtio_has_feature(vdev->host_features, VIRTIO_F_IOMMU_PLATFORM) &&
+ !virtio_has_feature(vdev->guest_features, VIRTIO_F_IOMMU_PLATFORM)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+bool virtio_device_should_start(VirtIODevice *vdev, uint8_t status)
+{
+ if (!vdev->vm_running) {
+ return false;
+ }
+
+ return virtio_device_started(vdev, status);
+}
+
+bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
+{
+
+ DBG("virtio_device_started: %d\n", status & VIRTIO_CONFIG_S_DRIVER_OK);
+ DBG("status: %d\n", status);
+
+ return status & VIRTIO_CONFIG_S_DRIVER_OK;
+}
+
+
+void virtio_set_started(VirtIODevice *vdev, bool started)
+{
+ if (started) {
+ vdev->start_on_kick = false;
+ }
+
+ if (vdev->use_started) {
+ vdev->started = started;
+ }
+}
+
+int virtio_set_status(VirtIODevice *vdev, uint8_t val)
+{
+ VirtioDeviceClass *k = vdev->vdev_class;
+
+ DBG("virtio_set_status(...)\n");
+
+ if (virtio_has_feature(vdev->guest_features, VIRTIO_F_VERSION_1)) {
+ if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
+ val & VIRTIO_CONFIG_S_FEATURES_OK) {
+ int ret = virtio_validate_features(vdev);
+
+ if (ret) {
+ return ret;
+ }
+ }
+ }
+
+ if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
+ (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
+ }
+
+ DBG("set vdev->status:%u\n", vdev->status);
+
+ if (k->set_status) {
+ DBG("k->set_status\n");
+ k->set_status(vdev, val);
+ }
+
+ vdev->status = val;
+
+ return 0;
+}
+
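+/*
+ * Example: with a 4096-byte alignment, vring_align(0x1234, 4096) returns
+ * 0x2000, while an already aligned address is returned unchanged.
+ */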
+uint64_t vring_align(uint64_t addr, unsigned long align)
+{
+ return QEMU_ALIGN_UP(addr, align);
+}
+
+uint64_t virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
+{
+ return sizeof(VRingDesc) * vdev->vq[n].vring.num;
+}
+
+uint64_t virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.desc;
+}
+
+uint64_t virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.avail;
+}
+
+uint64_t virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.used;
+}
+
+
+int virtio_queue_get_num(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.num;
+}
+
+
+uint64_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
+{
+ int s;
+
+ s = virtio_has_feature(vdev->guest_features,
+ VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+ return offsetof(VRingAvail, ring) +
+ sizeof(uint16_t) * vdev->vq[n].vring.num + s;
+}
+
+uint64_t virtio_queue_get_used_size(VirtIODevice *vdev, int n)
+{
+ int s;
+
+ s = virtio_has_feature(vdev->guest_features,
+ VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+ return offsetof(VRingUsed, ring) +
+ sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
+}
+
+/* virt queue functions */
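+
+/*
+ * Split-ring layout assumed below (per the virtio spec): the descriptor
+ * table (num * sizeof(VRingDesc) bytes) is followed by the avail ring
+ * (flags, idx, ring[num]), and the used ring starts at the next
+ * vring->align boundary.
+ */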
+void virtio_queue_update_rings(VirtIODevice *vdev, int n)
+{
+ VRing *vring = &vdev->vq[n].vring;
+
+ if (!vring->num || !vring->desc || !vring->align) {
+ /* not yet setup -> nothing to do */
+ return;
+ }
+ vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
+ vring->used = vring_align(vring->avail +
+ offsetof(VRingAvail, ring[vring->num]),
+ vring->align);
+}
+
+static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
+ int n)
+{
+ return vdev->vq[n].last_avail_idx;
+}
+
+
+unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
+{
+ return virtio_queue_split_get_last_avail_idx(vdev, n);
+}
+
+void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
+{
+ /*
+ * Don't allow guest to flip queue between existent and
+ * nonexistent states, or to set it to an invalid size.
+ */
+ if (!!num != !!vdev->vq[n].vring.num ||
+ num > VIRTQUEUE_MAX_SIZE ||
+ num < 0) {
+ return;
+ }
+ vdev->vq[n].vring.num = num;
+}
+
+uint64_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.desc;
+}
+
+
+void virtio_queue_set_addr(VirtIODevice *vdev, int n, uint64_t addr)
+{
+ if (!vdev->vq[n].vring.num) {
+ return;
+ }
+ vdev->vq[n].vring.desc = addr;
+ virtio_queue_update_rings(vdev, n);
+}
+
+int virtio_queue_ready(VirtQueue *vq)
+{
+ return vq->vring.avail != 0;
+}
+
+
+uint16_t vring_avail_idx(VirtQueue *vq)
+{
+ vq->shadow_avail_idx = ((VRingAvail *)vq->vring.avail)->idx;
+
+ return vq->shadow_avail_idx;
+}
+
+uint16_t vring_avail_ring(VirtQueue *vq, int i)
+{
+ return ((VRingAvail *)vq->vring.avail)->ring[i];
+}
+
+int virtio_queue_split_empty(VirtQueue *vq)
+{
+ bool empty;
+
+ if (!vq->vring.avail) {
+ return 1;
+ }
+
+ if (vq->shadow_avail_idx != vq->last_avail_idx) {
+ return 0;
+ }
+
+ empty = vring_avail_idx(vq) == vq->last_avail_idx;
+ return empty;
+}
+
+int virtio_queue_empty(VirtQueue *vq)
+{
+ return virtio_queue_split_empty(vq);
+}
+
+size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt,
+ size_t offset, const void *buf, size_t bytes)
+{
+ size_t done;
+ unsigned int i;
+ for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
+ if (offset < iov[i].iov_len) {
+ size_t len = MIN(iov[i].iov_len - offset, bytes - done);
+ memcpy(iov[i].iov_base + offset, buf + done, len);
+ done += len;
+ offset = 0;
+ } else {
+ offset -= iov[i].iov_len;
+ }
+ }
+ return done;
+}
+
+
+size_t qemu_iov_from_buf(const struct iovec *iov, unsigned int iov_cnt,
+ size_t offset, const void *buf, size_t bytes)
+{
+ if (__builtin_constant_p(bytes) && iov_cnt &&
+ offset <= iov[0].iov_len && bytes <= iov[0].iov_len - offset) {
+ memcpy(iov[0].iov_base + offset, buf, bytes);
+ return bytes;
+ } else {
+ return iov_from_buf_full(iov, iov_cnt, offset, buf, bytes);
+ }
+}
+
+
+/* Called within rcu_read_lock(). */
+static inline uint16_t vring_avail_flags(VirtQueue *vq)
+{
+ return ((VRingAvail *)vq->vring.avail)->flags;
+}
+
+/* Called within rcu_read_lock(). */
+static inline uint16_t vring_get_used_event(VirtQueue *vq)
+{
+ return vring_avail_ring(vq, vq->vring.num);
+}
+
+/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
+/*
+ * Assuming a given event_idx value from the other side, if
+ * we have just incremented index from old to new_idx,
+ * should we trigger an event?
+ */
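+/*
+ * Worked example (16-bit wraparound arithmetic): with old = 0xfffe,
+ * new_idx = 0x0001 and event_idx = 0xffff, (uint16_t)(new_idx - event_idx - 1)
+ * is 1 and (uint16_t)(new_idx - old) is 3, so an event is triggered.
+ */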
+static inline int vring_need_event(uint16_t event_idx,
+ uint16_t new_idx, uint16_t old)
+{
+ /*
+ * Note: Xen has similar logic for notification hold-off
+ * in include/xen/interface/io/ring.h with req_event and req_prod
+ * corresponding to event_idx + 1 and new_idx respectively.
+ * Note also that req_event and req_prod in Xen start at 1,
+ * event indexes in virtio start at 0.
+ */
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
+
+/* Called within rcu_read_lock(). */
+static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
+{
+ uint16_t old, new;
+ bool v;
+
+ /* Always notify when queue is empty (when feature acknowledge) */
+ if (virtio_has_feature(vdev->guest_features, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ !vq->inuse && virtio_queue_empty(vq)) {
+ return true;
+ }
+
+ if (!virtio_has_feature(vdev->guest_features, VIRTIO_RING_F_EVENT_IDX)) {
+ return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
+ }
+
+ v = vq->signalled_used_valid;
+ vq->signalled_used_valid = true;
+ old = vq->signalled_used;
+ new = vq->signalled_used = vq->used_idx;
+ return !v || vring_need_event(vring_get_used_event(vq), new, old);
+}
+
+/* Called within rcu_read_lock(). */
+static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
+{
+ return virtio_split_should_notify(vdev, vq);
+}
+
+
+void virtio_set_isr(VirtIODevice *vdev, int value)
+{
+ uint8_t old = vdev->isr;
+
+ /*
+ * Do not write ISR if it does not change, so that its cacheline remains
+ * shared in the common case where the guest does not read it.
+ */
+ if ((old & value) != value) {
+ vdev->isr |= value;
+ }
+
+ DBG("Update isr: %d\n", vdev->isr);
+}
+
+static void virtio_irq(VirtQueue *vq)
+{
+ virtio_set_isr(vq->vdev, 0x1);
+ virtio_notify_vector(vq->vdev);
+}
+
+void virtio_notify_config(VirtIODevice *vdev)
+{
+
+ DBG("virtio_notify_config\n");
+
+ if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ return;
+ }
+
+ virtio_set_isr(vdev, 0x3);
+ vdev->generation++;
+ /*
+ * MMIO does not use vector parameter:
+ * virtio_notify_vector(vdev, vdev->config_vector);
+ */
+ virtio_notify_vector(vdev);
+}
+
+void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
+{
+ if (!virtio_should_notify(vdev, vq)) {
+ DBG("Do not notify!\n");
+ return;
+ }
+ DBG("Go on and notify!\n");
+
+ virtio_irq(vq);
+}
+
+static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem, int i)
+{
+ VRingUsed *used = (VRingUsed *)vq->vring.used;
+
+ used->ring[i] = *uelem;
+}
+
+void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len, unsigned int idx)
+{
+ VRingUsedElem uelem;
+
+ if (!vq->vring.used) {
+ return;
+ }
+
+ idx = (idx + vq->used_idx) % vq->vring.num;
+
+ uelem.id = elem->index;
+ uelem.len = len;
+ vring_used_write(vq, &uelem, idx);
+}
+
+void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len, unsigned int idx)
+{
+ virtqueue_split_fill(vq, elem, len, idx);
+}
+
+static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
+{
+ ((VRingUsed *)vq->vring.used)->idx = val;
+ vq->used_idx = val;
+}
+
+static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
+{
+ uint16_t old, new;
+
+ if (!vq->vring.used) {
+ return;
+ }
+
+ old = vq->used_idx;
+ new = old + count;
+ vring_used_idx_set(vq, new);
+ vq->inuse -= count;
+ if ((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)) {
+ vq->signalled_used_valid = false;
+ }
+}
+
+void virtqueue_flush(VirtQueue *vq, unsigned int count)
+{
+ virtqueue_split_flush(vq, count);
+}
+
+void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len)
+{
+ virtqueue_fill(vq, elem, len, 0);
+ virtqueue_flush(vq, 1);
+}
+
+
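+/*
+ * With VIRTIO_RING_F_EVENT_IDX negotiated, the avail-event field sits
+ * right after the used ring entries, i.e. at used->ring[vring.num];
+ * writing it tells the driver after which avail index we want to be
+ * notified again.
+ */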
+void vring_set_avail_event(VirtQueue *vq, uint16_t val)
+{
+ uint16_t *avail;
+
+ avail = (uint16_t *)&((VRingUsed *)vq->vring.used)->ring[vq->vring.num];
+ *avail = val;
+}
+
+static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
+ uint64_t *addr, struct iovec *iov,
+ unsigned int max_num_sg, bool is_write,
+ uint64_t pa, size_t sz)
+{
+ DBG("Not implemented\n");
+}
+
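+/*
+ * Allocate the element header plus its in/out address and scatter-gather
+ * arrays as one contiguous block, laid out as:
+ *
+ *     [header][in_addr[in_num]][out_addr[out_num]][in_sg[in_num]][out_sg[out_num]]
+ *
+ * with each array placed at its natural alignment by the offsets below.
+ */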
+static void *virtqueue_alloc_element(size_t sz, unsigned out_num,
+ unsigned in_num)
+{
+ VirtQueueElement *elem;
+ size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
+ size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
+ size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
+ size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
+ size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
+ size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
+
+ /*
+ * TODO: Add check for requested size
+ *
+ * assert(sz >= sizeof(VirtQueueElement));
+ */
+ elem = malloc(out_sg_end);
+ elem->out_num = out_num;
+ elem->in_num = in_num;
+ elem->in_addr = (void *)elem + in_addr_ofs;
+ elem->out_addr = (void *)elem + out_addr_ofs;
+ elem->in_sg = (void *)elem + in_sg_ofs;
+ elem->out_sg = (void *)elem + out_sg_ofs;
+ return elem;
+}
+
+void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
+{
+ unsigned int i, head, max;
+ int64_t len;
+ VirtIODevice *vdev = vq->vdev;
+ VirtQueueElement *elem = NULL;
+ unsigned out_num, in_num, elem_entries;
+ uint64_t addr[VIRTQUEUE_MAX_SIZE];
+ struct iovec iov[VIRTQUEUE_MAX_SIZE];
+ VRingDesc *desc;
+ int rc;
+
+ if (virtio_queue_split_empty(vq)) {
+ goto done;
+ }
+
+ /* When we start there are none of either input nor output. */
+ out_num = in_num = elem_entries = 0;
+
+ max = vq->vring.num;
+
+ if (vq->inuse >= vq->vring.num) {
+ DBG("Virtqueue size exceeded\n");
+ goto done;
+ }
+
+ if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
+ goto done;
+ }
+
+ if (virtio_has_feature(vdev->guest_features, VIRTIO_RING_F_EVENT_IDX)) {
+ vring_set_avail_event(vq, vq->last_avail_idx);
+ }
+
+ i = head;
+
+ desc = (VRingDesc *)vq->vring.desc + i;
+
+ /* Collect all the descriptors */
+ do {
+ bool map_ok;
+
+ if (desc->flags & VRING_DESC_F_WRITE) {
+ map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
+ iov + out_num,
+ VIRTQUEUE_MAX_SIZE - out_num, true,
+ desc->addr, desc->len);
+ } else {
+ if (in_num) {
+ DBG("Incorrect order for descriptors\n");
+ goto err_undo_map;
+ }
+ map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
+ VIRTQUEUE_MAX_SIZE, false,
+ desc->addr, desc->len);
+ }
+ if (!map_ok) {
+ goto err_undo_map;
+ }
+
+ /* If we've got too many, that implies a descriptor loop. */
+ if (++elem_entries > max) {
+ goto err_undo_map;
+ }
+
+ rc = virtqueue_split_read_next_desc(vdev, desc, max, &i);
+ } while (rc == VIRTQUEUE_READ_DESC_MORE);
+
+ if (rc == VIRTQUEUE_READ_DESC_ERROR) {
+ goto err_undo_map;
+ }
+
+ /* Now copy what we have collected and mapped */
+ elem = virtqueue_alloc_element(sz, out_num, in_num);
+ elem->index = head;
+ elem->ndescs = 1;
+ for (i = 0; i < out_num; i++) {
+ elem->out_addr[i] = addr[i];
+ elem->out_sg[i] = iov[i];
+ }
+ for (i = 0; i < in_num; i++) {
+ elem->in_addr[i] = addr[out_num + i];
+ elem->in_sg[i] = iov[out_num + i];
+ }
+
+ vq->inuse++;
+
+done:
+ return elem;
+
+err_undo_map:
+ goto done;
+}
+
+void *virtqueue_pop(VirtQueue *vq, size_t sz)
+{
+ return virtqueue_split_pop(vq, sz);
+}
+
+bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
+ unsigned int *head)
+{
+
+ /*
+ * Grab the next descriptor number they're advertising, and increment
+ * the index we've seen.
+ */
+ *head = vring_avail_ring(vq, idx % vq->vring.num);
+
+ /* If their number is silly, that's a fatal mistake. */
+ if (*head >= vq->vring.num) {
+ DBG("Guest says index %u is available", *head);
+ return false;
+ }
+
+ return true;
+}
+
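+/*
+ * A sketch of the computation below, assuming VIRTQUEUE_MAX_SIZE is 1024
+ * and 16-byte descriptors: 16384 (desc) + 2052 (avail) + 8196 (used)
+ * = 26632 bytes, rounded up to the next power of two (32768), which is
+ * already a multiple of PAGE_SIZE.
+ */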
+uint32_t get_vqs_max_size(VirtIODevice *vdev)
+{
+ uint32_t vq_max_size = VIRTQUEUE_MAX_SIZE;
+ uint32_t total_size, temp_size, total_p2 = 1;
+ int i, log_res = 0;
+
+ total_size = VIRTQUEUE_MAX_SIZE * sizeof(VRingDesc);
+ total_size += offsetof(VRingAvail, ring) +
+ VIRTQUEUE_MAX_SIZE * sizeof(uint16_t);
+    total_size += offsetof(VRingUsed, ring) +
+                  VIRTQUEUE_MAX_SIZE * sizeof(VRingUsedElem);
+
+ temp_size = total_size;
+
+ /* Compute log2 of total_size (Needs to be power of 2) */
+ while ((temp_size /= 2) > 0) {
+ log_res++;
+ total_p2 *= 2;
+ }
+
+    /* If total_size is not a power of two, round up (e.g. 9..15 -> 16) */
+ if (total_size > total_p2) {
+ total_size = 2 * total_p2;
+ }
+
+    /*
+     * Align to the page size: this is only needed when total_size
+     * is less than PAGE_SIZE (4096)
+     */
+ if (total_size % PAGE_SIZE > 0) {
+ total_size = (total_size / PAGE_SIZE) * PAGE_SIZE + PAGE_SIZE;
+ }
+
+ DBG("Total vqs size to mmap is: %u\n", total_size);
+
+ return total_size;
+}
+
+int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
+{
+ uint16_t num_heads = vring_avail_idx(vq) - idx;
+
+ /* Check it isn't doing very strange things with descriptor numbers. */
+ if (num_heads > vq->vring.num) {
+ DBG("Guest moved used index from %u to %u",
+ idx, vq->shadow_avail_idx);
+ return -EINVAL;
+ }
+
+ return num_heads;
+}
+
+int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
+ unsigned int max, unsigned int *next)
+{
+ /* If this descriptor says it doesn't chain, we're done. */
+ if (!(desc->flags & VRING_DESC_F_NEXT)) {
+ return VIRTQUEUE_READ_DESC_DONE;
+ }
+
+ /* Check they're not leading us off end of descriptors. */
+ *next = desc->next;
+
+ if (*next >= max) {
+ DBG("Desc next is %u", *next);
+ return VIRTQUEUE_READ_DESC_ERROR;
+ }
+
+ desc = (VRingDesc *)desc + *next;
+ return VIRTQUEUE_READ_DESC_MORE;
+}
+
+
+static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
+ unsigned int *in_bytes, unsigned int *out_bytes,
+ unsigned max_in_bytes, unsigned max_out_bytes)
+{
+ VirtIODevice *vdev = vq->vdev;
+ unsigned int max, idx;
+ unsigned int total_bufs, in_total, out_total;
+ int64_t len = 0;
+ int rc;
+
+ idx = vq->last_avail_idx;
+ total_bufs = in_total = out_total = 0;
+
+ max = vq->vring.num;
+
+ while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
+ unsigned int num_bufs;
+ VRingDesc *desc;
+ unsigned int i;
+
+ num_bufs = total_bufs;
+
+ if (!virtqueue_get_head(vq, idx++, &i)) {
+ goto err;
+ }
+
+        /* there is no need to copy anything from the cache struct */
+ desc = (VRingDesc *)vq->vring.desc + i;
+
+ if (desc->flags & VRING_DESC_F_INDIRECT) {
+ if (!desc->len || (desc->len % sizeof(VRingDesc))) {
+ DBG("Invalid size for indirect buffer table\n");
+ goto err;
+ }
+
+ /* If we've got too many, that implies a descriptor loop. */
+ if (num_bufs >= max) {
+ goto err;
+ }
+ }
+
+ do {
+ /* If we've got too many, that implies a descriptor loop. */
+ if (++num_bufs > max) {
+ goto err;
+ }
+
+ if (desc->flags & VRING_DESC_F_WRITE) {
+ in_total += desc->len;
+ } else {
+ out_total += desc->len;
+ }
+ if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
+ goto done;
+ }
+
+ rc = virtqueue_split_read_next_desc(vdev, desc, max, &i);
+ } while (rc == VIRTQUEUE_READ_DESC_MORE);
+
+ if (rc == VIRTQUEUE_READ_DESC_ERROR) {
+ goto err;
+ }
+
+ total_bufs = num_bufs;
+ }
+
+ if (rc < 0) {
+ goto err;
+ }
+
+done:
+ if (in_bytes) {
+ *in_bytes = in_total;
+ }
+ if (out_bytes) {
+ *out_bytes = out_total;
+ }
+ return;
+
+err:
+ in_total = out_total = 0;
+ goto done;
+}
+
+void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
+ unsigned int *out_bytes,
+ unsigned max_in_bytes, unsigned max_out_bytes)
+{
+ if (!vq->vring.desc) {
+ goto err;
+ }
+
+ virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
+ max_in_bytes, max_out_bytes);
+
+ return;
+err:
+ if (in_bytes) {
+ *in_bytes = 0;
+ }
+ if (out_bytes) {
+ *out_bytes = 0;
+ }
+}
+
+void print_neg_flag(uint64_t neg_flag, bool read)
+{
+ if (read) {
+ DBG("Read:\t");
+ } else {
+ DBG("Write:\t");
+ }
+
+ switch (neg_flag) {
+ case VIRTIO_MMIO_MAGIC_VALUE: /* 0x000 */
+ DBG("VIRTIO_MMIO_MAGIC_VALUE\n");
+ break;
+ case VIRTIO_MMIO_VERSION: /* 0x004 */
+ DBG("VIRTIO_MMIO_VERSION\n");
+ break;
+ case VIRTIO_MMIO_DEVICE_ID: /* 0x008 */
+ DBG("VIRTIO_MMIO_DEVICE_ID\n");
+ break;
+ case VIRTIO_MMIO_VENDOR_ID: /* 0x00c */
+ DBG("VIRTIO_MMIO_VENDOR_ID\n");
+ break;
+ case VIRTIO_MMIO_DEVICE_FEATURES: /* 0x010 */
+ DBG("VIRTIO_MMIO_DEVICE_FEATURES\n");
+ break;
+ case VIRTIO_MMIO_DEVICE_FEATURES_SEL: /* 0x014 */
+ DBG("VIRTIO_MMIO_DEVICE_FEATURES_SEL\n");
+ break;
+ case VIRTIO_MMIO_DRIVER_FEATURES: /* 0x020 */
+ DBG("VIRTIO_MMIO_DRIVER_FEATURES\n");
+ break;
+ case VIRTIO_MMIO_DRIVER_FEATURES_SEL: /* 0x024 */
+ DBG("VIRTIO_MMIO_DRIVER_FEATURES_SEL\n");
+ break;
+ case VIRTIO_MMIO_GUEST_PAGE_SIZE: /* 0x028 */
+ DBG("VIRTIO_MMIO_GUEST_PAGE_SIZE\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_SEL: /* 0x030 */
+ DBG("VIRTIO_MMIO_QUEUE_SEL\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_NUM_MAX: /* 0x034 */
+ DBG("VIRTIO_MMIO_QUEUE_NUM_MAX\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_NUM: /* 0x038 */
+ DBG("VIRTIO_MMIO_QUEUE_NUM\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_ALIGN: /* 0x03c */
+ DBG("VIRTIO_MMIO_QUEUE_ALIGN\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_PFN: /* 0x040 */
+ DBG("VIRTIO_MMIO_QUEUE_PFN\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_READY: /* 0x044 */
+ DBG("VIRTIO_MMIO_QUEUE_READY\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_NOTIFY: /* 0x050 */
+ DBG("VIRTIO_MMIO_QUEUE_NOTIFY\n");
+ break;
+ case VIRTIO_MMIO_INTERRUPT_STATUS: /* 0x060 */
+ DBG("VIRTIO_MMIO_INTERRUPT_STATUS\n");
+ break;
+ case VIRTIO_MMIO_INTERRUPT_ACK: /* 0x064 */
+ DBG("VIRTIO_MMIO_INTERRUPT_ACK\n");
+ break;
+ case VIRTIO_MMIO_STATUS: /* 0x070 */
+ DBG("VIRTIO_MMIO_STATUS\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_DESC_LOW: /* 0x080 */
+ DBG("VIRTIO_MMIO_QUEUE_DESC_LOW\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_DESC_HIGH: /* 0x084 */
+ DBG("VIRTIO_MMIO_QUEUE_DESC_HIGH\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_AVAIL_LOW: /* 0x090 */
+ DBG("VIRTIO_MMIO_QUEUE_AVAIL_LOW\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_AVAIL_HIGH: /* 0x094 */
+ DBG("VIRTIO_MMIO_QUEUE_AVAIL_HIGH\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_USED_LOW: /* 0x0a0 */
+ DBG("VIRTIO_MMIO_QUEUE_USED_LOW\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_USED_HIGH: /* 0x0a4 */
+ DBG("VIRTIO_MMIO_QUEUE_USED_HIGH\n");
+ break;
+ case VIRTIO_MMIO_SHM_SEL: /* 0x0ac */
+ DBG("VIRTIO_MMIO_SHM_SEL\n");
+ break;
+ case VIRTIO_MMIO_SHM_LEN_LOW: /* 0x0b0 */
+ DBG("VIRTIO_MMIO_SHM_LEN_LOW\n");
+ break;
+ case VIRTIO_MMIO_SHM_LEN_HIGH: /* 0x0b4 */
+ DBG("VIRTIO_MMIO_SHM_LEN_HIGH\n");
+ break;
+ case VIRTIO_MMIO_SHM_BASE_LOW: /* 0x0b8 */
+ DBG("VIRTIO_MMIO_SHM_BASE_LOW\n");
+ break;
+ case VIRTIO_MMIO_SHM_BASE_HIGH: /* 0x0bc */
+ DBG("VIRTIO_MMIO_SHM_BASE_HIGH\n");
+ break;
+ case VIRTIO_MMIO_CONFIG_GENERATION: /* 0x0fc */
+ DBG("VIRTIO_MMIO_CONFIG_GENERATION\n");
+ break;
+ default:
+ if (neg_flag >= VIRTIO_MMIO_CONFIG) {
+ DBG("\tVIRTIO_MMIO_CONFIG\n");
+ } else {
+ DBG("\tNegotiation flag Unknown: %ld\n", neg_flag);
+ }
+ return;
+ }
+}
+
+int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
+{
+ bool bad = (val & ~(vdev->host_features)) != 0;
+
+ val &= vdev->host_features;
+
+ vdev->guest_features |= val;
+ return bad ? -1 : 0;
+}
+
+int virtio_set_features(VirtIODevice *vdev, uint64_t val)
+{
+ int ret;
+ /*
+ * The driver must not attempt to set features after feature negotiation
+ * has finished.
+ */
+ if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
+ DBG("virtio_set_features: vdev->status "
+ "& VIRTIO_CONFIG_S_FEATURES_OK\n");
+ return -EINVAL;
+ }
+ ret = virtio_set_features_nocheck(vdev, val);
+ return ret;
+}
+
+
+/* TODO: MMIO notifiers -- This might not be needed anymore */
+static void virtio_queue_guest_notifier_read(EventNotifier *n)
+{
+ VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
+ if (event_notifier_test_and_clear(n)) {
+ virtio_irq(vq);
+ }
+}
+
+void *loopback_event_select(void *_e)
+{
+ int retval;
+ fd_set rfds;
+ int s;
+ EventNotifier *e = (EventNotifier *)_e;
+ int rfd = e->rfd;
+ VirtQueue *vq = container_of(e, VirtQueue, guest_notifier);
+
+ DBG("\nWaiting event from vhost-user-device\n");
+
+ FD_ZERO(&rfds);
+ FD_SET(rfd, &rfds);
+
+ while (1) {
+
+ retval = select(rfd + 1, &rfds, NULL, NULL, NULL);
+
+ if (retval == -1) {
+ DBG("select() error. Exiting...\n");
+ exit(1);
+ }
+ if (retval > 0) {
+
+ DBG("\n\nEvent has come from the vhost-user-device "
+ "(eventfd: %d) -> event_count: %d (select value: %d)\n\n",
+ rfd, eventfd_count, retval);
+
+ if (event_notifier_test_and_clear(e)) {
+ if (pthread_mutex_lock(&interrupt_lock) == 0) {
+ eventfd_count++;
+ virtio_irq(vq);
+ pthread_mutex_unlock(&interrupt_lock);
+ } else {
+ printf("[ERROR] Locking failed\n");
+ exit(1);
+ }
+ }
+ }
+ }
+}
+
+
+void event_notifier_set_handler(EventNotifier *e,
+ void *handler)
+{
+ int ret;
+ pthread_t thread_id;
+
+ if (e->wfd > 0) {
+ ret = pthread_create(&thread_id, NULL, loopback_event_select,
+ (void *)e);
+ if (ret != 0) {
+ exit(1);
+ }
+ }
+}
+
+
+void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
+ bool with_irqfd)
+{
+ if (assign && !with_irqfd) {
+ event_notifier_set_handler(&vq->guest_notifier,
+ virtio_queue_guest_notifier_read);
+ } else {
+ event_notifier_set_handler(&vq->guest_notifier, NULL);
+ }
+ if (!assign) {
+ /*
+ * Test and clear notifier before closing it,
+ * in case poll callback didn't have time to run.
+ */
+ virtio_queue_guest_notifier_read(&vq->guest_notifier);
+ }
+}
+
+EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
+{
+ return &vq->guest_notifier;
+}
+
+int virtio_loopback_set_guest_notifier(VirtIODevice *vdev, int n, bool assign,
+ bool with_irqfd)
+{
+ VirtioDeviceClass *vdc = vdev->vdev_class;
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
+
+ if (assign) {
+ int r = event_notifier_init(notifier, 0);
+ if (r < 0) {
+ return r;
+ }
+ virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
+ } else {
+ virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
+ }
+
+ return 0;
+}
+
+int virtio_loopback_set_guest_notifiers(VirtIODevice *vdev, int nvqs,
+ bool assign)
+{
+ bool with_irqfd = false;
+ int r, n;
+
+ nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);
+
+ for (n = 0; n < nvqs; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ break;
+ }
+
+ r = virtio_loopback_set_guest_notifier(vdev, n, assign, with_irqfd);
+ if (r < 0) {
+ goto assign_error;
+ }
+ }
+
+ return 0;
+
+assign_error:
+ DBG("Error return virtio_loopback_set_guest_notifiers\n");
+ return r;
+}
+
+EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
+{
+ return &vq->host_notifier;
+}
+
+void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
+{
+ vq->host_notifier_enabled = enabled;
+}
+
+int virtio_bus_set_host_notifier(VirtioBus *vbus, int n, bool assign)
+{
+ VirtIODevice *vdev = vbus->vdev;
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+
+ EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+ int r = 0;
+
+
+ if (!vbus->ioeventfd_assign) {
+ return -ENOSYS;
+ }
+
+ if (assign) {
+ r = event_notifier_init(notifier, 1);
+ if (r < 0) {
+ DBG("unable to init event notifier: %d", r);
+ return r;
+ }
+ r = vbus->ioeventfd_assign(proxy, notifier, n, true);
+ if (r < 0) {
+ DBG("unable to assign ioeventfd: %d", r);
+ }
+ } else {
+ vbus->ioeventfd_assign(proxy, notifier, n, false);
+ }
+
+ if (r == 0) {
+ virtio_queue_set_host_notifier_enabled(vq, assign);
+ }
+
+ return r;
+}
+
+
+
+/* On success, ioeventfd ownership belongs to the caller. */
+int virtio_bus_grab_ioeventfd(VirtioBus *bus)
+{
+ /*
+ * vhost can be used even if ioeventfd=off in the proxy device,
+ * so do not check k->ioeventfd_enabled.
+ */
+ if (!bus->ioeventfd_assign) {
+ return -ENOSYS;
+ }
+
+ if (bus->ioeventfd_grabbed == 0 && bus->ioeventfd_started) {
+ /*
+ * Remember that we need to restart ioeventfd
+ * when ioeventfd_grabbed becomes zero.
+ */
+ bus->ioeventfd_started = true;
+ }
+ bus->ioeventfd_grabbed++;
+ return 0;
+}
+
+int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
+{
+ return virtio_bus_grab_ioeventfd(vdev->vbus);
+}
+
+bool virtio_device_disabled(VirtIODevice *vdev)
+{
+ return vdev->disabled || vdev->broken;
+}
+
+static int prev_level;
+static int int_count;
+
+void virtio_loopback_update_irq(VirtIODevice *vdev)
+{
+ int level, irq_num = 44;
+ pthread_t my_thread_id;
+
+ if (!vdev) {
+ return;
+ }
+
+ level = (vdev->isr != 0);
+
+ DBG("level: %d\n", level);
+ DBG("prev_level: %d\n", prev_level);
+
+ if (!((level == 1) && (prev_level == 0))) {
+ DBG("No interrupt\n");
+ prev_level = level;
+ return;
+ }
+ prev_level = level;
+
+ DBG("Trigger interrupt (ioctl)\n");
+ DBG("Interrupt counter: %d\n", int_count++);
+
+ (void) ioctl(fd, IRQ, &irq_num);
+}
+
+bool enable_virtio_interrupt;
+
+/* virtio device */
+void virtio_notify_vector(VirtIODevice *vdev)
+{
+
+ /* TODO: Check if this is still needed */
+ if (virtio_device_disabled(vdev)) {
+ DBG("Device is disabled\n");
+ return;
+ }
+
+ virtio_loopback_update_irq(vdev);
+
+ /*
+     * TODO: substitute the previous line with the
+ * following when it's implemented
+ *
+ * if (k->notify) {
+ * k->notify(qbus->parent, vector);
+ * }
+ */
+}
+
+void virtio_update_irq(VirtIODevice *vdev)
+{
+ virtio_notify_vector(vdev);
+}
+
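+/*
+ * A queue notification ("kick") is served in one of two ways: if the
+ * queue's host notifier is enabled, its eventfd is signalled so that the
+ * vhost-user backend picks it up; otherwise the device's handle_output
+ * callback is invoked directly inside the adapter.
+ */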
+void virtio_queue_notify(VirtIODevice *vdev, int n)
+{
+ VirtQueue *vq = &vdev->vq[n];
+
+ DBG("virtio_queue_notify(..., vq_n: %d)\n", n);
+
+ if (!vq->vring.desc || vdev->broken) {
+ DBG("virtio_queue_notify: broken\n");
+ return;
+ }
+
+ if (vq->host_notifier_enabled) {
+ event_notifier_set(&vq->host_notifier);
+ } else if (vq->handle_output) {
+ DBG("vq->handle_output\n");
+ vq->handle_output(vdev, vq);
+
+ if (vdev->start_on_kick) {
+ virtio_set_started(vdev, true);
+ }
+ }
+}
+
+uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = vdev->vdev_class;
+ uint8_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ DBG("virtio_config_readb failed\n");
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ memcpy(&val, (uint8_t *)(vdev->config + addr), sizeof(uint8_t));
+
+ return val;
+}
+
+uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = vdev->vdev_class;
+ uint16_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ DBG("virtio_config_readw failed\n");
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ memcpy(&val, (uint16_t *)(vdev->config + addr), sizeof(uint16_t));
+ return val;
+}
+
+uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = vdev->vdev_class;
+ uint32_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ DBG("virtio_config_readl failed\n");
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ memcpy(&val, (uint32_t *)(vdev->config + addr), sizeof(uint32_t));
+ return val;
+}
+
+void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = vdev->vdev_class;
+ uint8_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ memcpy((uint8_t *)(vdev->config + addr), &val, sizeof(uint8_t));
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
+void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = vdev->vdev_class;
+ uint16_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ memcpy((uint16_t *)(vdev->config + addr), &val, sizeof(uint16_t));
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
+void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = vdev->vdev_class;
+ uint32_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ memcpy((uint32_t *)(vdev->config + addr), &val, sizeof(uint32_t));
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
+
+
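+/*
+ * MMIO dispatch: offsets at or above VIRTIO_MMIO_CONFIG (0x100) fall in
+ * the device config window and are forwarded to the virtio_config_read
+ * and virtio_config_write helpers above (with the window base already
+ * subtracted); everything below 0x100 is a transport register and, apart
+ * from the legacy config path, must be accessed with 4-byte width.
+ */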
+static uint64_t virtio_loopback_read(VirtIODevice *vdev, uint64_t offset,
+ unsigned size)
+{
+
+ uint64_t ret;
+
+ if (!vdev) {
+ /*
+ * If no backend is present, we treat most registers as
+ * read-as-zero, except for the magic number, version and
+ * vendor ID. This is not strictly sanctioned by the virtio
+ * spec, but it allows us to provide transports with no backend
+ * plugged in which don't confuse Linux's virtio code: the
+ * probe won't complain about the bad magic number, but the
+ * device ID of zero means no backend will claim it.
+ */
+ switch (offset) {
+ case VIRTIO_MMIO_MAGIC_VALUE:
+ return VIRT_MAGIC;
+ case VIRTIO_MMIO_VERSION:
+ if (proxy->legacy) {
+ return VIRT_VERSION_LEGACY;
+ } else {
+ return VIRT_VERSION;
+ }
+ case VIRTIO_MMIO_VENDOR_ID:
+ return VIRT_VENDOR;
+ default:
+ return 0;
+ }
+ }
+
+ if (offset >= VIRTIO_MMIO_CONFIG) {
+ offset -= VIRTIO_MMIO_CONFIG;
+
+ if (proxy->legacy) {
+ switch (size) {
+ case 1:
+ ret = virtio_config_readb(vdev, offset);
+ break;
+ case 2:
+ ret = virtio_config_readw(vdev, offset);
+ break;
+ case 4:
+ ret = virtio_config_readl(vdev, offset);
+ break;
+ default:
+ abort();
+ }
+ DBG("ret: %lu\n", ret);
+ return ret;
+ }
+
+        /* TODO: non-legacy (v2) config space access is not implemented */
+        return 4;
+ }
+
+ if (size != 4) {
+ DBG("wrong size access to register!\n");
+ return 0;
+ }
+
+ switch (offset) {
+ case VIRTIO_MMIO_MAGIC_VALUE:
+ return VIRT_MAGIC;
+ case VIRTIO_MMIO_VERSION:
+ DBG("VIRTIO_MMIO_VERSION ->\n");
+ if (proxy->legacy) {
+ DBG("VIRTIO_MMIO_VERSION -> legacy\n");
+ return VIRT_VERSION_LEGACY;
+ } else {
+ DBG("VIRTIO_MMIO_VERSION -> version\n");
+ return VIRT_VERSION;
+ }
+ case VIRTIO_MMIO_DEVICE_ID:
+ return vdev->device_id;
+ case VIRTIO_MMIO_VENDOR_ID:
+ DBG("READ\n");
+ return VIRT_VENDOR;
+ case VIRTIO_MMIO_DEVICE_FEATURES:
+ if (proxy->legacy) {
+ if (proxy->host_features_sel) {
+ return vdev->host_features >> 32;
+ } else {
+ return vdev->host_features & (uint64_t)(((1ULL << 32) - 1));
+ }
+        } else {
+            /* TODO: To be implemented; do not fall through */
+            return 0;
+        }
+ case VIRTIO_MMIO_QUEUE_NUM_MAX:
+ /* TODO: To be implemented */
+ return VIRTQUEUE_MAX_SIZE;
+ case VIRTIO_MMIO_QUEUE_PFN:
+ if (!proxy->legacy) {
+ DBG("VIRTIO_MMIO_QUEUE_PFN: read from legacy register (0x%lx) "
+ "in non-legacy mode\n", offset);
+ return 0;
+ }
+ return virtio_queue_get_addr(vdev, vdev->queue_sel) >>
+ proxy->guest_page_shift;
+
+ case VIRTIO_MMIO_QUEUE_READY:
+ if (proxy->legacy) {
+ DBG("VIRTIO_MMIO_QUEUE_READY: read from legacy register (0x%lx) "
+ "in non-legacy mode\n", offset);
+ return 0;
+ }
+ /* TODO: To be implemented */
+ case VIRTIO_MMIO_INTERRUPT_STATUS:
+ return vdev->isr;
+ case VIRTIO_MMIO_STATUS:
+ DBG("Read VIRTIO_MMIO_STATUS: %d\n", vdev->status);
+ return vdev->status;
+ case VIRTIO_MMIO_CONFIG_GENERATION:
+ if (proxy->legacy) {
+ DBG("VIRTIO_MMIO_CONFIG_GENERATION: read from legacy "
+ "register (0x%lx) in non-legacy mode\n", offset);
+ return 0;
+ }
+ return vdev->generation;
+ case VIRTIO_MMIO_SHM_LEN_LOW:
+ case VIRTIO_MMIO_SHM_LEN_HIGH:
+        /*
+         * VIRTIO_MMIO_SHM_SEL is unimplemented.
+         * According to the Linux driver, a region length of -1
+         * means the shared memory region does not exist.
+         */
+ return -1;
+ case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
+ case VIRTIO_MMIO_DRIVER_FEATURES:
+ case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
+ case VIRTIO_MMIO_GUEST_PAGE_SIZE:
+ case VIRTIO_MMIO_QUEUE_SEL:
+ case VIRTIO_MMIO_QUEUE_NUM:
+ case VIRTIO_MMIO_QUEUE_ALIGN:
+ case VIRTIO_MMIO_QUEUE_NOTIFY:
+ case VIRTIO_MMIO_INTERRUPT_ACK:
+ case VIRTIO_MMIO_QUEUE_DESC_LOW:
+ case VIRTIO_MMIO_QUEUE_DESC_HIGH:
+ case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
+ case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
+ case VIRTIO_MMIO_QUEUE_USED_LOW:
+ case VIRTIO_MMIO_QUEUE_USED_HIGH:
+ DBG("VIRTIO_MMIO_QUEUE_USED_HIGH: read of write-only "
+ "register (0x%lx)\n", offset);
+ return 0;
+ default:
+ DBG("read: bad register offset (0x%lx)\n", offset);
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * Guest-physical addresses of the vrings, recorded as the driver writes
+ * VIRTIO_MMIO_QUEUE_PFN (legacy mode) for each queue.
+ */
+uint64_t vring_phys_addrs[10] = {0};
+uint32_t vring_phys_addrs_idx;
+static int notify_cnt;
+
+void virtio_loopback_write(VirtIODevice *vdev, uint64_t offset,
+ uint64_t value, unsigned size)
+{
+ if (!vdev) {
+ /*
+ * If no backend is present, we just make all registers
+ * write-ignored. This allows us to provide transports with
+ * no backend plugged in.
+ */
+ return;
+ }
+
+ if (offset >= VIRTIO_MMIO_CONFIG) {
+ offset -= VIRTIO_MMIO_CONFIG;
+
+ if (proxy->legacy) {
+ switch (size) {
+ case 1:
+ virtio_config_writeb(vdev, offset, value);
+ break;
+ case 2:
+ virtio_config_writew(vdev, offset, value);
+ break;
+ case 4:
+ virtio_config_writel(vdev, offset, value);
+ break;
+ default:
+ DBG("VIRTIO_MMIO_CONFIG abort\n");
+ abort();
+ }
+ return;
+ }
+
+ return;
+ }
+ if (size != 4) {
+ DBG("write: wrong size access to register!\n");
+ return;
+ }
+ switch (offset) {
+ case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
+ DBG("VIRTIO_MMIO_DEVICE_FEATURES_SEL: 0x%lx\n", value);
+ if (value) {
+ proxy->host_features_sel = 1;
+ } else {
+ proxy->host_features_sel = 0;
+ }
+ break;
+ case VIRTIO_MMIO_DRIVER_FEATURES:
+ if (proxy->legacy) {
+            if (proxy->guest_features_sel) {
+                /* Selector 1: value carries the upper 32 feature bits */
+                DBG("Set driver features (high): 0x%lx\n", value << 32);
+                virtio_set_features(vdev, value << 32);
+ } else {
+ DBG("Set driver features: 0x%lx\n", value);
+ virtio_set_features(vdev, value);
+ }
+ } else {
+ /* TODO: To be implemented */
+ }
+ break;
+ case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
+ if (value) {
+ proxy->guest_features_sel = 1;
+ } else {
+ proxy->guest_features_sel = 0;
+ }
+ break;
+ case VIRTIO_MMIO_GUEST_PAGE_SIZE:
+ if (!proxy->legacy) {
+ DBG("write to legacy register (0x%lx"
+ ") in non-legacy mode\n", offset);
+ return;
+ }
+        /*
+         * Derive the page shift from the page size written by the driver
+         * (assumed to be a power of two), as QEMU's virtio-mmio does.
+         */
+        proxy->guest_page_shift = value ? __builtin_ctzll(value) : 0;
+        if (proxy->guest_page_shift > 31) {
+            proxy->guest_page_shift = 0;
+        }
+ break;
+ case VIRTIO_MMIO_QUEUE_SEL:
+ if (value < VIRTIO_QUEUE_MAX) {
+ vdev->queue_sel = value;
+ }
+ break;
+ case VIRTIO_MMIO_QUEUE_NUM:
+ DBG("VIRTIO_MMIO_QUEUE_NUM: %lu\n", value);
+
+ virtio_queue_set_num(vdev, vdev->queue_sel, value);
+
+ if (proxy->legacy) {
+ virtio_queue_update_rings(vdev, vdev->queue_sel);
+ } else {
+ /* TODO: To be implemented */
+ exit(1);
+ }
+ break;
+ case VIRTIO_MMIO_QUEUE_ALIGN:
+ if (!proxy->legacy) {
+ DBG("write to legacy register (0x%lx) in "
+ "non-legacy mode\n", offset);
+ return;
+ }
+ /* TODO: To be implemented */
+ break;
+ case VIRTIO_MMIO_QUEUE_PFN:
+ if (!proxy->legacy) {
+ DBG("write to legacy register (0x%lx) in "
+ "non-legacy mode\n", offset);
+ return;
+ }
+ if (value == 0) {
+ /* TODO: To be implemented */
+ } else {
+
+ DBG("desc_addr: 0x%lx\n", value);
+ vring_phys_addrs[vring_phys_addrs_idx++] = value;
+
+ uint64_t desc_addr;
+ uint32_t vqs_size = get_vqs_max_size(global_vdev);
+
+ ioctl(fd, SHARE_VQS, &vdev->queue_sel);
+
+            desc_addr = (uint64_t)mmap(NULL, vqs_size,
+                                       PROT_READ | PROT_WRITE,
+                                       MAP_SHARED, fd, 0);
+            if ((void *)desc_addr == MAP_FAILED) {
+                DBG("mmap of the shared vrings failed\n");
+                return;
+            }
+
+            virtio_queue_set_addr(vdev, vdev->queue_sel,
+                                  desc_addr);
+ }
+ break;
+ case VIRTIO_MMIO_QUEUE_READY:
+ if (proxy->legacy) {
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
+ return;
+ }
+ /* TODO: To be implemented */
+ break;
+ case VIRTIO_MMIO_QUEUE_NOTIFY:
+ DBG("VIRTIO_MMIO_QUEUE_NOTIFY: vq_index -> %d, notify_cnt: %d\n",
+ value, notify_cnt++);
+ if (value < VIRTIO_QUEUE_MAX) {
+ virtio_queue_notify(vdev, value);
+ }
+ break;
+ case VIRTIO_MMIO_INTERRUPT_ACK:
+ vdev->isr = vdev->isr & ~value;
+ virtio_update_irq(vdev);
+ break;
+ case VIRTIO_MMIO_STATUS:
+
+ /*
+ * TODO: Add it in a future release later
+ *
+ * if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ * virtio_loopback_stop_ioeventfd(proxy);
+ * }
+ */
+
+ if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ virtio_set_features(vdev,
+ ((uint64_t)proxy->guest_features[1]) << 32 |
+ proxy->guest_features[0]);
+ }
+
+ virtio_set_status(vdev, value & 0xff);
+
+ DBG("STATUS -> %ld\n", value);
+
+ /*
+ * TODO: Check if this is still needed
+ *
+ * if (vdev->status == 0) {
+ * virtio_reset(vdev);
+ * virtio_loopback_soft_reset(proxy);
+ * }
+ */
+
+ break;
+ case VIRTIO_MMIO_QUEUE_DESC_LOW:
+ if (proxy->legacy) {
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
+ return;
+ }
+ /* TODO: To be implemented */
+ break;
+ case VIRTIO_MMIO_QUEUE_DESC_HIGH:
+ if (proxy->legacy) {
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
+ return;
+ }
+ /* TODO: To be implemented */
+ break;
+ case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
+ if (proxy->legacy) {
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
+ return;
+ }
+ /* TODO: To be implemented */
+ break;
+ case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
+ if (proxy->legacy) {
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
+ return;
+ }
+ /* TODO: To be implemented */
+ break;
+ case VIRTIO_MMIO_QUEUE_USED_LOW:
+ if (proxy->legacy) {
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
+ return;
+ }
+ /* TODO: To be implemented */
+ break;
+ case VIRTIO_MMIO_QUEUE_USED_HIGH:
+ if (proxy->legacy) {
+ DBG("write to non-legacy register (0x%lx) in "
+ "legacy mode\n", offset);
+ return;
+ }
+ /* TODO: To be implemented */
+ break;
+ case VIRTIO_MMIO_MAGIC_VALUE:
+ case VIRTIO_MMIO_VERSION:
+ case VIRTIO_MMIO_DEVICE_ID:
+ case VIRTIO_MMIO_VENDOR_ID:
+ case VIRTIO_MMIO_DEVICE_FEATURES:
+ case VIRTIO_MMIO_QUEUE_NUM_MAX:
+ case VIRTIO_MMIO_INTERRUPT_STATUS:
+ case VIRTIO_MMIO_CONFIG_GENERATION:
+ /* TODO: To be implemented */
+ break;
+ default:
+ DBG("bad register offset (0x%lx)\n", offset);
+ }
+}
+
+VirtIODevice *global_vdev;
+VirtioBus *global_vbus;
+
+void adapter_read_write_cb(void)
+{
+    /* Print every incoming read/write event (debug aid) */
+    print_neg_flag(address->notification, address->read);
+
+ if (address->read) {
+ address->data = virtio_loopback_read(global_vdev,
+ address->notification, address->size);
+ } else {
+ virtio_loopback_write(global_vdev, address->notification,
+ address->data, address->size);
+ }
+
+ DBG("Return to the driver\n");
+
+    /*
+     * Notify the driver that all the
+     * required actions have been completed.
+     */
+ (void)ioctl(fd, WAKEUP);
+
+}
+
+void *driver_event_select(void *data)
+{
+ int retval;
+ uint64_t eftd_ctr;
+ int efd = *(int *)data;
+
+ DBG("\nWaiting for loopback read/write events\n");
+
+ FD_ZERO(&rfds);
+ FD_SET(efd, &rfds);
+
+ while (1) {
+
+ retval = select(efd + 1, &rfds, NULL, NULL, NULL);
+
+ if (retval == -1) {
+ DBG("\nselect() error. Exiting...");
+ exit(EXIT_FAILURE);
+ } else if (retval > 0) {
+
+ s = read(efd, &eftd_ctr, sizeof(uint64_t));
+ if (s != sizeof(uint64_t)) {
+ DBG("\neventfd read error. Exiting...");
+ exit(1);
+ } else {
+ adapter_read_write_cb();
+ }
+
+ } else if (retval == 0) {
+ DBG("\nselect() says that no data was available");
+ }
+ }
+}
+
+void create_rng_struct(void)
+{
+    device_info.magic = 0x74726976;  /* 'virt' (VIRT_MAGIC) */
+    device_info.version = 0x1;       /* legacy transport */
+    device_info.device_id = 0x4;     /* virtio-rng (entropy source) */
+    device_info.vendor = 0x554d4551; /* 'QEMU' (VIRT_VENDOR) */
+}
+
+VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
+{
+ return vdev->vq + n;
+}
+
+VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
+ VirtIOHandleOutput handle_output)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (vdev->vq[i].vring.num == 0) {
+ break;
+ }
+ }
+
+ if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
+ DBG("Error: queue_size > VIRTQUEUE_MAX_SIZE\n");
+ exit(1);
+ }
+
+ vdev->vq[i].vring.num = queue_size;
+ vdev->vq[i].vring.num_default = queue_size;
+ vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
+ vdev->vq[i].handle_output = handle_output;
+ vdev->vq[i].used_elems = (VirtQueueElement *)malloc(sizeof(VirtQueueElement)
+ * queue_size);
+
+ return &vdev->vq[i];
+}
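+
+/*
+ * For example, the RNG device in this repository registers a single
+ * 8-descriptor queue:
+ *
+ *     global_vdev->vq = virtio_add_queue(global_vdev, 8, handle_input);
+ */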
+
+void virtio_dev_init(VirtIODevice *vdev, const char *name,
+ uint16_t device_id, size_t config_size)
+{
+ int i;
+
+ DBG("virtio_dev_init\n");
+
+ /* Initialize global variables */
+ prev_level = 0;
+ int_count = 0;
+ eventfd_count = 0;
+ enable_virtio_interrupt = false;
+ vring_phys_addrs_idx = 0;
+ notify_cnt = 0;
+
+ /* Initialize interrupt mutex */
+ if (pthread_mutex_init(&interrupt_lock, NULL) != 0) {
+ printf("[ERROR] mutex init has failed\n");
+ exit(1);
+ }
+
+ vdev->start_on_kick = false;
+ vdev->started = false;
+ vdev->device_id = device_id;
+ vdev->status = 0;
+ vdev->queue_sel = 0;
+ vdev->config_vector = VIRTIO_NO_VECTOR;
+ /* TODO: check malloc return value */
+ vdev->vq = (VirtQueue *) malloc(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
+ vdev->vm_running = false;
+ vdev->broken = false;
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ vdev->vq[i].vector = VIRTIO_NO_VECTOR;
+ vdev->vq[i].vdev = vdev;
+ vdev->vq[i].queue_index = i;
+ vdev->vq[i].host_notifier_enabled = false;
+ }
+
+ vdev->name = name;
+ vdev->config_len = config_size;
+ if (vdev->config_len) {
+ vdev->config = (void *) malloc(config_size);
+ } else {
+ vdev->config = NULL;
+ }
+
+ vdev->use_guest_notifier_mask = true;
+ DBG("virtio_dev_init return\n");
+}
+
+static bool virtio_loopback_ioeventfd_enabled(VirtIODevice *d)
+{
+ return (proxy->flags & VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD) != 0;
+}
+
+/* TODO: This function might not be needed anymore */
+static int virtio_loopback_ioeventfd_assign(VirtIOMMIOProxy *d,
+ EventNotifier *notifier,
+ int n, bool assign)
+{
+ return 0;
+}
+
+bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev)
+{
+ VirtioBus *k = vdev->vbus;
+
+ if (!k->iommu_enabled) {
+ return false;
+ }
+
+ return k->iommu_enabled(vdev);
+}
+
+void virtio_loopback_bus_init(VirtioBus *k)
+{
+ DBG("virtio_loopback_bus_init(...)\n");
+ k->set_guest_notifiers = virtio_loopback_set_guest_notifiers;
+ k->ioeventfd_enabled = virtio_loopback_ioeventfd_enabled;
+ k->ioeventfd_assign = virtio_loopback_ioeventfd_assign;
+ DBG("virtio_loopback_bus_init(...) return\n");
+}
+
+
+int virtio_loopback_start(void)
+{
+ efd_data_t info;
+ pthread_t thread_id;
+ int ret = -1;
+
+ fd = open("/dev/loopback", O_RDWR);
+ if (fd < 0) {
+ perror("Open call failed");
+ return -1;
+ }
+ loopback_fd = fd;
+
+ /* Create eventfd */
+ efd = eventfd(0, 0);
+ if (efd == -1) {
+ DBG("\nUnable to create eventfd! Exiting...\n");
+ exit(EXIT_FAILURE);
+ }
+
+ info.pid = getpid();
+ info.efd[0] = efd;
+
+ /*
+ * Send the appropriate information to the driver
+ * so to be able to trigger an eventfd
+ */
+ (void)ioctl(fd, EFD_INIT, &info);
+
+ /* Map communication mechanism */
+ (void)ioctl(fd, SHARE_COM_STRUCT);
+ address = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (address == MAP_FAILED) {
+ perror("mmap operation failed");
+ return -1;
+ }
+
+    /* Spawn a thread that waits on the eventfd for driver events */
+ ret = pthread_create(&thread_id, NULL, driver_event_select, (void *)&efd);
+ if (ret != 0) {
+ exit(1);
+ }
+
+ /* Start loopback transport */
+ (void)ioctl(fd, START_LOOPBACK, &device_info);
+
+ ret = pthread_join(thread_id, NULL);
+ if (ret != 0) {
+ exit(1);
+ }
+
+ DBG("\nClosing eventfd. Exiting...\n");
+ close(efd);
+
+ exit(EXIT_SUCCESS);
+}
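+
+/*
+ * Assumed start-up sequence (sketch; the exact call order lives in
+ * adapter.c):
+ *
+ *     virtio_rng_realize();           // or another device realize function
+ *     virtio_loopback_bus_init(global_vbus);
+ *     virtio_loopback_start();        // opens /dev/loopback, serves events
+ */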
diff --git a/virtio_loopback.h b/virtio_loopback.h
new file mode 100644
index 0000000..34bae2f
--- /dev/null
+++ b/virtio_loopback.h
@@ -0,0 +1,709 @@
+/*
+ * Based on:
+ * 1) virtio.h of QEMU project
+ *
+ * Copyright IBM, Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * 2) virtio-mmio.h of QEMU project
+ *
+ * Copyright (c) 2011 Linaro Limited
+ *
+ * Author:
+ * Peter Maydell <peter.maydell@linaro.org>
+ *
+ * 3) vhost.h of QEMU project
+ *
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * Control registers
+ */
+#ifndef VIRTIO_LOOPBACK
+#define VIRTIO_LOOPBACK
+
+#include "event_notifier.h"
+
+#define sizeof_field(type, field) sizeof(((type *)0)->field)
+
+/* Magic value ("virt" string) - Read Only */
+#define VIRTIO_MMIO_MAGIC_VALUE 0x000
+
+/* Virtio device version - Read Only */
+#define VIRTIO_MMIO_VERSION 0x004
+
+/* Virtio device ID - Read Only */
+#define VIRTIO_MMIO_DEVICE_ID 0x008
+
+/* Virtio vendor ID - Read Only */
+#define VIRTIO_MMIO_VENDOR_ID 0x00c
+
+/*
+ * Bitmask of the features supported by the device (host)
+ * (32 bits per set) - Read Only
+ */
+#define VIRTIO_MMIO_DEVICE_FEATURES 0x010
+
+/* Device (host) features set selector - Write Only */
+#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014
+
+/*
+ * Bitmask of features activated by the driver (guest)
+ * (32 bits per set) - Write Only
+ */
+#define VIRTIO_MMIO_DRIVER_FEATURES 0x020
+
+/* Activated features set selector - Write Only */
+#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024
+
+/* Guest's memory page size in bytes - Write Only */
+#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
+
+/* Queue selector - Write Only */
+#define VIRTIO_MMIO_QUEUE_SEL 0x030
+
+/* Maximum size of the currently selected queue - Read Only */
+#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034
+
+/* Queue size for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_NUM 0x038
+
+
+/* Used Ring alignment for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c
+
+/* Guest's PFN for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_PFN 0x040
+
+/* Ready bit for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_READY 0x044
+
+/* Queue notifier - Write Only */
+#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050
+
+/* Interrupt status - Read Only */
+#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060
+
+/* Interrupt acknowledge - Write Only */
+#define VIRTIO_MMIO_INTERRUPT_ACK 0x064
+
+/* Device status register - Read Write */
+#define VIRTIO_MMIO_STATUS 0x070
+
+/* Selected queue's Descriptor Table address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080
+#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084
+
+/* Selected queue's Available Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090
+#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094
+
+/* Selected queue's Used Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
+#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
+
+/* Shared memory region id */
+#define VIRTIO_MMIO_SHM_SEL 0x0ac
+
+/* Shared memory region length, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_LEN_LOW 0x0b0
+#define VIRTIO_MMIO_SHM_LEN_HIGH 0x0b4
+
+/* Shared memory region base address, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_BASE_LOW 0x0b8
+#define VIRTIO_MMIO_SHM_BASE_HIGH 0x0bc
+
+/* Configuration atomicity value */
+#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
+
+/*
+ * The config space is defined by each driver as
+ * the per-driver configuration space - Read Write
+ */
+#define VIRTIO_MMIO_CONFIG 0x100
+
+/*
+ * Interrupt flags (re: interrupt status & acknowledge registers)
+ */
+#define VIRTIO_MMIO_INT_VRING (1 << 0)
+#define VIRTIO_MMIO_INT_CONFIG (1 << 1)
+
+#define VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT 1
+#define VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD \
+ (1 << VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT)
+
+
+/* Virtio loopback driver related */
+
+/* QEMU defines */
+#define VIRT_MAGIC 0x74726976 /* 'virt' */
+#define VIRT_VERSION 2
+#define VIRT_VERSION_LEGACY 1
+#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
+
+#define VIRTQUEUE_MAX_SIZE 64
+#define VIRTIO_QUEUE_MAX VIRTQUEUE_MAX_SIZE
+
+#define VIRTIO_NO_VECTOR 0xffff
+#define TYPE_VIRTIO_DEVICE "virtio-device"
+
+/* Loopback negotiation code */
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE 4096
+
+#define EFD_INIT _IOC(_IOC_WRITE, 'k', 1, sizeof(efd_data_t))
+#define WAKEUP _IOC(_IOC_WRITE, 'k', 2, 0)
+#define START_LOOPBACK _IOC(_IOC_WRITE, 'k', 3, \
+ sizeof(virtio_device_info_struct_t))
+#define IRQ _IOC(_IOC_WRITE, 'k', 4, sizeof(int))
+#define SHARE_VQS _IOC(_IOC_WRITE, 'k', 5, sizeof(uint32_t))
+#define SHARE_BUF _IOC(_IOC_WRITE, 'k', 6, sizeof(uint64_t))
+#define SHARE_COM_STRUCT _IOC(_IOC_WRITE, 'k', 7, 0)
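+
+/*
+ * ioctl protocol with the loopback kernel driver, as exercised in
+ * virtio_loopback.c: EFD_INIT registers the adapter's eventfd, WAKEUP
+ * signals that a read/write request has been served, START_LOOPBACK
+ * starts the transport, IRQ injects a guest interrupt, and the SHARE_*
+ * calls export the vrings, data buffers and the communication structure
+ * for mmap.
+ */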
+
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+typedef struct VirtIOMMIOProxy {
+ /* Generic */
+ bool legacy;
+ uint32_t flags;
+ /* Guest accessible state needing migration and reset */
+ uint32_t host_features_sel;
+ uint32_t guest_features_sel;
+ uint32_t guest_page_shift;
+ /* virtio-bus */
+ bool format_transport_address;
+ /* Fields only used for non-legacy (v2) devices */
+ uint32_t guest_features[2];
+} VirtIOMMIOProxy;
+
+
+/* Vring specific */
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+/* Reset vrings value */
+#define VIRTIO_F_RING_RESET 40
+
+
+/*
+ * Mark a descriptor as available or used in packed ring.
+ * Notice: they are defined as shifts instead of shifted values.
+ */
+#define VRING_PACKED_DESC_F_AVAIL 7
+#define VRING_PACKED_DESC_F_USED 15
+
+/*
+ * The Host uses this in used->flags to advise the Guest: don't kick me when
+ * you add a buffer. It's unreliable, so it's simply an optimization. Guest
+ * will still kick if it's out of buffers.
+ */
+#define VRING_USED_F_NO_NOTIFY 1
+/*
+ * The Guest uses this in avail->flags to advise the Host: don't interrupt me
+ * when you consume a buffer. It's unreliable, so it's simply an
+ * optimization.
+ */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* Enable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
+/* Disable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor in packed ring.
+ * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
+ * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
+ */
+#define VRING_PACKED_EVENT_FLAG_DESC 0x2
+
+/*
+ * Wrap counter bit shift in event suppression structure
+ * of packed ring.
+ */
+#define VRING_PACKED_EVENT_F_WRAP_CTR 15
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+/*
+ * The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ */
+/*
+ * The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/*
+ * Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+/******************/
+
+
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *) 0)->member) *__mptr = (ptr); \
+ (type *) ((char *) __mptr - offsetof(type, member));})
+
+extern uint64_t vring_phys_addrs[10];
+extern uint32_t vring_phys_addrs_idx;
+
+typedef struct VRing {
+ unsigned int num;
+ unsigned int num_default;
+ unsigned int align;
+ uint64_t desc;
+ uint64_t avail;
+ uint64_t used;
+} VRing;
+
+typedef struct VRingDesc {
+ uint64_t addr;
+ uint32_t len;
+ uint16_t flags;
+ uint16_t next;
+} VRingDesc;
+
+typedef struct VRingPackedDesc {
+ uint64_t addr;
+ uint32_t len;
+ uint16_t id;
+ uint16_t flags;
+} VRingPackedDesc;
+
+typedef struct VRingAvail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[];
+} VRingAvail;
+
+typedef struct VRingUsedElem {
+ uint32_t id;
+ uint32_t len;
+} VRingUsedElem;
+
+typedef struct VRingUsed {
+ uint16_t flags;
+ uint16_t idx;
+ VRingUsedElem ring[];
+} VRingUsed;
+
+typedef struct VirtQueueElement {
+ unsigned int index;
+ unsigned int len;
+ unsigned int ndescs;
+ unsigned int out_num;
+ unsigned int in_num;
+ uint64_t *in_addr;
+ uint64_t *out_addr;
+ struct iovec *in_sg;
+ struct iovec *out_sg;
+} VirtQueueElement;
+
+typedef struct VirtIODevice VirtIODevice;
+typedef struct VirtQueue VirtQueue;
+typedef void (*VirtIOHandleOutput)(VirtIODevice *, VirtQueue *);
+
+typedef struct VirtQueue {
+ VRing vring;
+ VirtQueueElement *used_elems;
+
+ /* Next head to pop */
+ uint16_t last_avail_idx;
+ bool last_avail_wrap_counter;
+
+ /* Last avail_idx read from VQ. */
+ uint16_t shadow_avail_idx;
+ bool shadow_avail_wrap_counter;
+
+ uint16_t used_idx;
+ bool used_wrap_counter;
+
+ /* Last used index value we have signalled on */
+ uint16_t signalled_used;
+
+    /* Whether signalled_used is valid */
+ bool signalled_used_valid;
+
+ /* Notification enabled? */
+ bool notification;
+
+ uint16_t queue_index;
+
+ unsigned int inuse;
+
+ uint16_t vector;
+ VirtIOHandleOutput handle_output;
+ VirtIODevice *vdev;
+
+ EventNotifier guest_notifier;
+ EventNotifier host_notifier;
+ bool host_notifier_enabled;
+} VirtQueue;
+
+typedef struct VirtIORNG VirtIORNG;
+typedef struct VirtIOInput VirtIOInput;
+typedef struct VHostUserRNG VHostUserRNG;
+typedef struct VirtioDeviceClass VirtioDeviceClass;
+typedef struct VHostUserBlk VHostUserBlk;
+typedef struct VhostUserInput VhostUserInput;
+typedef struct VHostUserGPIO VHostUserGPIO;
+typedef struct VHostUserSound VHostUserSound;
+typedef struct VirtioBus VirtioBus;
+
+typedef struct VirtIODevice {
+ VirtioBus *vbus;
+ VirtioDeviceClass *vdev_class;
+ struct vhost_dev *vhdev;
+ const char *name;
+ uint8_t status;
+ uint8_t isr;
+ uint16_t queue_sel;
+ uint64_t guest_features;
+ uint64_t host_features;
+ uint64_t backend_features;
+ size_t config_len;
+ void *config;
+ uint16_t config_vector;
+ uint32_t generation;
+ int nvectors;
+ VirtQueue *vq;
+ VirtQueue **vqs;
+ int *nvqs;
+ uint16_t device_id;
+ bool vm_running;
+ bool broken; /* device in invalid state, needs reset */
+ bool use_disabled_flag; /* allow use of 'disable' flag when needed */
+ bool disabled; /* device in temporarily disabled state */
+ bool use_started;
+ bool started;
+ bool start_on_kick; /* when virtio 1.0 feature has not been negotiated */
+ bool disable_legacy_check;
+ char *bus_name;
+ uint8_t device_endian;
+ bool use_guest_notifier_mask;
+ /* TODO: Switch to union? */
+ VirtIORNG *vrng;
+ VirtIOInput *vinput;
+ VHostUserRNG *vhrng;
+ VHostUserBlk *vhublk;
+ VhostUserInput *vhuinput;
+ VHostUserSound *vhusnd;
+ VHostUserGPIO *vhugpio;
+} VirtIODevice;
+
+typedef struct efd_data {
+ int efd[2];
+ int pid;
+} efd_data_t;
+
+typedef struct virtio_device_info_struct {
+ unsigned long magic;
+ unsigned long version;
+ unsigned long device_id;
+ unsigned long vendor;
+
+} virtio_device_info_struct_t;
+
+
+/* Negotiation structs */
+
+typedef struct { int counter; } atomic_t;
+
+typedef struct virtio_neg {
+ uint64_t notification;
+ uint64_t data;
+ uint64_t size;
+ bool read;
+ atomic_t done;
+} virtio_neg_t;
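+
+/*
+ * The global "address" pointer, mmap'ed from /dev/loopback in
+ * virtio_loopback_start(), is assumed to point to one shared virtio_neg_t
+ * page: the driver fills in notification (register offset), size, read
+ * and (for writes) data, and the adapter answers through the same page
+ * before issuing the WAKEUP ioctl.
+ */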
+
+
+/* This is left here as a reference, might be useful in the future */
+/*
+ * static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
+ * {
+ * BusClass *bus_class = BUS_CLASS(klass);
+ * VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
+ *
+ * k->notify = virtio_mmio_update_irq;
+ * k->save_config = virtio_mmio_save_config;
+ * k->load_config = virtio_mmio_load_config;
+ * k->save_extra_state = virtio_mmio_save_extra_state;
+ * k->load_extra_state = virtio_mmio_load_extra_state;
+ * k->has_extra_state = virtio_mmio_has_extra_state;
+ * k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
+ * k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
+ * k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
+ * k->pre_plugged = virtio_mmio_pre_plugged;
+ * k->vmstate_change = virtio_mmio_vmstate_change;
+ * k->has_variable_vring_alignment = true;
+ * bus_class->max_dev = 1;
+ * bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
+ * }
+ *
+ */
+
+
+typedef struct VirtioBus {
+
+ VirtIODevice *vdev;
+ void (*notify)(VirtIODevice *d, uint16_t vector);
+ bool (*has_extra_state)(VirtIODevice *d);
+ bool (*query_guest_notifiers)(VirtIODevice *d);
+ int (*set_guest_notifiers)(VirtIODevice *d, int nvqs, bool assign);
+ void (*vmstate_change)(VirtIODevice *d, bool running);
+ void (*pre_plugged)(VirtIODevice *d);
+ void (*device_plugged)(VirtIODevice *d);
+ /*
+ * transport independent exit function.
+ * This is called by virtio-bus just before the device is unplugged.
+ */
+ void (*device_unplugged)(VirtIODevice *d);
+ int (*query_nvectors)(VirtIODevice *d);
+ /*
+ * ioeventfd handling: if the transport implements ioeventfd_assign,
+ * it must implement ioeventfd_enabled as well.
+ */
+ /* Returns true if the ioeventfd is enabled for the device. */
+ bool (*ioeventfd_enabled)(VirtIODevice *d);
+ /*
+ * Assigns/deassigns the ioeventfd backing for the transport on
+ * the device for queue number n. Returns an error value on
+ * failure.
+ */
+ int (*ioeventfd_assign)(VirtIOMMIOProxy *d, EventNotifier *notifier,
+ int n, bool assign);
+ /*
+ * Whether queue number n is enabled.
+ */
+ bool (*queue_enabled)(VirtIODevice *d, int n);
+ /*
+ * Does the transport have variable vring alignment?
+ * (ie can it ever call virtio_queue_set_align()?)
+ * Note that changing this will break migration for this transport.
+ */
+ bool has_variable_vring_alignment;
+ bool (*iommu_enabled)(VirtIODevice *d);
+
+ /*
+ * Set if ioeventfd has been started.
+ */
+ bool ioeventfd_started;
+
+ /*
+ * Set if ioeventfd has been grabbed by vhost. When ioeventfd
+ * is grabbed by vhost, we track its started/stopped state (which
+ * depends in turn on the virtio status register), but do not
+ * register a handler for the ioeventfd. When ioeventfd is
+ * released, if ioeventfd_started is true we finally register
+ * the handler so that QEMU's device model can use ioeventfd.
+ */
+ int ioeventfd_grabbed;
+} VirtioBus;
+
+
+typedef struct VirtioDeviceClass {
+ /*< private >*/
+ VirtIODevice *parent;
+ /*< public >*/
+ /* This is what a VirtioDevice must implement */
+ uint64_t (*get_features)(VirtIODevice *vdev,
+ uint64_t requested_features);
+ uint64_t (*bad_features)(VirtIODevice *vdev);
+ void (*set_features)(VirtIODevice *vdev, uint64_t val);
+ int (*validate_features)(VirtIODevice *vdev);
+ void (*get_config)(VirtIODevice *vdev, uint8_t *config);
+ void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
+ void (*reset)(VirtIODevice *vdev);
+ void (*set_status)(VirtIODevice *vdev, uint8_t val);
+ void (*realize)(void);
+ void (*unrealize)(VirtIODevice *vdev);
+ /*
+ * For transitional devices, this is a bitmap of features
+ * that are only exposed on the legacy interface but not
+ * the modern one.
+ */
+ uint64_t legacy_features;
+ /*
+ * Test and clear event pending status.
+ * Should be called after unmask to avoid losing events.
+ * If backend does not support masking,
+ * must check in frontend instead.
+ */
+ bool (*guest_notifier_pending)(VirtIODevice *vdev, int n);
+ /*
+ * Mask/unmask events from this vq. Any events reported
+ * while masked will become pending.
+ * If backend does not support masking,
+ * must mask in frontend instead.
+ */
+ void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask);
+ int (*start_ioeventfd)(VirtIODevice *vdev);
+ void (*stop_ioeventfd)(VirtIODevice *vdev);
+ /*
+ * Saving and loading of a device; trying to deprecate save/load
+ * use vmsd for new devices.
+ */
+ /*
+ * Post load hook in vmsd is called early while device is processed, and
+ * when VirtIODevice isn't fully initialized. Devices should use this
+ * instead, unless they specifically want to verify the migration stream
+ * as it's processed, e.g. for bounds checking.
+ */
+ int (*post_load)(VirtIODevice *vdev);
+ bool (*primary_unplug_pending)(void *opaque);
+
+ void (*update_mem_table)(VirtIODevice *vdev);
+ void (*print_config)(uint8_t *config_data);
+
+ struct vhost_dev *(*get_vhost)(VirtIODevice *vdev);
+} VirtioDeviceClass;
+
+/* Global variables */
+extern int fd;
+extern int loopback_fd;
+
+void handle_input(VirtIODevice *vdev, VirtQueue *vq);
+void *my_select(void *data);
+void *wait_read_write(void *data);
+void virtio_notify_config(VirtIODevice *vdev);
+void create_rng_struct(void);
+void print_neg_flag(uint64_t neg_flag, bool read);
+void adapter_read_write_cb(void);
+int virtio_loopback_start(void);
+
+int virtio_queue_ready(VirtQueue *vq);
+void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
+ unsigned int *out_bytes,
+ unsigned max_in_bytes, unsigned max_out_bytes);
+void virtio_add_feature(uint64_t *features, unsigned int fbit);
+bool virtio_has_feature(uint64_t features, unsigned int fbit);
+bool virtio_device_started(VirtIODevice *vdev, uint8_t status);
+bool virtio_device_should_start(VirtIODevice *vdev, uint8_t status);
+
+int virtio_queue_empty(VirtQueue *vq);
+void *virtqueue_pop(VirtQueue *vq, size_t sz);
+void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len);
+size_t iov_from_buf(const struct iovec *iov, unsigned int iov_cnt,
+ size_t offset, const void *buf, size_t bytes);
+bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
+ unsigned int *head);
+void virtio_notify_vector(VirtIODevice *vdev);
+
+enum {
+ VIRTQUEUE_READ_DESC_ERROR = -1,
+ VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
+ VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */
+};
+
+size_t qemu_iov_from_buf(const struct iovec *iov, unsigned int iov_cnt,
+ size_t offset, const void *buf, size_t bytes);
+VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
+ VirtIOHandleOutput handle_output);
+VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
+void virtio_dev_init(VirtIODevice *vdev, const char *name,
+ uint16_t device_id, size_t config_size);
+void virtio_loopback_bus_init(VirtioBus *k);
+int virtio_bus_set_host_notifier(VirtioBus *vbus, int n, bool assign);
+EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
+EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
+uint64_t virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
+uint64_t virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
+uint64_t virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
+int virtio_queue_get_num(VirtIODevice *vdev, int n);
+unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
+uint64_t virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
+uint64_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
+uint64_t virtio_queue_get_used_size(VirtIODevice *vdev, int n);
+void virtio_set_isr(VirtIODevice *vdev, int value);
+int virtio_device_grab_ioeventfd(VirtIODevice *vdev);
+bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev);
+size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt,
+ size_t offset, const void *buf, size_t bytes);
+void event_notifier_set_handler(EventNotifier *e,
+ void *handler);
+void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
+int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
+ unsigned int max, unsigned int *next);
+void print_config(uint8_t *config);
+uint32_t get_vqs_max_size(VirtIODevice *vdev);
+
+/*
+ * Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them?
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+#define VIRTIO_CONFIG_S_FEATURES_OK 8
+#define VIRTIO_CONFIG_S_DRIVER_OK 4
+#define VIRTIO_F_VERSION_1 32
+#define VIRTIO_F_ACCESS_PLATFORM 33
+
+/*
+ * Legacy name for VIRTIO_F_ACCESS_PLATFORM
+ * (for compatibility with old userspace)
+ */
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/* QEMU Aligned functions */
+/*
+ * Round number down to multiple. Safe when m is not a power of 2 (see
+ * ROUND_DOWN for a faster version when a power of 2 is guaranteed).
+ */
+#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))
+
+/*
+ * Round number up to multiple. Safe when m is not a power of 2 (see
+ * ROUND_UP for a faster version when a power of 2 is guaranteed).
+ */
+#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m))
+
+/* Check if n is a multiple of m */
+#define QEMU_IS_ALIGNED(n, m) (((n) % (m)) == 0)
+
+/* n-byte align pointer down */
+#define QEMU_ALIGN_PTR_DOWN(p, n) \
+ ((typeof(p))QEMU_ALIGN_DOWN((uintptr_t)(p), (n)))
+
+/* n-byte align pointer up */
+#define QEMU_ALIGN_PTR_UP(p, n) \
+ ((typeof(p))QEMU_ALIGN_UP((uintptr_t)(p), (n)))
+
+/* Check if pointer p is n-bytes aligned */
+#define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n))
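+
+/*
+ * Worked examples:
+ *   QEMU_ALIGN_DOWN(1005, 100) == 1000
+ *   QEMU_ALIGN_UP(1005, 100)   == 1100
+ *   QEMU_IS_ALIGNED(4096, 512) == true
+ */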
+
+/*
+ * Define 1 GB offset in order to request big enough
+ * memory blocks from the kernel:
+ * 0x40000000 = 1024 * 1024 * 1024 = 64 * 4096 * 4096 = 1G
+ */
+#define OFFSET_1GB (64ULL * PAGE_SIZE * PAGE_SIZE)
+
+/*
+ * Define starting physical address of host memory address space
+ */
+#define INIT_PA 0
+
+
+extern VirtIODevice *global_vdev;
+extern VirtIOMMIOProxy *proxy;
+extern VirtioBus *global_vbus;
+
+#endif /* VIRTIO_LOOPBACK */
+
diff --git a/virtio_rng.c b/virtio_rng.c
new file mode 100644
index 0000000..6a5b24b
--- /dev/null
+++ b/virtio_rng.c
@@ -0,0 +1,179 @@
+/*
+ * A virtio device implementing a hardware random number generator.
+ *
+ * Based on virtio-rng.c of QEMU project
+ * Copyright 2012 Red Hat, Inc.
+ * Copyright 2012 Amit Shah <amit.shah@redhat.com>
+ *
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+#include <sys/param.h>
+
+/* Project header files */
+#include "virtio_loopback.h"
+#include "virtio_rng.h"
+
+#ifdef DEBUG
+#define DBG(...) printf("virtio-rng: " __VA_ARGS__)
+#else
+#define DBG(...)
+#endif /* DEBUG */
+
+bool is_guest_ready(VirtIORNG *vrng)
+{
+ VirtIODevice *vdev = vrng->parent_obj;
+
+ if (virtio_queue_ready(vrng->vq)
+ && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ return true;
+ }
+ return false;
+}
+
+size_t get_request_size(VirtQueue *vq, unsigned quota)
+{
+ unsigned int in, out;
+
+ virtqueue_get_avail_bytes(vq, &in, &out, quota, 0);
+ return in;
+}
+
+void virtio_rng_set_status(VirtIODevice *vdev, uint8_t status)
+{
+ VirtIORNG *vrng = vdev->vrng;
+
+ vdev->status = status;
+
+ /* Something changed, try to process buffers */
+ virtio_rng_process(vrng);
+}
+
+/* Send data from a char device over to the guest */
+void chr_read(VirtIORNG *vrng, const void *buf, size_t size)
+{
+ VirtIODevice *vdev = vrng->parent_obj;
+ VirtQueueElement *elem;
+ size_t len;
+ int offset;
+
+ if (!is_guest_ready(vrng)) {
+ return;
+ }
+
+ vrng->quota_remaining -= size;
+
+ offset = 0;
+ while (offset < size) {
+ elem = virtqueue_pop(vrng->vq, sizeof(VirtQueueElement));
+
+
+ if (!elem) {
+ break;
+ }
+ len = qemu_iov_from_buf(elem->in_sg, elem->in_num,
+ 0, buf + offset, size - offset);
+ offset += len;
+
+ virtqueue_push(vrng->vq, elem, len);
+
+ /*
+         * TODO: We need to free the elem
+ *
+ * g_free(elem);
+ */
+ }
+ virtio_notify(vdev, vrng->vq);
+
+ if (!virtio_queue_empty(vrng->vq)) {
+ /*
+ * If we didn't drain the queue, call virtio_rng_process
+ * to take care of asking for more data as appropriate.
+ */
+ virtio_rng_process(vrng);
+ }
+}
+
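+/*
+ * Fixed 64-byte test pattern served to the guest in place of real
+ * entropy; a production backend would read from a host entropy source.
+ */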
+const char test_str[64] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63};
+
+void virtio_rng_process(VirtIORNG *vrng)
+{
+ size_t size;
+ unsigned quota;
+
+ if (!is_guest_ready(vrng)) {
+ return;
+ }
+
+ if (vrng->quota_remaining < 0) {
+ quota = 0;
+ } else {
+ quota = MIN((uint64_t)vrng->quota_remaining, (uint64_t)UINT32_MAX);
+ }
+ size = get_request_size(vrng->vq, quota);
+ size = MIN(vrng->quota_remaining, size);
+
+ if (size) {
+ chr_read(vrng, &test_str, size);
+ }
+}
+
+void handle_input(VirtIODevice *vdev, VirtQueue *vq)
+{
+ virtio_rng_process(vdev->vrng);
+}
+
+static void virtio_dev_class_init(VirtIODevice *vdev)
+{
+ vdev->vdev_class = (VirtioDeviceClass *)malloc(sizeof(VirtioDeviceClass));
+ vdev->vdev_class->parent = vdev;
+ vdev->vdev_class->set_status = virtio_rng_set_status;
+}
+
+void virtio_rng_init(VirtIODevice *vdev)
+{
+ VirtIORNG *vrng = (VirtIORNG *)malloc(sizeof(VirtIORNG));
+ vdev->vrng = vrng;
+ vrng->parent_obj = vdev;
+ vrng->vq = vdev->vq;
+ vrng->quota_remaining = LONG_MAX;
+
+ /* Prepare dev_class */
+ virtio_dev_class_init(vdev);
+}
+
+
+void virtio_rng_realize(void)
+{
+    /* Prepare the proxy and the virtio device */
+ proxy = (VirtIOMMIOProxy *)malloc(sizeof(VirtIOMMIOProxy));
+
+ virtio_dev_init(global_vdev, "virtio-rng", 4, 0);
+
+ virtio_rng_init(global_vdev);
+
+ global_vdev->vq = virtio_add_queue(global_vdev, 8, handle_input);
+
+    /*
+     * Legacy feature bits 24 (NOTIFY_ON_EMPTY), 27 (ANY_LAYOUT),
+     * 28 (RING_F_INDIRECT_DESC) and 29 (RING_F_EVENT_IDX).
+     */
+    global_vdev->host_features = 0x39000000;
+
+ *proxy = (VirtIOMMIOProxy) {
+ .legacy = 1,
+ };
+}
+
diff --git a/virtio_rng.h b/virtio_rng.h
new file mode 100644
index 0000000..cf35c29
--- /dev/null
+++ b/virtio_rng.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef VIRTIO_RNG_DEV
+#define VIRTIO_RNG_DEV
+
+#include "virtio_loopback.h"
+
+extern const char test_str[64];
+
+typedef struct VirtIORNGConf {
+ uint64_t max_bytes;
+ uint32_t period_ms;
+} VirtIORNGConf;
+
+typedef struct VirtIORNG {
+ VirtIODevice *parent_obj;
+
+ /* Only one vq - guest puts buffer(s) on it when it needs entropy */
+ VirtQueue *vq;
+ VirtIORNGConf conf;
+
+ /*
+ * We purposefully don't migrate this state. The quota will reset on the
+ * destination as a result. Rate limiting is host state, not guest state.
+ */
+ int64_t quota_remaining;
+ bool activate_timer;
+
+} VirtIORNG;
+
+bool is_guest_ready(VirtIORNG *vrng);
+size_t get_request_size(VirtQueue *vq, unsigned quota);
+void virtio_rng_set_status(VirtIODevice *vdev, uint8_t status);
+void virtio_rng_process(VirtIORNG *vrng);
+void chr_read(VirtIORNG *vrng, const void *buf, size_t size);
+void virtio_rng_realize(void);
+void virtio_rng_init(VirtIODevice *vdev);
+
+#endif /* VIRTIO_RNG_DEV */