path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--   include/adapter.h              44
-rw-r--r--   include/event_notifier.h       54
-rw-r--r--   include/parser.h               42
-rw-r--r--   include/queue.h               576
-rw-r--r--   include/vhost_loopback.h       80
-rw-r--r--   include/vhost_user_blk.h       59
-rw-r--r--   include/vhost_user_can.h       54
-rw-r--r--   include/vhost_user_console.h   46
-rw-r--r--   include/vhost_user_gpio.h      37
-rw-r--r--   include/vhost_user_input.h     30
-rw-r--r--   include/vhost_user_loopback.h 994
-rw-r--r--   include/vhost_user_rng.h       43
-rw-r--r--   include/vhost_user_sound.h     42
-rw-r--r--   include/virtio_loopback.h     715
-rw-r--r--   include/virtio_rng.h           56
15 files changed, 2872 insertions, 0 deletions
diff --git a/include/adapter.h b/include/adapter.h
new file mode 100644
index 0000000..601ed23
--- /dev/null
+++ b/include/adapter.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright (c) 2022-2023 Virtual Open Systems SAS.
+ *
+ * Author:
+ * Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef ADAPTER
+#define ADAPTER
+
+/* Project header files */
+#include "virtio_loopback.h"
+#include "vhost_user_loopback.h"
+#include "virtio_rng.h"
+#include "vhost_user_rng.h"
+#include "vhost_user_blk.h"
+#include "vhost_user_input.h"
+#include "vhost_user_gpio.h"
+#include "vhost_user_sound.h"
+#include "vhost_user_can.h"
+#include "vhost_user_console.h"
+
+extern Device devices[MAX_DEVICES];
+
+/* Global functions */
+int parse_args(int argc, char *argv[]);
+void help_args(void);
+
+#endif /* ADAPTER */
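
A minimal usage sketch for the interface above (illustrative only, not part
of the commit): how a front-end might drive parse_args() and help_args().
The return-value convention is an assumption; the adapter's actual main()
is not shown in this diff.

    #include "adapter.h"

    int main(int argc, char *argv[])
    {
        /* assumed convention: negative return signals a parse error */
        if (parse_args(argc, argv) < 0) {
            help_args();            /* print the supported options */
            return 1;
        }
        /* device realization would follow here */
        return 0;
    }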
diff --git a/include/event_notifier.h b/include/event_notifier.h
new file mode 100644
index 0000000..718f784
--- /dev/null
+++ b/include/event_notifier.h
@@ -0,0 +1,54 @@
+/*
+ * Based on event_notifier.h of QEMU project
+ *
+ * Copyright Red Hat, Inc. 2010
+ *
+ * Authors:
+ * Michael S. Tsirkin <mst@redhat.com>
+ *
+ * Copyright 2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+#ifndef EVENT_NOT_H
+#define EVENT_NOT_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <poll.h>
+#include <pthread.h>
+
+typedef struct EventNotifier {
+ int rfd;
+ int wfd;
+ bool initialized;
+} EventNotifier;
+
+
+int fcntl_setfl(int fd, int flag);
+void qemu_set_cloexec(int fd);
+int qemu_pipe(int pipefd[2]);
+int event_notifier_get_fd(const EventNotifier *e);
+int event_notifier_get_wfd(const EventNotifier *e);
+int event_notifier_set(EventNotifier *e);
+int event_notifier_init(EventNotifier *e, int active);
+bool ioeventfd_enabled(void);
+int event_notifier_test_and_clear(EventNotifier *e);
+
+
+#endif /* EVENT_NOT_H */
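
A hedged sketch of the eventfd-backed semantics behind the API above (an
illustration under assumptions, not this file's implementation): when a real
eventfd is available, rfd == wfd, event_notifier_set() maps to a write of the
8-byte counter, and test_and_clear() maps to a read that drains it. The
example_* helper names below are hypothetical.

    #include <unistd.h>
    #include <sys/eventfd.h>

    static int example_notifier_init(EventNotifier *e)
    {
        int fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
        if (fd < 0)
            return -1;          /* a pipe-based fallback would go here */
        e->rfd = e->wfd = fd;
        e->initialized = true;
        return 0;
    }

    static int example_notifier_set(EventNotifier *e)
    {
        uint64_t value = 1;
        return write(e->wfd, &value, sizeof(value)) == sizeof(value) ? 0 : -1;
    }

    static int example_notifier_test_and_clear(EventNotifier *e)
    {
        uint64_t value;
        /* reading drains the counter; nonzero means an event was pending */
        return read(e->rfd, &value, sizeof(value)) == sizeof(value);
    }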
diff --git a/include/parser.h b/include/parser.h
new file mode 100644
index 0000000..f37e41d
--- /dev/null
+++ b/include/parser.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022-2024 Virtual Open Systems SAS.
+ *
+ * Author:
+ * Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef PARSER_H
+#define PARSER_H
+
+/* Project header files */
+#include "adapter.h"
+
+/* Default values for queue_num and queue_size */
+#define DEFAULT_QUEUE_NUM 1
+#define DEFAULT_QUEUE_SIZE 1024
+
+/* Global variables */
+const char *adapter_devices[] = {"vrng", "vhurng", "vhublk",
+ "vhuinput", "vhusnd", "vhugpio",
+ "vhucan", "vhuconsole"};
+const char *vhu_devices[] = {"vhurng", "vhublk", "vhuinput",
+ "vhusnd", "vhugpio", "vhucan",
+ "vhuconsole"};
+const unsigned int num_adapter_devices = sizeof(adapter_devices) / sizeof(adapter_devices[0]);
+const unsigned int num_vhu_devices = sizeof(vhu_devices) / sizeof(vhu_devices[0]);
+
+#endif /* PARSER_H */
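
An illustrative helper (hypothetical, not in this commit) showing how a
command-line token can be checked against the adapter_devices[] table
defined above:

    #include <string.h>

    static int example_is_adapter_device(const char *name)
    {
        for (unsigned int i = 0; i < num_adapter_devices; i++) {
            if (strcmp(name, adapter_devices[i]) == 0)
                return 1;
        }
        return 0;
    }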
diff --git a/include/queue.h b/include/queue.h
new file mode 100644
index 0000000..e029e7b
--- /dev/null
+++ b/include/queue.h
@@ -0,0 +1,576 @@
+/* $NetBSD: queue.h,v 1.52 2009/04/20 09:56:08 mschuett Exp $ */
+
+/*
+ * QEMU version: Copy from netbsd, removed debug code, removed some of
+ * the implementations. Left in singly-linked lists, lists, simple
+ * queues, and tail queues.
+ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef QEMU_SYS_QUEUE_H
+#define QEMU_SYS_QUEUE_H
+
+/*
+ * This file defines four types of data structures: singly-linked lists,
+ * lists, simple queues, and tail queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The
+ * elements are singly linked for minimum space and pointer manipulation
+ * overhead at the expense of O(n) removal for arbitrary elements. New
+ * elements can be added to the list after an existing element or at the
+ * head of the list. Elements being removed from the head of the list
+ * should use the explicit macro for this purpose for optimum
+ * efficiency. A singly-linked list may only be traversed in the forward
+ * direction. Singly-linked lists are ideal for applications with large
+ * datasets and few or no removals or for implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+/*
+ * List definitions.
+ */
+#define QLIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define QLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define QLIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+#define QLIST_INIT(head) do { \
+ (head)->lh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_SWAP(dstlist, srclist, field) do { \
+ void *tmplist; \
+ tmplist = (srclist)->lh_first; \
+ (srclist)->lh_first = (dstlist)->lh_first; \
+ if ((srclist)->lh_first != NULL) { \
+ (srclist)->lh_first->field.le_prev = &(srclist)->lh_first; \
+ } \
+ (dstlist)->lh_first = tmplist; \
+ if ((dstlist)->lh_first != NULL) { \
+ (dstlist)->lh_first->field.le_prev = &(dstlist)->lh_first; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+ (elm)->field.le_next = NULL; \
+ (elm)->field.le_prev = NULL; \
+} while (/*CONSTCOND*/0)
+
+/*
+ * Like QLIST_REMOVE() but safe to call when elm is not in a list
+ */
+#define QLIST_SAFE_REMOVE(elm, field) do { \
+ if ((elm)->field.le_prev != NULL) { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+ (elm)->field.le_next = NULL; \
+ (elm)->field.le_prev = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+/* Is elm in a list? */
+#define QLIST_IS_INSERTED(elm, field) ((elm)->field.le_prev != NULL)
+
+#define QLIST_FOREACH(var, head, field) \
+ for ((var) = ((head)->lh_first); \
+ (var); \
+ (var) = ((var)->field.le_next))
+
+#define QLIST_FOREACH_SAFE(var, head, field, next_var) \
+ for ((var) = ((head)->lh_first); \
+ (var) && ((next_var) = ((var)->field.le_next), 1); \
+ (var) = (next_var))
+
+/*
+ * List access methods.
+ */
+#define QLIST_EMPTY(head) ((head)->lh_first == NULL)
+#define QLIST_FIRST(head) ((head)->lh_first)
+#define QLIST_NEXT(elm, field) ((elm)->field.le_next)
+
+
+/*
+ * Singly-linked List definitions.
+ */
+#define QSLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define QSLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define QSLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define QSLIST_INIT(head) do { \
+ (head)->slh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_HEAD_ATOMIC(head, elm, field) do { \
+ typeof(elm) save_sle_next; \
+ do { \
+ save_sle_next = (elm)->field.sle_next = (head)->slh_first; \
+ } while (qatomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) !=\
+ save_sle_next); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_MOVE_ATOMIC(dest, src) do { \
+ (dest)->slh_first = qatomic_xchg(&(src)->slh_first, NULL); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE_HEAD(head, field) do { \
+ typeof((head)->slh_first) elm = (head)->slh_first; \
+ (head)->slh_first = elm->field.sle_next; \
+ elm->field.sle_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE_AFTER(slistelm, field) do { \
+ typeof(slistelm) next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = next->field.sle_next; \
+ next->field.sle_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+ QSLIST_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->slh_first; \
+ while (curelm->field.sle_next != (elm)) \
+ curelm = curelm->field.sle_next; \
+ curelm->field.sle_next = curelm->field.sle_next->field.sle_next; \
+ (elm)->field.sle_next = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_FOREACH(var, head, field) \
+ for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
+
+#define QSLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = QSLIST_FIRST((head)); \
+ (var) && ((tvar) = QSLIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+/*
+ * Singly-linked List access methods.
+ */
+#define QSLIST_EMPTY(head) ((head)->slh_first == NULL)
+#define QSLIST_FIRST(head) ((head)->slh_first)
+#define QSLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+
+/*
+ * Simple queue definitions.
+ */
+#define QSIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define QSIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define QSIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue functions.
+ */
+#define QSIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_REMOVE_HEAD(head, field) do { \
+ typeof((head)->sqh_first) elm = (head)->sqh_first; \
+ if (((head)->sqh_first = elm->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+ elm->field.sqe_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_SPLIT_AFTER(head, elm, field, removed) do { \
+ QSIMPLEQ_INIT(removed); \
+ if (((removed)->sqh_first = (head)->sqh_first) != NULL) { \
+ if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) { \
+ (head)->sqh_last = &(head)->sqh_first; \
+ } \
+ (removed)->sqh_last = &(elm)->field.sqe_next; \
+ (elm)->field.sqe_next = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_REMOVE(head, elm, type, field) do { \
+ if ((head)->sqh_first == (elm)) { \
+ QSIMPLEQ_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->sqh_first; \
+ while (curelm->field.sqe_next != (elm)) \
+ curelm = curelm->field.sqe_next; \
+ if ((curelm->field.sqe_next = \
+ curelm->field.sqe_next->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(curelm)->field.sqe_next; \
+ (elm)->field.sqe_next = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->sqh_first); \
+ (var); \
+ (var) = ((var)->field.sqe_next))
+
+#define QSIMPLEQ_FOREACH_SAFE(var, head, field, next) \
+ for ((var) = ((head)->sqh_first); \
+ (var) && ((next = ((var)->field.sqe_next)), 1); \
+ (var) = (next))
+
+#define QSIMPLEQ_CONCAT(head1, head2) do { \
+ if (!QSIMPLEQ_EMPTY((head2))) { \
+ *(head1)->sqh_last = (head2)->sqh_first; \
+ (head1)->sqh_last = (head2)->sqh_last; \
+ QSIMPLEQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_PREPEND(head1, head2) do { \
+ if (!QSIMPLEQ_EMPTY((head2))) { \
+ *(head2)->sqh_last = (head1)->sqh_first; \
+ (head1)->sqh_first = (head2)->sqh_first; \
+ QSIMPLEQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_LAST(head, type, field) \
+ (QSIMPLEQ_EMPTY((head)) ? \
+ NULL : \
+ ((struct type *)(void *) \
+ ((char *)((head)->sqh_last) - offsetof(struct type, field))))
+
+/*
+ * Simple queue access methods.
+ */
+#define QSIMPLEQ_EMPTY_ATOMIC(head) \
+ (qatomic_read(&((head)->sqh_first)) == NULL)
+#define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
+#define QSIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+typedef struct QTailQLink {
+ void *tql_next;
+ struct QTailQLink *tql_prev;
+} QTailQLink;
+
+/*
+ * Tail queue definitions. The union acts as a poor man's template, as if
+ * it were QTailQLink<type>.
+ */
+#define QTAILQ_HEAD(name, type) \
+union name { \
+ struct type *tqh_first; /* first element */ \
+ QTailQLink tqh_circ; /* link for circular backwards list */ \
+}
+
+#define QTAILQ_HEAD_INITIALIZER(head) \
+ { .tqh_circ = { NULL, &(head).tqh_circ } }
+
+#define QTAILQ_ENTRY(type) \
+union { \
+ struct type *tqe_next; /* next element */ \
+ QTailQLink tqe_circ; /* link for circular backwards list */ \
+}
+
+/*
+ * Tail queue functions.
+ */
+#define QTAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_circ.tql_prev = &(head)->tqh_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_circ.tql_prev = \
+ &(elm)->field.tqe_circ; \
+ else \
+ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \
+ (head)->tqh_circ.tql_prev->tql_next = (elm); \
+ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_circ.tql_prev = \
+ &(elm)->field.tqe_circ; \
+ else \
+ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ (listelm)->field.tqe_circ.tql_prev->tql_next = (elm); \
+ (listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_circ.tql_prev = \
+ (elm)->field.tqe_circ.tql_prev; \
+ else \
+ (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
+ (elm)->field.tqe_circ.tql_prev->tql_next = (elm)->field.tqe_next; \
+ (elm)->field.tqe_circ.tql_prev = NULL; \
+ (elm)->field.tqe_circ.tql_next = NULL; \
+ (elm)->field.tqe_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+/* remove @left, @right and all elements in between from @head */
+#define QTAILQ_REMOVE_SEVERAL(head, left, right, field) do { \
+ if (((right)->field.tqe_next) != NULL) \
+ (right)->field.tqe_next->field.tqe_circ.tql_prev = \
+ (left)->field.tqe_circ.tql_prev; \
+ else \
+ (head)->tqh_circ.tql_prev = (left)->field.tqe_circ.tql_prev; \
+ (left)->field.tqe_circ.tql_prev->tql_next = (right)->field.tqe_next; \
+ } while (/*CONSTCOND*/0)
+
+#define QTAILQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->tqh_first); \
+ (var); \
+ (var) = ((var)->field.tqe_next))
+
+#define QTAILQ_FOREACH_SAFE(var, head, field, next_var) \
+ for ((var) = ((head)->tqh_first); \
+ (var) && ((next_var) = ((var)->field.tqe_next), 1); \
+ (var) = (next_var))
+
+#define QTAILQ_FOREACH_REVERSE(var, head, field) \
+ for ((var) = QTAILQ_LAST(head); \
+ (var); \
+ (var) = QTAILQ_PREV(var, field))
+
+#define QTAILQ_FOREACH_REVERSE_SAFE(var, head, field, prev_var) \
+ for ((var) = QTAILQ_LAST(head); \
+ (var) && ((prev_var) = QTAILQ_PREV(var, field), 1); \
+ (var) = (prev_var))
+
+/*
+ * Tail queue access methods.
+ */
+#define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+#define QTAILQ_FIRST(head) ((head)->tqh_first)
+#define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define QTAILQ_IN_USE(elm, field) ((elm)->field.tqe_circ.tql_prev != NULL)
+
+#define QTAILQ_LINK_PREV(link) \
+ ((link).tql_prev->tql_prev->tql_next)
+#define QTAILQ_LAST(head) \
+ ((typeof((head)->tqh_first)) QTAILQ_LINK_PREV((head)->tqh_circ))
+#define QTAILQ_PREV(elm, field) \
+ ((typeof((elm)->field.tqe_next)) QTAILQ_LINK_PREV((elm)->field.tqe_circ))
+
+#define field_at_offset(base, offset, type) \
+ ((type *) (((char *) (base)) + (offset)))
+
+/*
+ * Raw access of elements of a tail queue head. Offsets are all zero
+ * because it's a union.
+ */
+#define QTAILQ_RAW_FIRST(head) \
+ field_at_offset(head, 0, void *)
+#define QTAILQ_RAW_TQH_CIRC(head) \
+ field_at_offset(head, 0, QTailQLink)
+
+/*
+ * Raw access of elements of a tail entry
+ */
+#define QTAILQ_RAW_NEXT(elm, entry) \
+ field_at_offset(elm, entry, void *)
+#define QTAILQ_RAW_TQE_CIRC(elm, entry) \
+ field_at_offset(elm, entry, QTailQLink)
+/*
+ * Tail queue traversal using pointer arithmetic.
+ */
+#define QTAILQ_RAW_FOREACH(elm, head, entry) \
+ for ((elm) = *QTAILQ_RAW_FIRST(head); \
+ (elm); \
+ (elm) = *QTAILQ_RAW_NEXT(elm, entry))
+/*
+ * Tail queue insertion using pointer arithmetic.
+ */
+#define QTAILQ_RAW_INSERT_TAIL(head, elm, entry) do { \
+ *QTAILQ_RAW_NEXT(elm, entry) = NULL; \
+ QTAILQ_RAW_TQE_CIRC(elm, entry)->tql_prev = QTAILQ_RAW_TQH_CIRC(head)->tql_prev; \
+ QTAILQ_RAW_TQH_CIRC(head)->tql_prev->tql_next = (elm); \
+ QTAILQ_RAW_TQH_CIRC(head)->tql_prev = QTAILQ_RAW_TQE_CIRC(elm, entry); \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_RAW_FIRST(head) \
+ field_at_offset(head, 0, void *)
+
+#define QLIST_RAW_NEXT(elm, entry) \
+ field_at_offset(elm, entry, void *)
+
+#define QLIST_RAW_PREVIOUS(elm, entry) \
+ field_at_offset(elm, entry + sizeof(void *), void *)
+
+#define QLIST_RAW_FOREACH(elm, head, entry) \
+ for ((elm) = *QLIST_RAW_FIRST(head); \
+ (elm); \
+ (elm) = *QLIST_RAW_NEXT(elm, entry))
+
+#define QLIST_RAW_INSERT_AFTER(head, prev, elem, entry) do { \
+ *QLIST_RAW_NEXT(prev, entry) = elem; \
+ *QLIST_RAW_PREVIOUS(elem, entry) = QLIST_RAW_NEXT(prev, entry); \
+ *QLIST_RAW_NEXT(elem, entry) = NULL; \
+} while (0)
+
+#define QLIST_RAW_INSERT_HEAD(head, elm, entry) do { \
+ void *first = *QLIST_RAW_FIRST(head); \
+ *QLIST_RAW_FIRST(head) = elm; \
+ *QLIST_RAW_PREVIOUS(elm, entry) = QLIST_RAW_FIRST(head); \
+ if (first) { \
+ *QLIST_RAW_NEXT(elm, entry) = first; \
+ *QLIST_RAW_PREVIOUS(first, entry) = QLIST_RAW_NEXT(elm, entry); \
+ } else { \
+ *QLIST_RAW_NEXT(elm, entry) = NULL; \
+ } \
+} while (0)
+
+#endif /* QEMU_SYS_QUEUE_H */
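
A short usage sketch for the macros above (illustrative only): a doubly
linked QLIST of a caller-defined struct. The element type embeds a
QLIST_ENTRY and the head type is declared with QLIST_HEAD; a head is
initialized with QLIST_INIT() before elements are linked in with
QLIST_INSERT_HEAD().

    struct item {
        int value;
        QLIST_ENTRY(item) next;     /* link field named in the macros */
    };

    QLIST_HEAD(item_list, item);    /* declares struct item_list */

    static int example_sum(struct item_list *head)
    {
        struct item *it;
        int sum = 0;

        QLIST_FOREACH(it, head, next) {
            sum += it->value;
        }
        return sum;
    }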
diff --git a/include/vhost_loopback.h b/include/vhost_loopback.h
new file mode 100644
index 0000000..198a5af
--- /dev/null
+++ b/include/vhost_loopback.h
@@ -0,0 +1,80 @@
+/*
+ * Based on vhost.h of QEMU project
+ *
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef LOOPBACK_VHOST_H
+#define LOOPBACK_VHOST_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <poll.h>
+#include <pthread.h>
+#include "vhost_user_loopback.h"
+#include "virtio_loopback.h"
+
+#define VHOST_INVALID_FEATURE_BIT (0xff)
+#define VHOST_QUEUE_NUM_CONFIG_INR 0
+
+int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
+void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev,
+ int n, bool mask);
+int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
+ uint32_t config_len);
+int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
+ uint32_t offset, uint32_t size, uint32_t flags);
+uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
+ uint64_t features);
+void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
+ uint64_t features);
+
+/**
+ * vhost_dev_set_config_notifier() - register VhostDevConfigOps
+ * @hdev: common vhost_dev structure
+ * @ops: notifier ops
+ *
+ * If the device is expected to change its configuration, a notifier can be
+ * set up to handle the case.
+ */
+
+typedef struct VhostDevConfigOps VhostDevConfigOps;
+
+void vhost_dev_set_config_notifier(struct vhost_dev *dev,
+ const VhostDevConfigOps *ops);
+int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
+
+int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
+ struct vhost_inflight *inflight);
+
+int vhost_dev_set_inflight(struct vhost_dev *dev,
+ struct vhost_inflight *inflight);
+
+void update_mem_table(VirtIODevice *vdev);
+
+
+struct vhost_inflight {
+ int fd;
+ void *addr;
+ uint64_t size;
+ uint64_t offset;
+ uint16_t queue_size;
+};
+
+#endif /* LOOPBACK_VHOST_H */
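
A hedged sketch of the expected call order for the prototypes above (the
error-handling convention is an assumption; only the declarations are given
in this header): notifiers are wired up first, then the device is started.

    static int example_start(struct vhost_dev *hdev, VirtIODevice *vdev)
    {
        int r = vhost_dev_enable_notifiers(hdev, vdev);
        if (r < 0) {
            return r;
        }
        return vhost_dev_start(hdev, vdev, /* vrings = */ true);
    }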
diff --git a/include/vhost_user_blk.h b/include/vhost_user_blk.h
new file mode 100644
index 0000000..3b25dfe
--- /dev/null
+++ b/include/vhost_user_blk.h
@@ -0,0 +1,59 @@
+/*
+ * Based on vhost-user-blk.h of QEMU project
+ *
+ * Copyright(C) 2017 Intel Corporation.
+ *
+ * Authors:
+ * Changpeng Liu <changpeng.liu@intel.com>
+ *
+ *
+ * Copyright (c) 2022-2023 Virtual Open Systems SAS.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef VHOST_USER_BLK
+#define VHOST_USER_BLK
+
+#include "vhost_loopback.h"
+#include "vhost_user_loopback.h"
+#include "virtio_loopback.h"
+#include <linux/virtio_blk.h>
+
+#define TYPE_VHOST_USER_BLK "vhost-user-blk"
+
+#define VHOST_USER_BLK_AUTO_NUM_QUEUES UINT16_MAX
+
+struct VHostUserBlk {
+ VirtIODevice *parent;
+ struct vhost_virtqueue *vhost_vq;
+ struct vhost_dev *vhost_dev;
+ VirtQueue *req_vq;
+ VirtQueue **virtqs;
+ uint16_t num_queues;
+ uint32_t queue_size;
+    uint32_t config_wce;    /* write cache enable; needed for a future release */
+ struct vhost_inflight *inflight;
+ struct vhost_virtqueue *vhost_vqs;
+ struct virtio_blk_config blkcfg;
+ bool connected;
+ bool started_vu;
+};
+
+void vhost_user_blk_realize(struct adapter_dev *adev);
+
+#endif /* VHOST_USER_BLK */
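
An illustrative sketch (a hypothetical helper, built only from the
prototypes above): refreshing the cached blkcfg from the vhost-user backend
via vhost_dev_get_config() from vhost_loopback.h.

    static int example_refresh_blkcfg(struct VHostUserBlk *s)
    {
        return vhost_dev_get_config(s->vhost_dev,
                                    (uint8_t *)&s->blkcfg,
                                    sizeof(struct virtio_blk_config));
    }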
diff --git a/include/vhost_user_can.h b/include/vhost_user_can.h
new file mode 100644
index 0000000..4768499
--- /dev/null
+++ b/include/vhost_user_can.h
@@ -0,0 +1,54 @@
+/*
+ * Virtio CAN Device
+ *
+ * Based on virtio_can.h of OpenSynergy's virtio-can RFC
+ * https://github.com/OpenSynergy/qemu/tree/virtio-can-spec-rfc-v3
+ *
+ * Copyright (C) 2021-2023 OpenSynergy GmbH
+ * Copyright (c) 2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef VHOST_USER_CAN
+#define VHOST_USER_CAN
+
+#include "virtio_loopback.h"
+#include "vhost_loopback.h"
+#include "vhost_user_loopback.h"
+
+/* The following are defined in virtio_can.h -- delete them in the future */
+#define VIRTIO_CAN_S_CTRL_BUSOFF (1u << 0) /* Controller BusOff */
+struct virtio_can_config {
+ /* CAN controller status */
+ __le16 status;
+};
+
+typedef struct VHostUserCan {
+ VirtIODevice *parent;
+ struct vhost_virtqueue *vhost_vqs;
+ VirtQueue **virtqs;
+ uint16_t num_queues;
+ uint32_t queue_size;
+ struct virtio_can_config config;
+ struct vhost_dev *vhost_dev;
+ /* Support classic CAN */
+ bool support_can_classic;
+ /* Support CAN FD */
+ bool support_can_fd;
+} VHostUserCan;
+
+void vhost_user_can_realize(struct adapter_dev *adev);
+
+#endif /* VHOST_USER_CAN */
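
An illustrative check against the config space above (hypothetical helper;
le16toh() from <endian.h> is assumed for the little-endian status field):

    #include <endian.h>

    static bool example_controller_busoff(const VHostUserCan *can)
    {
        return (le16toh(can->config.status) & VIRTIO_CAN_S_CTRL_BUSOFF) != 0;
    }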
diff --git a/include/vhost_user_console.h b/include/vhost_user_console.h
new file mode 100644
index 0000000..9f0eed8
--- /dev/null
+++ b/include/vhost_user_console.h
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright (c) 2023 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ *
+ */
+
+#ifndef VHOST_USER_CONSOLE
+#define VHOST_USER_CONSOLE
+
+#include "virtio_loopback.h"
+#include "vhost_loopback.h"
+#include "vhost_user_loopback.h"
+
+/* Feature bits */
+#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
+#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
+#define VIRTIO_CONSOLE_F_EMERG_WRITE 2 /* Does host support emergency write? */
+
+struct virtio_console_config {
+    /* columns of the screen */
+    __virtio16 cols;
+    /* rows of the screen */
+ __virtio16 rows;
+ /* max. number of ports this device can hold */
+ __virtio32 max_nr_ports;
+ /* emergency write register */
+ __virtio32 emerg_wr;
+};
+
+typedef struct VHostUserConsole {
+ VirtIODevice *parent;
+ struct vhost_virtqueue *vhost_vqs;
+ VirtQueue **virtqs;
+ uint16_t num_queues;
+ uint32_t queue_size;
+ struct virtio_console_config config;
+ struct vhost_dev *vhost_dev;
+} VHostUserConsole;
+
+void vhost_user_console_realize(struct adapter_dev *adev);
+
+#endif /* VHOST_USER_CONSOLE */
diff --git a/include/vhost_user_gpio.h b/include/vhost_user_gpio.h
new file mode 100644
index 0000000..4d77e0a
--- /dev/null
+++ b/include/vhost_user_gpio.h
@@ -0,0 +1,37 @@
+/*
+ * Based on virtio-gpio.h of QEMU project
+ *
+ * Copyright (c) 2023-2024 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef VHOST_USER_GPIO
+#define VHOST_USER_GPIO
+
+#include "vhost_loopback.h"
+#include "vhost_user_loopback.h"
+#include "virtio_loopback.h"
+
+#define VIRTIO_GPIO_F_IRQ 0
+
+struct virtio_gpio_config {
+ uint16_t ngpio;
+ uint8_t padding[2];
+ uint32_t gpio_names_size;
+};
+
+struct VHostUserGPIO {
+ VirtIODevice *parent;
+ struct virtio_gpio_config config;
+ struct vhost_virtqueue *vhost_vqs;
+ struct vhost_dev *vhost_dev;
+ uint16_t num_queues;
+ uint32_t queue_size;
+};
+
+void vu_gpio_device_realize(struct adapter_dev *adev);
+
+#endif /* VHOST_USER_GPIO */
diff --git a/include/vhost_user_input.h b/include/vhost_user_input.h
new file mode 100644
index 0000000..8137694
--- /dev/null
+++ b/include/vhost_user_input.h
@@ -0,0 +1,30 @@
+/*
+ * Based on virtio-input.h of QEMU project
+ *
+ * Copyright (c) 2022-2024 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef VHOST_USER_INPUT
+#define VHOST_USER_INPUT
+
+#include "vhost_loopback.h"
+#include "vhost_user_loopback.h"
+#include "virtio_loopback.h"
+#include <linux/virtio_input.h>
+
+typedef struct VhostUserInput {
+ VirtIODevice *parent;
+ struct vhost_dev *vhost_dev;
+ struct virtio_input_config config;
+ VirtQueue *evt, *sts;
+ bool started;
+ bool completed;
+} VhostUserInput;
+
+void virtio_input_device_realize(struct adapter_dev *adev);
+
+#endif /* VHOST_USER_INPUT */
diff --git a/include/vhost_user_loopback.h b/include/vhost_user_loopback.h
new file mode 100644
index 0000000..3aa8e8c
--- /dev/null
+++ b/include/vhost_user_loopback.h
@@ -0,0 +1,994 @@
+/*
+ * Based on libvhost-user.h of QEMU project
+ *
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Victor Kaplansky <victork@redhat.com>
+ * Marc-André Lureau <mlureau@redhat.com>
+ *
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef LOOPBACK_LIBVHOST_USER_H
+#define LOOPBACK_LIBVHOST_USER_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <poll.h>
+#include <linux/vhost.h>
+#include <pthread.h>
+#include "virtio_loopback.h"
+#include "queue.h"
+
+struct scrub_regions {
+ struct vhost_memory_region *region;
+ int reg_idx;
+ int fd_idx;
+};
+
+struct vhost_virtqueue {
+ int kick;
+ int call;
+ void *desc;
+ void *avail;
+ void *used;
+ int num;
+ unsigned long long desc_phys;
+ unsigned desc_size;
+ unsigned long long avail_phys;
+ unsigned avail_size;
+ unsigned long long used_phys;
+ unsigned used_size;
+ EventNotifier masked_notifier;
+ EventNotifier masked_config_notifier;
+ struct vhost_dev *dev;
+};
+
+typedef struct VhostDevConfigOps {
+ /* Vhost device config space changed callback */
+ int (*vhost_dev_config_notifier)(struct vhost_dev *dev);
+} VhostDevConfigOps;
+
+
+typedef struct MemoryRegion MemoryRegion;
+
+typedef struct MemoryRegionSection {
+ uint64_t size;
+ MemoryRegion *mr;
+ uint64_t offset_within_region;
+ uint64_t offset_within_address_space;
+ bool readonly;
+ bool nonvolatile;
+} MemoryRegionSection;
+
+struct vhost_dev {
+ VirtIODevice *vdev;
+ struct vhost_user *vhudev;
+ struct vhost_memory *mem;
+ int n_mem_sections;
+ MemoryRegionSection *mem_sections;
+ int n_tmp_sections;
+ MemoryRegionSection *tmp_sections;
+ struct vhost_virtqueue *vqs;
+ unsigned int nvqs;
+ /* the first virtqueue which would be used by this vhost dev */
+ int vq_index;
+ /* one past the last vq index for the virtio device (not vhost) */
+ int vq_index_end;
+ /* if non-zero, minimum required value for max_queues */
+ int num_queues;
+ uint64_t features;
+ uint64_t acked_features;
+ uint64_t backend_features;
+ uint64_t protocol_features;
+ uint64_t max_queues;
+ uint64_t backend_cap;
+ bool started;
+ bool log_enabled;
+ uint64_t log_size;
+ void *migration_blocker;
+ void *opaque;
+ struct vhost_log *log;
+ QLIST_ENTRY(vhost_dev) entry;
+ uint64_t memory_slots;
+ const VhostDevConfigOps *config_ops;
+};
+
+
+#define VHOST_USER_MAX_RAM_SLOTS 16
+
+typedef uint64_t ram_addr_t;
+typedef struct RAMBlock RAMBlock;
+
+typedef struct RAMBlock {
+ struct MemoryRegion *mr;
+ uint8_t *host;
+ uint8_t *colo_cache; /* For colo, VM's ram cache */
+ ram_addr_t offset;
+ ram_addr_t used_length;
+ ram_addr_t max_length;
+ void (*resized)(const char*, uint64_t length, void *host);
+ uint32_t flags;
+ /* Protected by iothread lock. */
+ char idstr[256];
+ /* RCU-enabled, writes protected by the ramlist lock */
+ int fd;
+ size_t page_size;
+ /* dirty bitmap used during migration */
+ unsigned long *bmap;
+ /* bitmap of already received pages in postcopy */
+ unsigned long *receivedmap;
+
+ /*
+ * bitmap to track already cleared dirty bitmap. When the bit is
+ * set, it means the corresponding memory chunk needs a log-clear.
+ * Set this up to non-NULL to enable the capability to postpone
+ * and split clearing of dirty bitmap on the remote node (e.g.,
+ * KVM). The bitmap will be set only when doing global sync.
+ *
+ * NOTE: this bitmap is different comparing to the other bitmaps
+ * in that one bit can represent multiple guest pages (which is
+ * decided by the `clear_bmap_shift' variable below). On
+ * destination side, this should always be NULL, and the variable
+ * `clear_bmap_shift' is meaningless.
+ */
+ unsigned long *clear_bmap;
+ uint8_t clear_bmap_shift;
+
+ /*
+ * RAM block length that corresponds to the used_length on the migration
+ * source (after RAM block sizes were synchronized). Especially, after
+ * starting to run the guest, used_length and postcopy_length can differ.
+ * Used to register/unregister uffd handlers and as the size of the received
+ * bitmap. Receiving any page beyond this length will bail out, as it
+ * could not have been valid on the source.
+ */
+ ram_addr_t postcopy_length;
+} RAMBlock;
+
+
+/*
+ * MemoryRegion:
+ *
+ * A struct representing a memory region.
+ */
+typedef struct MemoryRegion {
+ /* private: */
+
+ /* The following fields should fit in a cache line */
+ bool romd_mode;
+ bool ram;
+ bool subpage;
+ bool readonly; /* For RAM regions */
+ bool nonvolatile;
+ bool rom_device;
+ bool flush_coalesced_mmio;
+ uint8_t dirty_log_mask;
+ bool is_iommu;
+ RAMBlock *ram_block;
+
+ void *opaque;
+ MemoryRegion *container;
+ uint64_t size;
+ uint64_t addr;
+ void (*destructor)(MemoryRegion *mr);
+ uint64_t align;
+ bool terminates;
+ bool ram_device;
+ bool enabled;
+ bool warning_printed; /* For reservations */
+ uint8_t vga_logging_count;
+ MemoryRegion *alias;
+ uint64_t alias_offset;
+ int32_t priority;
+ QTAILQ_HEAD(, MemoryRegion) subregions;
+ QTAILQ_ENTRY(MemoryRegion) subregions_link;
+ const char *name;
+ unsigned ioeventfd_nb;
+} MemoryRegion;
+
+struct vhost_user {
+ struct vhost_dev *dev;
+
+ /* Shared between vhost devs of the same virtio device */
+
+ uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
+ /* Length of the region_rb and region_rb_offset arrays */
+ size_t region_rb_len;
+ /* RAMBlock associated with a given region */
+ RAMBlock **region_rb;
+ /*
+ * The offset from the start of the RAMBlock to the start of the
+ * vhost region.
+ */
+ ram_addr_t *region_rb_offset;
+
+ /* True once we've entered postcopy_listen */
+ bool postcopy_listen;
+
+ /* Our current regions */
+ int num_shadow_regions;
+ struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
+};
+
+#define MAX_DEVICES 100
+#define MAX_NAME_LENGTH 256
+#define MAX_SOCKET_LENGTH 256
+
+typedef struct Device {
+ char device_name[MAX_NAME_LENGTH];
+ char socket[MAX_SOCKET_LENGTH];
+ int queue_num;
+ int queue_size;
+ int device_index;
+ int requires_socket;
+ int client_socket;
+} Device;
+
+typedef struct adapter_dev {
+ struct vhost_dev vdev;
+ struct vhost_user vudev;
+ VirtIODevice virtio_dev;
+ VirtioBus vbus;
+ Device *device_params;
+ /* More variables */
+ int loopback_fd;
+ int efd; /* Eventfd file descriptor */
+ virtio_neg_t *address;
+ virtio_device_info_struct_t device_info;
+ VirtIOMMIOProxy proxy;
+} AdapterDev;
+
+#define container_of_a(ptr, type, member) \
+ ((type *)((char *)(ptr) - offsetof(type, member)))
+
+/*
+#define to_vhost_user_device(ptr, field) \
+ container_of_a(ptr, struct vhost_user, field)
+
+#define virtio_to_vhost_dev(ptr, field) \
+ container_of_a(ptr, struct vhost_dev, field)
+
+*/
+#define to_adapter_device(ptr, field) \
+ container_of(ptr, struct adapter_dev, field)
+
+/* Based on qemu/hw/virtio/vhost-user.c */
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+#define VHOST_LOG_PAGE 4096
+#define VHOST_MEMORY_BASELINE_NREGIONS VHOST_USER_MAX_RAM_SLOTS
+
+/* The version of the protocol we support */
+#define VHOST_USER_VERSION (0x1)
+
+/*
+ * The size of the vhost-user message header: the request, flags and
+ * size fields that precede the payload union.
+ */
+#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
+
+/*
+ * Maximum size of virtio device config space
+ */
+#define VHOST_USER_MAX_CONFIG_SIZE 256
+
+typedef enum VhostSetConfigType {
+ VHOST_SET_CONFIG_TYPE_MASTER = 0,
+ VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
+} VhostSetConfigType;
+
+enum VhostUserProtocolFeature {
+ VHOST_USER_PROTOCOL_F_MQ = 0,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
+ VHOST_USER_PROTOCOL_F_RARP = 2,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
+ VHOST_USER_PROTOCOL_F_NET_MTU = 4,
+ VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
+ VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
+ VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
+ VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
+ VHOST_USER_PROTOCOL_F_CONFIG = 9,
+ VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
+ VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
+ VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
+ VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
+ VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
+ VHOST_USER_PROTOCOL_F_STATUS = 16,
+ VHOST_USER_PROTOCOL_F_MAX
+};
+
+#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
+
+typedef enum VhostUserRequest {
+ VHOST_USER_NONE = 0,
+ VHOST_USER_GET_FEATURES = 1,
+ VHOST_USER_SET_FEATURES = 2,
+ VHOST_USER_SET_OWNER = 3,
+ VHOST_USER_RESET_OWNER = 4,
+ VHOST_USER_SET_MEM_TABLE = 5,
+ VHOST_USER_SET_LOG_BASE = 6,
+ VHOST_USER_SET_LOG_FD = 7,
+ VHOST_USER_SET_VRING_NUM = 8,
+ VHOST_USER_SET_VRING_ADDR = 9,
+ VHOST_USER_SET_VRING_BASE = 10,
+ VHOST_USER_GET_VRING_BASE = 11,
+ VHOST_USER_SET_VRING_KICK = 12,
+ VHOST_USER_SET_VRING_CALL = 13,
+ VHOST_USER_SET_VRING_ERR = 14,
+ VHOST_USER_GET_PROTOCOL_FEATURES = 15,
+ VHOST_USER_SET_PROTOCOL_FEATURES = 16,
+ VHOST_USER_GET_QUEUE_NUM = 17,
+ VHOST_USER_SET_VRING_ENABLE = 18,
+ VHOST_USER_SEND_RARP = 19,
+ VHOST_USER_NET_SET_MTU = 20,
+ VHOST_USER_SET_SLAVE_REQ_FD = 21,
+ VHOST_USER_IOTLB_MSG = 22,
+ VHOST_USER_SET_VRING_ENDIAN = 23,
+ VHOST_USER_GET_CONFIG = 24,
+ VHOST_USER_SET_CONFIG = 25,
+ VHOST_USER_CREATE_CRYPTO_SESSION = 26,
+ VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
+ VHOST_USER_POSTCOPY_ADVISE = 28,
+ VHOST_USER_POSTCOPY_LISTEN = 29,
+ VHOST_USER_POSTCOPY_END = 30,
+ VHOST_USER_GET_INFLIGHT_FD = 31,
+ VHOST_USER_SET_INFLIGHT_FD = 32,
+ VHOST_USER_GPU_SET_SOCKET = 33,
+ VHOST_USER_VRING_KICK = 35,
+ VHOST_USER_GET_MAX_MEM_SLOTS = 36,
+ VHOST_USER_ADD_MEM_REG = 37,
+ VHOST_USER_REM_MEM_REG = 38,
+ VHOST_USER_SET_STATUS = 39,
+ VHOST_USER_GET_STATUS = 40,
+ VHOST_USER_MAX
+} VhostUserRequest;
+
+typedef enum VhostUserSlaveRequest {
+ VHOST_USER_SLAVE_NONE = 0,
+ VHOST_USER_SLAVE_IOTLB_MSG = 1,
+ VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
+ VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
+ VHOST_USER_SLAVE_VRING_CALL = 4,
+ VHOST_USER_SLAVE_VRING_ERR = 5,
+ VHOST_USER_SLAVE_MAX
+} VhostUserSlaveRequest;
+
+typedef struct VhostUserMemoryRegion {
+ uint64_t guest_phys_addr;
+ uint64_t memory_size;
+ uint64_t userspace_addr;
+ uint64_t mmap_offset;
+} VhostUserMemoryRegion;
+
+#define VHOST_USER_MEM_REG_SIZE (sizeof(VhostUserMemoryRegion))
+
+typedef struct VhostUserMemory {
+ uint32_t nregions;
+ uint32_t padding;
+ VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
+} VhostUserMemory;
+
+typedef struct VhostUserMemRegMsg {
+ uint64_t padding;
+ VhostUserMemoryRegion region;
+} VhostUserMemRegMsg;
+
+typedef struct VhostUserLog {
+ uint64_t mmap_size;
+ uint64_t mmap_offset;
+} VhostUserLog;
+
+typedef struct VhostUserConfig {
+ uint32_t offset;
+ uint32_t size;
+ uint32_t flags;
+ uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
+} VhostUserConfig;
+
+static VhostUserConfig c __attribute__ ((unused));
+#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
+ + sizeof(c.size) \
+ + sizeof(c.flags))
+
+typedef struct VhostUserVringArea {
+ uint64_t u64;
+ uint64_t size;
+ uint64_t offset;
+} VhostUserVringArea;
+
+typedef struct VhostUserInflight {
+ uint64_t mmap_size;
+ uint64_t mmap_offset;
+ uint16_t num_queues;
+ uint16_t queue_size;
+} VhostUserInflight;
+
+#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
+# define VU_PACKED __attribute__((gcc_struct, packed))
+#else
+# define VU_PACKED __attribute__((packed))
+#endif
+
+typedef struct VhostUserMsg {
+ int request;
+
+#define VHOST_USER_VERSION_MASK (0x3)
+#define VHOST_USER_REPLY_MASK (0x1 << 2)
+#define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+
+ union {
+#define VHOST_USER_VRING_IDX_MASK (0xff)
+#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
+ uint64_t u64;
+ struct vhost_vring_state state;
+ struct vhost_vring_addr addr;
+ VhostUserMemory memory;
+ VhostUserMemRegMsg memreg;
+ VhostUserLog log;
+ VhostUserConfig config;
+ VhostUserVringArea area;
+ VhostUserInflight inflight;
+ } payload;
+
+ int fds[VHOST_MEMORY_BASELINE_NREGIONS];
+ int fd_num;
+ uint8_t *data;
+} VU_PACKED VhostUserMsg;
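
The struct above fixes the wire format: every vhost-user message starts with
the request, flags and size fields (VHOST_USER_HDR_SIZE bytes), followed by
size bytes of payload. A hedged sketch of framing a GET_FEATURES request
over the master socket (the helper name and error handling are assumptions,
not part of this header):

    #include <sys/socket.h>

    static int example_send_get_features(int sock)
    {
        VhostUserMsg msg = {
            .request = VHOST_USER_GET_FEATURES,
            .flags   = VHOST_USER_VERSION,  /* protocol version in low bits */
            .size    = 0,                   /* this request has no payload */
        };
        /* only the header goes on the wire when size == 0 */
        ssize_t n = send(sock, &msg, VHOST_USER_HDR_SIZE, 0);
        return n == (ssize_t)VHOST_USER_HDR_SIZE ? 0 : -1;
    }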
+
+typedef struct VuDevRegion {
+ /* Guest Physical address. */
+ uint64_t gpa;
+ /* Memory region size. */
+ uint64_t size;
+ /* QEMU virtual address (userspace). */
+ uint64_t qva;
+ /* Starting offset in our mmaped space. */
+ uint64_t mmap_offset;
+ /* Start address of mmaped space. */
+ uint64_t mmap_addr;
+} VuDevRegion;
+
+
+typedef struct VuDev VuDev;
+typedef uint64_t (*vu_get_features_cb) (VuDev *dev);
+typedef void (*vu_set_features_cb) (VuDev *dev, uint64_t features);
+typedef int (*vu_process_msg_cb) (VuDev *dev, VhostUserMsg *vmsg,
+ int *do_reply);
+typedef bool (*vu_read_msg_cb) (VuDev *dev, int sock, VhostUserMsg *vmsg);
+typedef void (*vu_queue_set_started_cb) (VuDev *dev, int qidx, bool started);
+typedef bool (*vu_queue_is_processed_in_order_cb) (VuDev *dev, int qidx);
+typedef int (*vu_get_config_cb) (VuDev *dev, uint8_t *config, uint32_t len);
+typedef int (*vu_set_config_cb) (VuDev *dev, const uint8_t *data,
+ uint32_t offset, uint32_t size,
+ uint32_t flags);
+
+typedef struct VuDevIface {
+ /* called by VHOST_USER_GET_FEATURES to get the features bitmask */
+ vu_get_features_cb get_features;
+ /* enable vhost implementation features */
+ vu_set_features_cb set_features;
+ /*
+ * get the protocol feature bitmask from the underlying vhost
+ * implementation
+ */
+ vu_get_features_cb get_protocol_features;
+ /* enable protocol features in the underlying vhost implementation. */
+ vu_set_features_cb set_protocol_features;
+ /* process_msg is called for each vhost-user message received */
+ /* skip libvhost-user processing if return value != 0 */
+ vu_process_msg_cb process_msg;
+ /* tells when queues can be processed */
+ vu_queue_set_started_cb queue_set_started;
+ /*
+     * Tells whether the queue is processed in order; if so, processing
+     * is resumed from vring.used->idx, which helps to support resuming
+     * after an unmanaged exit/crash.
+ */
+ vu_queue_is_processed_in_order_cb queue_is_processed_in_order;
+ /* get the config space of the device */
+ vu_get_config_cb get_config;
+ /* set the config space of the device */
+ vu_set_config_cb set_config;
+} VuDevIface;
+
+typedef void (*vu_queue_handler_cb) (VuDev *dev, int qidx);
+
+typedef struct VuRing {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+ uint64_t log_guest_addr;
+ uint32_t flags;
+} VuRing;
+
+typedef struct VuDescStateSplit {
+ /*
+ * Indicate whether this descriptor is inflight or not.
+ * Only available for head-descriptor.
+ */
+ uint8_t inflight;
+
+ /* Padding */
+ uint8_t padding[5];
+
+ /*
+ * Maintain a list for the last batch of used descriptors.
+ * Only available when batching is used for submitting
+ */
+ uint16_t next;
+
+ /*
+ * Used to preserve the order of fetching available descriptors.
+ * Only available for head-descriptor.
+ */
+ uint64_t counter;
+} VuDescStateSplit;
+
+typedef struct VuVirtqInflight {
+ /* The feature flags of this region. Now it's initialized to 0. */
+ uint64_t features;
+
+ /*
+ * The version of this region. It's 1 currently.
+ * Zero value indicates a vm reset happened.
+ */
+ uint16_t version;
+
+ /*
+     * The size of the VuDescStateSplit array. It's equal to the virtqueue
+     * size; the slave can get it from the queue_size field of VhostUserInflight.
+ */
+ uint16_t desc_num;
+
+ /*
+ * The head of list that track the last batch of used descriptors.
+ */
+ uint16_t last_batch_head;
+
+ /* Storing the idx value of used ring */
+ uint16_t used_idx;
+
+ /* Used to track the state of each descriptor in descriptor table */
+ VuDescStateSplit desc[];
+} VuVirtqInflight;
+
+typedef struct VuVirtqInflightDesc {
+ uint16_t index;
+ uint64_t counter;
+} VuVirtqInflightDesc;
+
+typedef struct VuVirtq {
+ VuRing vring;
+ VuVirtqInflight *inflight;
+ VuVirtqInflightDesc *resubmit_list;
+ uint16_t resubmit_num;
+ uint64_t counter;
+ /* Next head to pop */
+ uint16_t last_avail_idx;
+ /* Last avail_idx read from VQ. */
+ uint16_t shadow_avail_idx;
+ uint16_t used_idx;
+ /* Last used index value we have signalled on */
+ uint16_t signalled_used;
+    /* Whether signalled_used is valid */
+ bool signalled_used_valid;
+ /* Notification enabled? */
+ bool notification;
+ int inuse;
+ vu_queue_handler_cb handler;
+ int call_fd;
+ int kick_fd;
+ int err_fd;
+ unsigned int enable;
+ bool started;
+ /* Guest addresses of our ring */
+ struct vhost_vring_addr vra;
+} VuVirtq;
+
+enum VuWatchCondtion {
+ VU_WATCH_IN = POLLIN,
+ VU_WATCH_OUT = POLLOUT,
+ VU_WATCH_PRI = POLLPRI,
+ VU_WATCH_ERR = POLLERR,
+ VU_WATCH_HUP = POLLHUP,
+};
+
+typedef void (*vu_panic_cb) (VuDev *dev, const char *err);
+typedef void (*vu_watch_cb) (VuDev *dev, int condition, void *data);
+typedef void (*vu_set_watch_cb) (VuDev *dev, int fd, int condition,
+ vu_watch_cb cb, void *data);
+typedef void (*vu_remove_watch_cb) (VuDev *dev, int fd);
+
+typedef struct VuDevInflightInfo {
+ int fd;
+ void *addr;
+ uint64_t size;
+} VuDevInflightInfo;
+
+struct VuDev {
+ int sock;
+ uint32_t nregions;
+ VuDevRegion regions[VHOST_USER_MAX_RAM_SLOTS];
+ VuVirtq *vq;
+ VuDevInflightInfo inflight_info;
+ int log_call_fd;
+ /* Must be held while using slave_fd */
+ pthread_mutex_t slave_mutex;
+ int slave_fd;
+ uint64_t log_size;
+ uint8_t *log_table;
+ uint64_t features;
+ uint64_t protocol_features;
+ bool broken;
+ uint16_t max_queues;
+
+ /*
+ * @read_msg: custom method to read vhost-user message
+ *
+ * Read data from vhost_user socket fd and fill up
+ * the passed VhostUserMsg *vmsg struct.
+ *
+     * If reading fails, it should close any file descriptors received
+     * as the socket message's ancillary data.
+     *
+     * For details, please refer to vu_message_read in libvhost-user.c,
+     * which is used by default if no custom method is provided when
+     * calling vu_init.
+ *
+     * Returns: true if a vhost-user message was successfully received,
+     * false otherwise.
+ *
+ */
+ vu_read_msg_cb read_msg;
+
+ /*
+ * @set_watch: add or update the given fd to the watch set,
+ * call cb when condition is met.
+ */
+ vu_set_watch_cb set_watch;
+
+ /* @remove_watch: remove the given fd from the watch set */
+ vu_remove_watch_cb remove_watch;
+
+ /*
+ * @panic: encountered an unrecoverable error, you may try to re-initialize
+ */
+ vu_panic_cb panic;
+ const VuDevIface *iface;
+
+ /* Postcopy data */
+ int postcopy_ufd;
+ bool postcopy_listening;
+};
+
+typedef struct VuVirtqElement {
+ unsigned int index;
+ unsigned int out_num;
+ unsigned int in_num;
+ struct iovec *in_sg;
+ struct iovec *out_sg;
+} VuVirtqElement;
+
+/**
+ * vu_init:
+ * @dev: a VuDev context
+ * @max_queues: maximum number of virtqueues
+ * @socket: the socket connected to vhost-user master
+ * @panic: a panic callback
+ * @set_watch: a set_watch callback
+ * @remove_watch: a remove_watch callback
+ * @iface: a VuDevIface structure with vhost-user device callbacks
+ *
+ * Initializes a VuDev vhost-user context.
+ *
+ * Returns: true on success, false on failure.
+ **/
+bool vu_init(VuDev *dev,
+ uint16_t max_queues,
+ int socket,
+ vu_panic_cb panic,
+ vu_read_msg_cb read_msg,
+ vu_set_watch_cb set_watch,
+ vu_remove_watch_cb remove_watch,
+ const VuDevIface *iface);
+
+
+/**
+ * vu_deinit:
+ * @dev: a VuDev context
+ *
+ * Cleans up the VuDev context
+ */
+void vu_deinit(VuDev *dev);
+
+/**
+ * vu_dispatch:
+ * @dev: a VuDev context
+ *
+ * Process one vhost-user message.
+ *
+ * Returns: true on success, false on failure.
+ */
+bool vu_dispatch(VuDev *dev);
+
+/**
+ * vu_gpa_to_va:
+ * @dev: a VuDev context
+ * @plen: guest memory size
+ * @guest_addr: guest address
+ *
+ * Translate a guest address to a pointer. Returns NULL on failure.
+ */
+void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr);
+
+/**
+ * vu_get_queue:
+ * @dev: a VuDev context
+ * @qidx: queue index
+ *
+ * Returns: the queue at index @qidx.
+ */
+VuVirtq *vu_get_queue(VuDev *dev, int qidx);
+
+/**
+ * vu_set_queue_handler:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @handler: the queue handler callback
+ *
+ * Set the queue handler. This function may be called several times
+ * for the same queue. If called with NULL @handler, the handler is
+ * removed.
+ */
+void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
+ vu_queue_handler_cb handler);
+
+/**
+ * vu_set_queue_host_notifier:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @fd: a file descriptor
+ * @size: host page size
+ * @offset: notifier offset in @fd file
+ *
+ * Set queue's host notifier. This function may be called several
+ * times for the same queue. If called with -1 @fd, the notifier
+ * is removed.
+ */
+bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
+ int size, int offset);
+
+/**
+ * vu_queue_set_notification:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @enable: state
+ *
+ * Set whether the queue notifies (via event index or interrupt)
+ */
+void vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable);
+
+/**
+ * vu_queue_enabled:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Returns: whether the queue is enabled.
+ */
+bool vu_queue_enabled(VuDev *dev, VuVirtq *vq);
+
+/**
+ * vu_queue_started:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Returns: whether the queue is started.
+ */
+bool vu_queue_started(const VuDev *dev, const VuVirtq *vq);
+
+/**
+ * vu_queue_empty:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Returns: true if the queue is empty or not ready.
+ */
+bool vu_queue_empty(VuDev *dev, VuVirtq *vq);
+
+/**
+ * vu_queue_notify:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Request to notify the queue via callfd (skipped if unnecessary)
+ */
+void vu_queue_notify(VuDev *dev, VuVirtq *vq);
+
+/**
+ * vu_queue_notify_sync:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Request to notify the queue via callfd (skipped if unnecessary)
+ * or sync message if possible.
+ */
+void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq);
+
+/**
+ * vu_queue_pop:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @sz: the size of struct to return (must be >= sizeof(VuVirtqElement))
+ *
+ * Returns: a VuVirtqElement filled from the queue or NULL. The
+ * returned element must be free()-d by the caller.
+ */
+void *vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz);
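+
+/*
+ * Illustrative sketch: the canonical request loop - pop an element,
+ * process its scatter-gather lists, push it back as used, then notify.
+ * process_request() is an assumption of the example.
+ *
+ *     VuVirtqElement *elem;
+ *     while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
+ *         size_t done = process_request(elem->out_sg, elem->out_num,
+ *                                       elem->in_sg, elem->in_num);
+ *         vu_queue_push(dev, vq, elem, done);
+ *         free(elem);
+ *     }
+ *     vu_queue_notify(dev, vq);
+ */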
+
+
+/**
+ * vu_queue_unpop:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @elem: The #VuVirtqElement
+ * @len: number of bytes written
+ *
+ * Pretend the most recent element wasn't popped from the virtqueue. The next
+ * call to vu_queue_pop() will refetch the element.
+ */
+void vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
+ size_t len);
+
+/**
+ * vu_queue_rewind:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @num: number of elements to push back
+ *
+ * Pretend that elements weren't popped from the virtqueue. The next
+ * vu_queue_pop() will refetch the oldest element.
+ *
+ * Returns: true on success, false if @num is greater than the number of in use
+ * elements.
+ */
+bool vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num);
+
+/**
+ * vu_queue_fill:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @elem: a VuVirtqElement
+ * @len: length in bytes to write
+ * @idx: optional offset for the used ring index (0 in general)
+ *
+ * Fill the used ring with @elem element.
+ */
+void vu_queue_fill(VuDev *dev, VuVirtq *vq,
+ const VuVirtqElement *elem,
+ unsigned int len, unsigned int idx);
+
+/**
+ * vu_queue_push:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @elem: a VuVirtqElement
+ * @len: length in bytes to write
+ *
+ * Helper that combines vu_queue_fill() with a vu_queue_flush().
+ */
+void vu_queue_push(VuDev *dev, VuVirtq *vq,
+ const VuVirtqElement *elem, unsigned int len);
+
+/**
+ * vu_queue_flush:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @num: number of elements to flush
+ *
+ * Mark the last @num elements as used (used->idx is advanced by
+ * @num elements).
+ */
+void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int num);
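+
+/*
+ * Illustrative sketch: completing a batch with a single used->idx
+ * update - fill one used-ring slot per element, then flush once.
+ * elems[], lens[] and n are assumptions of the example.
+ *
+ *     for (unsigned int i = 0; i < n; i++) {
+ *         vu_queue_fill(dev, vq, elems[i], lens[i], i);
+ *     }
+ *     vu_queue_flush(dev, vq, n);
+ *     vu_queue_notify(dev, vq);
+ */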
+
+/**
+ * vu_queue_get_avail_bytes:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @in_bytes: returns the number of device-writable (in) bytes available
+ * @out_bytes: returns the number of device-readable (out) bytes available
+ * @max_in_bytes: stop counting after max_in_bytes
+ * @max_out_bytes: stop counting after max_out_bytes
+ *
+ * Count the number of available bytes, up to max_in_bytes/max_out_bytes.
+ */
+void vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
+ unsigned int *out_bytes,
+ unsigned max_in_bytes, unsigned max_out_bytes);
+
+/**
+ * vu_queue_avail_bytes:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @in_bytes: expected in bytes
+ * @out_bytes: expected out bytes
+ *
+ * Returns: true if in_bytes <= in_total && out_bytes <= out_total
+ */
+bool vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
+ unsigned int out_bytes);
+
+
+bool vhost_user_one_time_request(VhostUserRequest request);
+void vmsg_close_fds(VhostUserMsg *vmsg);
+int vhost_user_set_owner(struct vhost_dev *dev);
+int process_message_reply(struct vhost_dev *dev, const VhostUserMsg *msg);
+int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64);
+int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features);
+int enforce_reply(struct vhost_dev *dev, const VhostUserMsg *msg);
+int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64, bool wait_for_reply);
+int vhost_user_set_protocol_features(struct vhost_dev *dev, uint64_t features);
+int vhost_user_get_max_memslots(struct vhost_dev *dev, uint64_t *max_memslots);
+int vhost_setup_slave_channel(struct vhost_dev *dev);
+int vhost_user_get_vq_index(struct vhost_dev *dev, int idx);
+int vhost_set_vring_file(struct vhost_dev *dev,
+ VhostUserRequest request,
+ struct vhost_vring_file *file);
+int vhost_user_set_vring_kick(struct vhost_dev *dev, struct vhost_vring_file *file);
+int vhost_user_set_vring_call(struct vhost_dev *dev, struct vhost_vring_file *file);
+int vhost_virtqueue_init(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq, int n);
+void vhost_dev_init(struct vhost_dev *vhdev);
+int vhost_user_set_features(struct vhost_dev *dev,
+ uint64_t features);
+int vhost_user_set_mem_table(struct vhost_dev *dev);
+void vhost_user_share_fd(void);
+int vhost_user_set_vring_num(struct vhost_dev *dev,
+ struct vhost_vring_state *ring);
+int vhost_user_set_vring_base(struct vhost_dev *dev,
+ struct vhost_vring_state *ring);
+int vhost_user_set_vring_addr(struct vhost_dev *dev,
+ struct vhost_vring_addr *addr);
+int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
+ uint32_t config_len);
+int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
+ uint32_t offset, uint32_t size, uint32_t flags);
+int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable);
+int vhost_user_dev_start(struct vhost_dev *dev, bool started);
+
+void vhost_commit_init_vqs(struct vhost_dev *dev);
+void vhost_commit_mem_regions(struct vhost_dev *dev);
+void vhost_commit_vqs(struct vhost_dev *dev);
+void find_add_new_reg(struct vhost_dev *dev);
+void print_mem_table(struct vhost_dev *dev);
+void print_vhost_user_messages(int request);
+void vhost_user_dev_class_init(VirtIODevice *vdev);
+
+/* FIXME: This needs to move to a better place */
+struct vhost_inflight;
+int vhost_user_get_inflight_fd(struct vhost_dev *dev,
+ uint16_t queue_size,
+ struct vhost_inflight *inflight);
+int vhost_user_set_inflight_fd(struct vhost_dev *dev,
+ struct vhost_inflight *inflight);
+
+#endif /* LIBVHOST_USER_H */
diff --git a/include/vhost_user_rng.h b/include/vhost_user_rng.h
new file mode 100644
index 0000000..b6f58da
--- /dev/null
+++ b/include/vhost_user_rng.h
@@ -0,0 +1,43 @@
+/*
+ * Based on vhost-user-rng of QEMU project
+ *
+ * Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * Copyright (c) 2022-2023 Virtual Open Systems SAS.
+ *
+ * Author:
+ * Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef VHOST_USER_RNG
+#define VHOST_USER_RNG
+
+#include "vhost_loopback.h"
+#include "virtio_rng.h"
+#include "vhost_user_loopback.h"
+#include "virtio_loopback.h"
+
+typedef struct VHostUserRNG {
+ VirtIODevice *parent;
+ struct vhost_virtqueue *vhost_vq;
+ struct vhost_dev *vhost_dev;
+ VirtQueue *req_vq;
+ bool connected;
+} VHostUserRNG;
+
+void vhost_user_rng_realize(struct adapter_dev *adev);
+
+#endif /* VHOST_USER_RNG */
diff --git a/include/vhost_user_sound.h b/include/vhost_user_sound.h
new file mode 100644
index 0000000..e63e9d3
--- /dev/null
+++ b/include/vhost_user_sound.h
@@ -0,0 +1,42 @@
+/*
+ * Based on vhost-user-sound.h of QEMU project
+ *
+ * Copyright 2020 Red Hat, Inc.
+ *
+ * Copyright (c) 2023-2024 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ *
+ */
+
+#ifndef VHOST_USER_SOUND
+#define VHOST_USER_SOUND
+
+#include "virtio_loopback.h"
+#include "vhost_loopback.h"
+#include "vhost_user_loopback.h"
+
+struct virtio_snd_config {
+ /* # of available physical jacks */
+ uint32_t jacks;
+ /* # of available PCM streams */
+ uint32_t streams;
+ /* # of available channel maps */
+ uint32_t chmaps;
+};
+
+typedef struct VHostUserSound {
+ VirtIODevice *parent;
+ struct vhost_virtqueue *vhost_vqs;
+ VirtQueue **virtqs;
+ uint16_t num_queues;
+ uint32_t queue_size;
+ struct virtio_snd_config config;
+ struct vhost_dev *vhost_dev;
+} VHostUserSound;
+
+void vus_device_realize(struct adapter_dev *adev);
+
+#endif /* VHOST_USER_SOUND */
diff --git a/include/virtio_loopback.h b/include/virtio_loopback.h
new file mode 100644
index 0000000..d8e68f1
--- /dev/null
+++ b/include/virtio_loopback.h
@@ -0,0 +1,715 @@
+/*
+ * Based on:
+ * 1) virtio.h of QEMU project
+ *
+ * Copyright IBM, Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * 2) virtio-mmio.h of QEMU project
+ *
+ * Copyright (c) 2011 Linaro Limited
+ *
+ * Author:
+ * Peter Maydell <peter.maydell@linaro.org>
+ *
+ * 3) vhost.h of QEMU project
+ *
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * Control registers
+ */
+#ifndef VIRTIO_LOOPBACK
+#define VIRTIO_LOOPBACK
+
+#include "event_notifier.h"
+
+#define sizeof_field(type, field) sizeof(((type *)0)->field)
+
+/* Magic value ("virt" string) - Read Only */
+#define VIRTIO_MMIO_MAGIC_VALUE 0x000
+
+/* Virtio device version - Read Only */
+#define VIRTIO_MMIO_VERSION 0x004
+
+/* Virtio device ID - Read Only */
+#define VIRTIO_MMIO_DEVICE_ID 0x008
+
+/* Virtio vendor ID - Read Only */
+#define VIRTIO_MMIO_VENDOR_ID 0x00c
+
+/*
+ * Bitmask of the features supported by the device (host)
+ * (32 bits per set) - Read Only
+ */
+#define VIRTIO_MMIO_DEVICE_FEATURES 0x010
+
+/* Device (host) features set selector - Write Only */
+#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014
+
+/*
+ * Bitmask of features activated by the driver (guest)
+ * (32 bits per set) - Write Only
+ */
+#define VIRTIO_MMIO_DRIVER_FEATURES 0x020
+
+/* Activated features set selector - Write Only */
+#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024
+
+/* Guest's memory page size in bytes - Write Only */
+#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
+
+/* Queue selector - Write Only */
+#define VIRTIO_MMIO_QUEUE_SEL 0x030
+
+/* Maximum size of the currently selected queue - Read Only */
+#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034
+
+/* Queue size for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_NUM 0x038
+
+
+/* Used Ring alignment for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c
+
+/* Guest's PFN for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_PFN 0x040
+
+/* Ready bit for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_READY 0x044
+
+/* Queue notifier - Write Only */
+#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050
+
+/* Interrupt status - Read Only */
+#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060
+
+/* Interrupt acknowledge - Write Only */
+#define VIRTIO_MMIO_INTERRUPT_ACK 0x064
+
+/* Device status register - Read Write */
+#define VIRTIO_MMIO_STATUS 0x070
+
+/* Selected queue's Descriptor Table address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080
+#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084
+
+/* Selected queue's Available Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090
+#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094
+
+/* Selected queue's Used Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
+#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
+
+/* Shared memory region id */
+#define VIRTIO_MMIO_SHM_SEL 0x0ac
+
+/* Shared memory region length, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_LEN_LOW 0x0b0
+#define VIRTIO_MMIO_SHM_LEN_HIGH 0x0b4
+
+/* Shared memory region base address, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_BASE_LOW 0x0b8
+#define VIRTIO_MMIO_SHM_BASE_HIGH 0x0bc
+
+/* Configuration atomicity value */
+#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
+
+/*
+ * The config space is defined by each driver as
+ * the per-driver configuration space - Read Write
+ */
+#define VIRTIO_MMIO_CONFIG 0x100
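+
+/*
+ * Illustrative sketch (driver side, virtio-mmio v2): the register
+ * sequence for programming one queue. mmio_write() is a placeholder
+ * for a 32-bit register write at the given offset, and idx/num/desc/
+ * avail/used are assumptions of the example.
+ *
+ *     mmio_write(VIRTIO_MMIO_QUEUE_SEL, idx);
+ *     mmio_write(VIRTIO_MMIO_QUEUE_NUM, num);
+ *     mmio_write(VIRTIO_MMIO_QUEUE_DESC_LOW, desc & 0xffffffff);
+ *     mmio_write(VIRTIO_MMIO_QUEUE_DESC_HIGH, desc >> 32);
+ *     mmio_write(VIRTIO_MMIO_QUEUE_AVAIL_LOW, avail & 0xffffffff);
+ *     mmio_write(VIRTIO_MMIO_QUEUE_AVAIL_HIGH, avail >> 32);
+ *     mmio_write(VIRTIO_MMIO_QUEUE_USED_LOW, used & 0xffffffff);
+ *     mmio_write(VIRTIO_MMIO_QUEUE_USED_HIGH, used >> 32);
+ *     mmio_write(VIRTIO_MMIO_QUEUE_READY, 1);
+ */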
+
+/*
+ * Interrupt flags (re: interrupt status & acknowledge registers)
+ */
+#define VIRTIO_MMIO_INT_VRING (1 << 0)
+#define VIRTIO_MMIO_INT_CONFIG (1 << 1)
+
+#define VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT 1
+#define VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD \
+ (1 << VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT)
+
+
+/* Virtio loopback driver related */
+
+/* QEMU defines */
+#define VIRT_MAGIC 0x74726976 /* 'virt' */
+#define VIRT_VERSION 2
+#define VIRT_VERSION_LEGACY 1
+#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
+
+#define VIRTQUEUE_MAX_SIZE 1024
+#define VIRTIO_QUEUE_MAX VIRTQUEUE_MAX_SIZE
+
+#define VIRTIO_NO_VECTOR 0xffff
+#define TYPE_VIRTIO_DEVICE "virtio-device"
+
+/* Loopback negotiation code */
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE 4096
+
+#define EFD_INIT _IOC(_IOC_WRITE, 'k', 1, sizeof(efd_data_t))
+#define WAKEUP _IOC(_IOC_WRITE, 'k', 2, 0)
+#define START_LOOPBACK _IOC(_IOC_WRITE, 'k', 3, \
+ sizeof(virtio_device_info_struct_t))
+#define IRQ _IOC(_IOC_WRITE, 'k', 4, sizeof(int))
+#define SHARE_VQS _IOC(_IOC_WRITE, 'k', 5, sizeof(uint32_t))
+#define SHARE_COM_STRUCT _IOC(_IOC_WRITE, 'k', 6, 0)
+
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+typedef struct VirtIOMMIOProxy {
+ /* Generic */
+ bool legacy;
+ uint32_t flags;
+ /* Guest accessible state needing migration and reset */
+ uint32_t host_features_sel;
+ uint32_t guest_features_sel;
+ uint32_t guest_page_shift;
+ /* virtio-bus */
+ bool format_transport_address;
+ /* Fields only used for non-legacy (v2) devices */
+ uint32_t guest_features[2];
+} VirtIOMMIOProxy;
+
+
+/* Vring specific */
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+/* Reset vrings value */
+#define VIRTIO_F_RING_RESET 40
+
+
+/*
+ * Mark a descriptor as available or used in packed ring.
+ * Notice: they are defined as shifts instead of shifted values.
+ */
+#define VRING_PACKED_DESC_F_AVAIL 7
+#define VRING_PACKED_DESC_F_USED 15
+
+/*
+ * The Host uses this in used->flags to advise the Guest: don't kick me when
+ * you add a buffer. It's unreliable, so it's simply an optimization. Guest
+ * will still kick if it's out of buffers.
+ */
+#define VRING_USED_F_NO_NOTIFY 1
+/*
+ * The Guest uses this in avail->flags to advise the Host: don't interrupt me
+ * when you consume a buffer. It's unreliable, so it's simply an
+ * optimization.
+ */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* Enable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
+/* Disable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor in packed ring.
+ * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
+ * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
+ */
+#define VRING_PACKED_EVENT_FLAG_DESC 0x2
+
+/*
+ * Wrap counter bit shift in event suppression structure
+ * of packed ring.
+ */
+#define VRING_PACKED_EVENT_F_WRAP_CTR 15
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+/*
+ * The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ */
+/*
+ * The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/*
+ * Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+/******************/
+
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *) 0)->member) *__mptr = (ptr); \
+ (type *) ((char *) __mptr - offsetof(type, member));})
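+
+/*
+ * Example: container_of() recovers the enclosing struct from a pointer
+ * to one of its members, e.g. mapping an EventNotifier back to the
+ * VirtQueue that owns it (VirtQueue is defined below):
+ *
+ *     VirtQueue *vq = container_of(notifier, VirtQueue, host_notifier);
+ */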
+
+typedef struct VRing {
+ unsigned int num;
+ unsigned int num_default;
+ unsigned int align;
+ uint64_t desc;
+ uint64_t avail;
+ uint64_t used;
+} VRing;
+
+typedef struct VRingDesc {
+ uint64_t addr;
+ uint32_t len;
+ uint16_t flags;
+ uint16_t next;
+} VRingDesc;
+
+typedef struct VRingPackedDesc {
+ uint64_t addr;
+ uint32_t len;
+ uint16_t id;
+ uint16_t flags;
+} VRingPackedDesc;
+
+typedef struct VRingAvail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[];
+} VRingAvail;
+
+typedef struct VRingUsedElem {
+ uint32_t id;
+ uint32_t len;
+} VRingUsedElem;
+
+typedef struct VRingUsed {
+ uint16_t flags;
+ uint16_t idx;
+ VRingUsedElem ring[];
+} VRingUsed;
+
+typedef struct VirtQueueElement {
+ unsigned int index;
+ unsigned int len;
+ unsigned int ndescs;
+ unsigned int out_num;
+ unsigned int in_num;
+ uint64_t *in_addr;
+ uint64_t *out_addr;
+ struct iovec *in_sg;
+ struct iovec *out_sg;
+} VirtQueueElement;
+
+typedef struct VirtIODevice VirtIODevice;
+typedef struct VirtQueue VirtQueue;
+typedef void (*VirtIOHandleOutput)(VirtIODevice *, VirtQueue *);
+
+typedef struct VirtQueue {
+ VRing vring;
+ VirtQueueElement *used_elems;
+
+ /* Next head to pop */
+ uint16_t last_avail_idx;
+ bool last_avail_wrap_counter;
+
+ /* Last avail_idx read from VQ. */
+ uint16_t shadow_avail_idx;
+ bool shadow_avail_wrap_counter;
+
+ uint16_t used_idx;
+ bool used_wrap_counter;
+
+ /* Last used index value we have signalled on */
+ uint16_t signalled_used;
+
+ /* Whether signalled_used is valid */
+ bool signalled_used_valid;
+
+ /* Notification enabled? */
+ bool notification;
+
+ uint16_t queue_index;
+
+ unsigned int inuse;
+
+ uint16_t vector;
+ VirtIOHandleOutput handle_output;
+ VirtIODevice *vdev;
+
+ EventNotifier guest_notifier;
+ EventNotifier host_notifier;
+ bool host_notifier_enabled;
+} VirtQueue;
+
+typedef struct VirtIORNG VirtIORNG;
+typedef struct VHostUserRNG VHostUserRNG;
+typedef struct VirtioDeviceClass VirtioDeviceClass;
+typedef struct VHostUserBlk VHostUserBlk;
+typedef struct VhostUserInput VhostUserInput;
+typedef struct VHostUserGPIO VHostUserGPIO;
+typedef struct VHostUserSound VHostUserSound;
+typedef struct VHostUserCan VHostUserCan;
+typedef struct VHostUserConsole VHostUserConsole;
+typedef struct VirtioBus VirtioBus;
+
+typedef struct VirtIODevice {
+ VirtioBus *vbus;
+ VirtioDeviceClass *vdev_class;
+ struct vhost_dev *vhdev;
+ const char *name;
+ uint8_t status;
+ uint8_t isr;
+ uint16_t queue_sel;
+ uint64_t guest_features;
+ uint64_t host_features;
+ uint64_t backend_features;
+ size_t config_len;
+ void *config;
+ uint16_t config_vector;
+ uint32_t generation;
+ int last_avail;
+ int prev_level;
+ int int_count;
+ int eventfd_count;
+ int notify_cnt;
+ bool enable_virtio_interrupt;
+ pthread_mutex_t interrupt_lock;
+ int nvectors;
+ VirtQueue *vq;
+ VirtQueue **vqs;
+ int *nvqs;
+ /* TODO: replace the hard-coded array size below with a named constant */
+ uint64_t vring_phys_addrs[12];
+ uint32_t vring_phys_addrs_idx;
+ uint16_t device_id;
+ bool vm_running;
+ bool broken; /* device in invalid state, needs reset */
+ bool use_disabled_flag; /* allow use of 'disable' flag when needed */
+ bool disabled; /* device in temporarily disabled state */
+ bool use_started;
+ bool started;
+ bool start_on_kick; /* when virtio 1.0 feature has not been negotiated */
+ bool disable_legacy_check;
+ char *bus_name;
+ uint8_t device_endian;
+ bool use_guest_notifier_mask;
+ union {
+ VirtIORNG *vrng;
+ VHostUserRNG *vhrng;
+ VHostUserBlk *vhublk;
+ VhostUserInput *vhuinput;
+ VHostUserSound *vhusnd;
+ VHostUserGPIO *vhugpio;
+ VHostUserCan *vhucan;
+ VHostUserConsole *vhuconsole;
+ };
+ const int *user_feature_bits;
+} VirtIODevice;
+
+typedef struct efd_data {
+ int efd[2];
+ int pid;
+} efd_data_t;
+
+typedef struct virtio_device_info_struct {
+ unsigned long magic;
+ unsigned long version;
+ unsigned long device_id;
+ unsigned long vendor;
+
+} virtio_device_info_struct_t;
+
+
+/* Negotiation structs */
+
+typedef struct { int counter; } atomic_t;
+
+typedef struct virtio_neg {
+ uint64_t notification;
+ uint64_t data;
+ uint64_t size;
+ bool read;
+ atomic_t done;
+} virtio_neg_t;
+
+
+/* This is left here as a reference, might be useful in the future */
+/*
+ * static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
+ * {
+ * BusClass *bus_class = BUS_CLASS(klass);
+ * VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
+ *
+ * k->notify = virtio_mmio_update_irq;
+ * k->save_config = virtio_mmio_save_config;
+ * k->load_config = virtio_mmio_load_config;
+ * k->save_extra_state = virtio_mmio_save_extra_state;
+ * k->load_extra_state = virtio_mmio_load_extra_state;
+ * k->has_extra_state = virtio_mmio_has_extra_state;
+ * k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
+ * k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
+ * k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
+ * k->pre_plugged = virtio_mmio_pre_plugged;
+ * k->vmstate_change = virtio_mmio_vmstate_change;
+ * k->has_variable_vring_alignment = true;
+ * bus_class->max_dev = 1;
+ * bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
+ * }
+ *
+ */
+
+
+typedef struct VirtioBus {
+
+ VirtIODevice *vdev;
+ void (*notify)(VirtIODevice *d, uint16_t vector);
+ bool (*has_extra_state)(VirtIODevice *d);
+ bool (*query_guest_notifiers)(VirtIODevice *d);
+ int (*set_guest_notifiers)(VirtIODevice *d, int nvqs, bool assign);
+ void (*vmstate_change)(VirtIODevice *d, bool running);
+ void (*pre_plugged)(VirtIODevice *d);
+ void (*device_plugged)(VirtIODevice *d);
+ /*
+ * transport independent exit function.
+ * This is called by virtio-bus just before the device is unplugged.
+ */
+ void (*device_unplugged)(VirtIODevice *d);
+ int (*query_nvectors)(VirtIODevice *d);
+ /*
+ * ioeventfd handling: if the transport implements ioeventfd_assign,
+ * it must implement ioeventfd_enabled as well.
+ */
+ /* Returns true if the ioeventfd is enabled for the device. */
+ bool (*ioeventfd_enabled)(VirtIODevice *d);
+ /*
+ * Assigns/deassigns the ioeventfd backing for the transport on
+ * the device for queue number n. Returns an error value on
+ * failure.
+ */
+ int (*ioeventfd_assign)(VirtIOMMIOProxy *d, EventNotifier *notifier,
+ int n, bool assign);
+ /*
+ * Whether queue number n is enabled.
+ */
+ bool (*queue_enabled)(VirtIODevice *d, int n);
+ /*
+ * Does the transport have variable vring alignment?
+ * (ie can it ever call virtio_queue_set_align()?)
+ * Note that changing this will break migration for this transport.
+ */
+ bool has_variable_vring_alignment;
+ bool (*iommu_enabled)(VirtIODevice *d);
+
+ /*
+ * Set if ioeventfd has been started.
+ */
+ bool ioeventfd_started;
+
+ /*
+ * Set if ioeventfd has been grabbed by vhost. When ioeventfd
+ * is grabbed by vhost, we track its started/stopped state (which
+ * depends in turn on the virtio status register), but do not
+ * register a handler for the ioeventfd. When ioeventfd is
+ * released, if ioeventfd_started is true we finally register
+ * the handler so that QEMU's device model can use ioeventfd.
+ */
+ int ioeventfd_grabbed;
+} VirtioBus;
+
+struct adapter_dev;
+
+typedef struct VirtioDeviceClass {
+ /*< private >*/
+ VirtIODevice *parent;
+ /*< public >*/
+ /* This is what a VirtioDevice must implement */
+ uint64_t (*get_features)(VirtIODevice *vdev,
+ uint64_t requested_features);
+ uint64_t (*bad_features)(VirtIODevice *vdev);
+ void (*set_features)(VirtIODevice *vdev, uint64_t val);
+ int (*validate_features)(VirtIODevice *vdev);
+ void (*get_config)(VirtIODevice *vdev, uint8_t *config);
+ void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
+ void (*reset)(VirtIODevice *vdev);
+ void (*set_status)(VirtIODevice *vdev, uint8_t val);
+ void (*realize)(struct adapter_dev *adev);
+ void (*unrealize)(VirtIODevice *vdev);
+ void (*start)(VirtIODevice *vdev);
+ void (*stop)(VirtIODevice *vdev);
+ /*
+ * For transitional devices, this is a bitmap of features
+ * that are only exposed on the legacy interface but not
+ * the modern one.
+ */
+ uint64_t legacy_features;
+ /*
+ * Test and clear event pending status.
+ * Should be called after unmask to avoid losing events.
+ * If backend does not support masking,
+ * must check in frontend instead.
+ */
+ bool (*guest_notifier_pending)(VirtIODevice *vdev, int n);
+ /*
+ * Mask/unmask events from this vq. Any events reported
+ * while masked will become pending.
+ * If backend does not support masking,
+ * must mask in frontend instead.
+ */
+ void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask);
+ int (*start_ioeventfd)(VirtIODevice *vdev);
+ void (*stop_ioeventfd)(VirtIODevice *vdev);
+ /*
+ * Saving and loading of a device; trying to deprecate save/load,
+ * use vmsd for new devices.
+ */
+ /*
+ * Post load hook in vmsd is called early while device is processed, and
+ * when VirtIODevice isn't fully initialized. Devices should use this
+ * instead, unless they specifically want to verify the migration stream
+ * as it's processed, e.g. for bounds checking.
+ */
+ int (*post_load)(VirtIODevice *vdev);
+ bool (*primary_unplug_pending)(void *opaque);
+
+ void (*update_mem_table)(VirtIODevice *vdev);
+ void (*print_config)(uint8_t *config_data);
+
+ struct vhost_dev *(*get_vhost)(VirtIODevice *vdev);
+} VirtioDeviceClass;
+
+void handle_input(VirtIODevice *vdev, VirtQueue *vq);
+void *my_select(void *data);
+void *wait_read_write(void *data);
+void virtio_notify_config(VirtIODevice *vdev);
+void print_neg_flag(uint64_t neg_flag, bool read);
+
+void adapter_read_write_cb(struct adapter_dev *adev);
+int virtio_loopback_start(struct adapter_dev *adev, pthread_t *thread_id);
+void virtio_loopback_stop(struct adapter_dev *adev);
+int virtio_queue_ready(VirtQueue *vq);
+void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
+ unsigned int *out_bytes,
+ unsigned max_in_bytes, unsigned max_out_bytes);
+void virtio_add_feature(uint64_t *features, unsigned int fbit);
+bool virtio_has_feature(uint64_t features, unsigned int fbit);
+bool virtio_device_started(VirtIODevice *vdev, uint8_t status);
+bool virtio_device_should_start(VirtIODevice *vdev, uint8_t status);
+
+int virtio_queue_empty(VirtQueue *vq);
+void *virtqueue_pop(VirtQueue *vq, size_t sz);
+void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len);
+size_t iov_from_buf(const struct iovec *iov, unsigned int iov_cnt,
+ size_t offset, const void *buf, size_t bytes);
+bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
+ unsigned int *head);
+void virtio_notify_vector(VirtIODevice *vdev);
+
+enum {
+ VIRTQUEUE_READ_DESC_ERROR = -1,
+ VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
+ VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */
+};
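+
+/*
+ * Illustrative sketch: walking a split-ring descriptor chain with
+ * virtqueue_split_read_next_desc() (declared below); each return value
+ * corresponds to one of the enum cases above. Reading the head
+ * descriptor into a local copy is assumed to happen elsewhere.
+ *
+ *     unsigned int next = head;
+ *     VRingDesc desc;   // local copy of the head descriptor
+ *     int rc;
+ *     do {
+ *         // ... map desc.addr / desc.len into an iovec ...
+ *         rc = virtqueue_split_read_next_desc(vdev, &desc, max, &next);
+ *     } while (rc == VIRTQUEUE_READ_DESC_MORE);
+ *     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
+ *         // chain is malformed: mark the device broken
+ *     }
+ */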
+
+size_t qemu_iov_from_buf(const struct iovec *iov, unsigned int iov_cnt,
+ size_t offset, const void *buf, size_t bytes);
+VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
+ VirtIOHandleOutput handle_output);
+VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
+void virtio_dev_init(VirtIODevice *vdev, const char *name,
+ uint16_t device_id, size_t config_size);
+void virtio_loopback_bus_init(VirtioBus *k);
+int virtio_bus_set_host_notifier(VirtioBus *vbus, int n, bool assign);
+EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
+EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
+uint64_t virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
+uint64_t virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
+uint64_t virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
+int virtio_queue_get_num(VirtIODevice *vdev, int n);
+unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
+uint64_t virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
+uint64_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
+uint64_t virtio_queue_get_used_size(VirtIODevice *vdev, int n);
+void virtio_set_isr(VirtIODevice *vdev, int value);
+int virtio_device_grab_ioeventfd(VirtIODevice *vdev);
+bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev);
+size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt,
+ size_t offset, const void *buf, size_t bytes);
+void event_notifier_set_handler(EventNotifier *e,
+ void *handler);
+void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
+int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
+ unsigned int max, unsigned int *next);
+void print_config(uint8_t *config);
+uint32_t get_vqs_max_size(VirtIODevice *vdev);
+
+/*
+ * Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them?
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+#define VIRTIO_CONFIG_S_FEATURES_OK 8
+#define VIRTIO_CONFIG_S_DRIVER_OK 4
+#define VIRTIO_F_VERSION_1 32
+#define VIRTIO_F_ACCESS_PLATFORM 33
+#define VIRTIO_F_IN_ORDER 35
+#define VIRTIO_F_NOTIFICATION_DATA 38
+
+/*
+ * Legacy name for VIRTIO_F_ACCESS_PLATFORM
+ * (for compatibility with old userspace)
+ */
+#ifndef VIRTIO_F_IOMMU_PLATFORM
+#define VIRTIO_F_IOMMU_PLATFORM 33
+#endif
+
+/* QEMU Aligned functions */
+/*
+ * Round number down to multiple. Safe when m is not a power of 2 (see
+ * ROUND_DOWN for a faster version when a power of 2 is guaranteed).
+ */
+#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))
+
+/*
+ * Round number up to multiple. Safe when m is not a power of 2 (see
+ * ROUND_UP for a faster version when a power of 2 is guaranteed).
+ */
+#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m))
+
+/* Check if n is a multiple of m */
+#define QEMU_IS_ALIGNED(n, m) (((n) % (m)) == 0)
+
+/* n-byte align pointer down */
+#define QEMU_ALIGN_PTR_DOWN(p, n) \
+ ((typeof(p))QEMU_ALIGN_DOWN((uintptr_t)(p), (n)))
+
+/* n-byte align pointer up */
+#define QEMU_ALIGN_PTR_UP(p, n) \
+ ((typeof(p))QEMU_ALIGN_UP((uintptr_t)(p), (n)))
+
+/* Check if pointer p is n-bytes aligned */
+#define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n))
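+
+/*
+ * Worked examples for the alignment helpers above:
+ *
+ *     QEMU_ALIGN_DOWN(11, 4)   -> 8
+ *     QEMU_ALIGN_UP(11, 4)     -> 12
+ *     QEMU_IS_ALIGNED(12, 4)   -> true
+ *     QEMU_ALIGN_UP(10, 3)     -> 12   // safe: 3 is not a power of 2
+ */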
+
+/*
+ * Define 1 GB offset in order to request big enough
+ * memory blocks from the kernel:
+ * 0x40000000 = 1024 * 1024 * 1024 = 64 * 4096 * 4096 = 1G
+ */
+#define OFFSET_1GB (64ULL * PAGE_SIZE * PAGE_SIZE)
+
+/*
+ * Define starting physical address of host memory address space
+ */
+#define INIT_PA 0
+
+#endif /* VIRTIO_LOOPBACK */
diff --git a/include/virtio_rng.h b/include/virtio_rng.h
new file mode 100644
index 0000000..6e97294
--- /dev/null
+++ b/include/virtio_rng.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2022-2023 Virtual Open Systems SAS.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef VIRTIO_RNG_DEV
+#define VIRTIO_RNG_DEV
+
+#include "virtio_loopback.h"
+#include "vhost_user_loopback.h"
+
+extern const char test_str[64];
+
+typedef struct VirtIORNGConf {
+ uint64_t max_bytes;
+ uint32_t period_ms;
+} VirtIORNGConf;
+
+typedef struct VirtIORNG {
+ VirtIODevice *parent_obj;
+
+ /* Only one vq - guest puts buffer(s) on it when it needs entropy */
+ VirtQueue *vq;
+ VirtIORNGConf conf;
+
+ /*
+ * We purposefully don't migrate this state. The quota will reset on the
+ * destination as a result. Rate limiting is host state, not guest state.
+ */
+ int64_t quota_remaining;
+ bool activate_timer;
+
+} VirtIORNG;
+
+bool is_guest_ready(VirtIORNG *vrng);
+size_t get_request_size(VirtQueue *vq, unsigned quota);
+void virtio_rng_set_status(VirtIODevice *vdev, uint8_t status);
+void virtio_rng_process(VirtIORNG *vrng);
+void chr_read(VirtIORNG *vrng, const char *buf, size_t size);
+void virtio_rng_realize(struct adapter_dev *adev);
+void virtio_rng_init(VirtIODevice *vdev);
+
+#endif /* VIRTIO_RNG_DEV */