-rw-r--r--  adapter.c              128
-rw-r--r--  queue.h                498
-rw-r--r--  vhost_loopback.c        40
-rw-r--r--  vhost_loopback.h         2
-rw-r--r--  vhost_user_blk.c        18
-rw-r--r--  vhost_user_blk.h         1
-rw-r--r--  vhost_user_input.c       5
-rw-r--r--  vhost_user_loopback.c  513
-rw-r--r--  vhost_user_loopback.h  147
-rw-r--r--  vhost_user_rng.c         4
-rw-r--r--  virtio_input.c           4
-rw-r--r--  virtio_loopback.c       93
-rw-r--r--  virtio_loopback.h       13
13 files changed, 1357 insertions, 109 deletions
diff --git a/adapter.c b/adapter.c
index c907acd..e74da5f 100644
--- a/adapter.c
+++ b/adapter.c
@@ -80,6 +80,8 @@ void vhost_user_adapter_init(void)
global_vbus = (VirtioBus *)malloc(sizeof(VirtioBus));
global_vbus->vdev = global_vdev;
global_vdev->vbus = global_vbus;
+ global_vdev->vhdev = dev;
+
/* Store virtio_dev reference into vhost_dev struct */
dev->vdev = global_vdev;
@@ -131,17 +133,87 @@ void client(char *sock_path)
static void help_args(void)
{
- printf("Run example:\n\t./adapter -s /path_to_socket/rng.sock\n");
+ printf("Run example:\n\t./adapter -s /path_to_socket/rng.sock\n"
+ "\t\t -d device_name\n"
+ "The 'device_name' can be one of the following:\n"
+ "\tvrng, vhurng, vhublk, vhuinput\n");
+}
+
+int find_arg(int argc, char **argv, char *str)
+{
+ int i;
+
+ for (i = 0; i < argc; i++) {
+ if (!strcmp(argv[i], str)) {
+ return i + 1;
+ }
+ }
+ printf("You have not specified parameter \"%s\"\n", str);
+ return -1;
+}
+
+int val_device_arg(char *str)
+{
+ char *adapter_devices[] = {"vrng", "vhurng", "vhublk", "vhuinput"};
+ int adapter_devices_num = 4, i;
+
+ for (i = 0; i < adapter_devices_num; i++) {
+ if (!strcmp(adapter_devices[i], str)) {
+ return i + 1;
+ }
+ }
+
+ return 0;
+}
+
+bool check_vhu_device(char *str)
+{
+ char *vhu_devices[] = {"vhurng", "vhublk", "vhuinput"};
+ int vhu_devices_num = 3, i;
+
+ for (i = 0; i < vhu_devices_num; i++) {
+ if (!strcmp(vhu_devices[i], str)) {
+ return true;
+ }
+ }
+
+ return false;
}
int main(int argc, char **argv)
{
-#ifdef VHOST_USER
+ int socket_idx, device_idx, device_id;
+ bool vhost_user_enabled;
+
/*
- * Check if the user has provided a socket path.
+ * Check if the user has provided all the required arguments.
* If not, print the help messages.
*/
- if ((argc <= 2) || (strcmp(argv[1], "-s") != 0)) {
+
+ device_idx = find_arg(argc, argv, "-d");
+
+ if (device_idx < 0) {
+ goto error_args;
+ }
+
+ /* Validate the arguments */
+
+ device_id = val_device_arg(argv[device_idx]);
+
+ if (device_id == 0) {
+ goto error_args;
+ }
+
+ /* Check if this is a vhost-user device */
+ vhost_user_enabled = check_vhu_device(argv[device_idx]);
+
+ /* Check if a socket is needed and provided */
+
+ socket_idx = find_arg(argc, argv, "-s");
+
+ if ((socket_idx < 0) && (vhost_user_enabled)) {
goto error_args;
}
@@ -149,39 +221,33 @@ int main(int argc, char **argv)
* Create the socket and connect to the backend.
* Enabled only in the vhost-user case
*/
- client(argv[2]);
-#endif
+ if (vhost_user_enabled) {
+ client(argv[socket_idx]);
+ }
/* Initialize the adapter data structures */
vhost_user_adapter_init();
/* Initialize the virtio/vhost-user device */
-#ifdef VHOST_USER
-
-#ifdef VHOST_USER_INPUT_DEV
- vhost_user_input_init(global_vdev); /* <-- Enable that for vhost-user-rng */
- virtio_input_device_realize();
-#endif /* VHOST_USER_INPUT_DEV */
-
-#ifdef VHOST_USER_BLK_DEV
- vhost_user_blk_realize(); /* <-- Enable that for vhost-user-blk */
-#endif /* VHOST_USER_BLK_DEV */
-
-#ifdef VHOST_USER_RNG_DEV
- vhost_user_rng_realize(); /* <-- Enable that for vhost-user-rng */
-#endif /* VHOST_USER_RNG_DEV */
-
-#else /* VHOST_USER */
-
-#ifdef VIRTIO_RNG
- virtio_rng_realize(); /* <-- Enable that for simple rng */
-#else /* VIRTIO_RNG */
- DBG("You have not defined any device\n");
- exit(1);
-#endif /* VIRTIO_RNG */
-
-#endif /* VHOST_USER */
+ switch (device_id) {
+ case 1:
+ virtio_rng_realize(); /* <-- Enable that for simple rng */
+ break;
+ case 2:
+ vhost_user_rng_realize(); /* <-- Enable that for vhost-user-rng */
+ break;
+ case 3:
+ vhost_user_blk_realize(); /* <-- Enable that for vhost-user-blk */
+ break;
+ case 4:
+ /* Enable that for vhost-user-input */
+ vhost_user_input_init(global_vdev);
+ virtio_input_device_realize();
+ break;
+ default:
+ exit(1);
+ }
/*
* Start loopback transport layer and communication with the loopback driver
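For orientation, here is a minimal standalone sketch (not part of the patch; the hard-coded argv is hypothetical) of how the three new helpers compose. Note that find_arg() returns the index of the value that follows the flag, or -1 if the flag is absent:

    #include <stdbool.h>
    #include <stdio.h>

    /* Linked in from adapter.c */
    int find_arg(int argc, char **argv, char *str);
    int val_device_arg(char *str);
    bool check_vhu_device(char *str);

    int main(void)
    {
        char *argv[] = {"./adapter", "-d", "vhublk", "-s", "/tmp/blk.sock"};
        int argc = 5;

        int device_idx = find_arg(argc, argv, "-d");       /* -> 2 ("vhublk") */
        int device_id = val_device_arg(argv[device_idx]);  /* -> 3 */
        bool vhu = check_vhu_device(argv[device_idx]);     /* -> true */

        if (vhu) {
            /* Only vhost-user devices require the backend socket */
            int socket_idx = find_arg(argc, argv, "-s");   /* -> 4 */
            printf("socket: %s\n", argv[socket_idx]);
        }
        printf("device_id: %d\n", device_id);
        return 0;
    }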
diff --git a/queue.h b/queue.h
index a2505e2..e029e7b 100644
--- a/queue.h
+++ b/queue.h
@@ -7,12 +7,8 @@
*/
/*
- * Based on queue.h of QEMU project
- *
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * Copyright (c) 2022 Virtual Open Systems SAS.
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -82,6 +78,309 @@
* For details on the use of these macros, see the queue(3) manual page.
*/
+/*
+ * List definitions.
+ */
+#define QLIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define QLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define QLIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+#define QLIST_INIT(head) do { \
+ (head)->lh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_SWAP(dstlist, srclist, field) do { \
+ void *tmplist; \
+ tmplist = (srclist)->lh_first; \
+ (srclist)->lh_first = (dstlist)->lh_first; \
+ if ((srclist)->lh_first != NULL) { \
+ (srclist)->lh_first->field.le_prev = &(srclist)->lh_first; \
+ } \
+ (dstlist)->lh_first = tmplist; \
+ if ((dstlist)->lh_first != NULL) { \
+ (dstlist)->lh_first->field.le_prev = &(dstlist)->lh_first; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+ (elm)->field.le_next = NULL; \
+ (elm)->field.le_prev = NULL; \
+} while (/*CONSTCOND*/0)
+
+/*
+ * Like QLIST_REMOVE() but safe to call when elm is not in a list
+ */
+#define QLIST_SAFE_REMOVE(elm, field) do { \
+ if ((elm)->field.le_prev != NULL) { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+ (elm)->field.le_next = NULL; \
+ (elm)->field.le_prev = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+/* Is elm in a list? */
+#define QLIST_IS_INSERTED(elm, field) ((elm)->field.le_prev != NULL)
+
+#define QLIST_FOREACH(var, head, field) \
+ for ((var) = ((head)->lh_first); \
+ (var); \
+ (var) = ((var)->field.le_next))
+
+#define QLIST_FOREACH_SAFE(var, head, field, next_var) \
+ for ((var) = ((head)->lh_first); \
+ (var) && ((next_var) = ((var)->field.le_next), 1); \
+ (var) = (next_var))
+
+/*
+ * List access methods.
+ */
+#define QLIST_EMPTY(head) ((head)->lh_first == NULL)
+#define QLIST_FIRST(head) ((head)->lh_first)
+#define QLIST_NEXT(elm, field) ((elm)->field.le_next)
+
+
+/*
+ * Singly-linked List definitions.
+ */
+#define QSLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define QSLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define QSLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define QSLIST_INIT(head) do { \
+ (head)->slh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_HEAD_ATOMIC(head, elm, field) do { \
+ typeof(elm) save_sle_next; \
+ do { \
+ save_sle_next = (elm)->field.sle_next = (head)->slh_first; \
+ } while (qatomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) !=\
+ save_sle_next); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_MOVE_ATOMIC(dest, src) do { \
+ (dest)->slh_first = qatomic_xchg(&(src)->slh_first, NULL); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE_HEAD(head, field) do { \
+ typeof((head)->slh_first) elm = (head)->slh_first; \
+ (head)->slh_first = elm->field.sle_next; \
+ elm->field.sle_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE_AFTER(slistelm, field) do { \
+ typeof(slistelm) next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = next->field.sle_next; \
+ next->field.sle_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+ QSLIST_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->slh_first; \
+ while (curelm->field.sle_next != (elm)) \
+ curelm = curelm->field.sle_next; \
+ curelm->field.sle_next = curelm->field.sle_next->field.sle_next; \
+ (elm)->field.sle_next = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_FOREACH(var, head, field) \
+ for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
+
+#define QSLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = QSLIST_FIRST((head)); \
+ (var) && ((tvar) = QSLIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+/*
+ * Singly-linked List access methods.
+ */
+#define QSLIST_EMPTY(head) ((head)->slh_first == NULL)
+#define QSLIST_FIRST(head) ((head)->slh_first)
+#define QSLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+
+/*
+ * Simple queue definitions.
+ */
+#define QSIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define QSIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define QSIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue functions.
+ */
+#define QSIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_REMOVE_HEAD(head, field) do { \
+ typeof((head)->sqh_first) elm = (head)->sqh_first; \
+ if (((head)->sqh_first = elm->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+ elm->field.sqe_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_SPLIT_AFTER(head, elm, field, removed) do { \
+ QSIMPLEQ_INIT(removed); \
+ if (((removed)->sqh_first = (head)->sqh_first) != NULL) { \
+ if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) { \
+ (head)->sqh_last = &(head)->sqh_first; \
+ } \
+ (removed)->sqh_last = &(elm)->field.sqe_next; \
+ (elm)->field.sqe_next = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_REMOVE(head, elm, type, field) do { \
+ if ((head)->sqh_first == (elm)) { \
+ QSIMPLEQ_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->sqh_first; \
+ while (curelm->field.sqe_next != (elm)) \
+ curelm = curelm->field.sqe_next; \
+ if ((curelm->field.sqe_next = \
+ curelm->field.sqe_next->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(curelm)->field.sqe_next; \
+ (elm)->field.sqe_next = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->sqh_first); \
+ (var); \
+ (var) = ((var)->field.sqe_next))
+
+#define QSIMPLEQ_FOREACH_SAFE(var, head, field, next) \
+ for ((var) = ((head)->sqh_first); \
+ (var) && ((next = ((var)->field.sqe_next)), 1); \
+ (var) = (next))
+
+#define QSIMPLEQ_CONCAT(head1, head2) do { \
+ if (!QSIMPLEQ_EMPTY((head2))) { \
+ *(head1)->sqh_last = (head2)->sqh_first; \
+ (head1)->sqh_last = (head2)->sqh_last; \
+ QSIMPLEQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_PREPEND(head1, head2) do { \
+ if (!QSIMPLEQ_EMPTY((head2))) { \
+ *(head2)->sqh_last = (head1)->sqh_first; \
+ (head1)->sqh_first = (head2)->sqh_first; \
+ QSIMPLEQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_LAST(head, type, field) \
+ (QSIMPLEQ_EMPTY((head)) ? \
+ NULL : \
+ ((struct type *)(void *) \
+ ((char *)((head)->sqh_last) - offsetof(struct type, field))))
+
+/*
+ * Simple queue access methods.
+ */
+#define QSIMPLEQ_EMPTY_ATOMIC(head) \
+ (qatomic_read(&((head)->sqh_first)) == NULL)
+#define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
+#define QSIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
typedef struct QTailQLink {
void *tql_next;
@@ -92,41 +391,104 @@ typedef struct QTailQLink {
* Tail queue definitions. The union acts as a poor man template, as if
* it were QTailQLink<type>.
*/
-#define QTAILQ_HEAD(name, type) \
-union name { \
- struct type *tqh_first; \
- QTailQLink tqh_circ; \
+#define QTAILQ_HEAD(name, type) \
+union name { \
+ struct type *tqh_first; /* first element */ \
+ QTailQLink tqh_circ; /* link for circular backwards list */ \
}
-#define QTAILQ_HEAD_INITIALIZER(head) \
- { .tqh_circ = { NULL, &(head).tqh_circ } }
+#define QTAILQ_HEAD_INITIALIZER(head) \
+ { .tqh_circ = { NULL, &(head).tqh_circ } }
-#define QTAILQ_ENTRY(type) \
-union { \
- struct type *tqe_next; \
- QTailQLink tqe_circ; \
+#define QTAILQ_ENTRY(type) \
+union { \
+ struct type *tqe_next; /* next element */ \
+ QTailQLink tqe_circ; /* link for circular backwards list */ \
}
/*
* Tail queue functions.
*/
-#define QTAILQ_INIT(head) do { \
- (head)->tqh_first = NULL; \
- (head)->tqh_circ.tql_prev = &(head)->tqh_circ; \
-} while (0)
+#define QTAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_circ.tql_prev = &(head)->tqh_circ; \
+} while (/*CONSTCOND*/0)
-#define QTAILQ_INSERT_TAIL(head, elm, field) do { \
- (elm)->field.tqe_next = NULL; \
- (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \
- (head)->tqh_circ.tql_prev->tql_next = (elm); \
- (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
-} while (0)
+#define QTAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_circ.tql_prev = \
+ &(elm)->field.tqe_circ; \
+ else \
+ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ; \
+} while (/*CONSTCOND*/0)
+#define QTAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \
+ (head)->tqh_circ.tql_prev->tql_next = (elm); \
+ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
+} while (/*CONSTCOND*/0)
-#define QTAILQ_FOREACH(var, head, field) \
- for ((var) = ((head)->tqh_first); \
- (var); \
- (var) = ((var)->field.tqe_next))
+#define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_circ.tql_prev = \
+ &(elm)->field.tqe_circ; \
+ else \
+ (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ (listelm)->field.tqe_circ.tql_prev->tql_next = (elm); \
+ (listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_circ.tql_prev = \
+ (elm)->field.tqe_circ.tql_prev; \
+ else \
+ (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
+ (elm)->field.tqe_circ.tql_prev->tql_next = (elm)->field.tqe_next; \
+ (elm)->field.tqe_circ.tql_prev = NULL; \
+ (elm)->field.tqe_circ.tql_next = NULL; \
+ (elm)->field.tqe_next = NULL; \
+} while (/*CONSTCOND*/0)
+
+/* remove @left, @right and all elements in between from @head */
+#define QTAILQ_REMOVE_SEVERAL(head, left, right, field) do { \
+ if (((right)->field.tqe_next) != NULL) \
+ (right)->field.tqe_next->field.tqe_circ.tql_prev = \
+ (left)->field.tqe_circ.tql_prev; \
+ else \
+ (head)->tqh_circ.tql_prev = (left)->field.tqe_circ.tql_prev; \
+ (left)->field.tqe_circ.tql_prev->tql_next = (right)->field.tqe_next; \
+ } while (/*CONSTCOND*/0)
+
+#define QTAILQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->tqh_first); \
+ (var); \
+ (var) = ((var)->field.tqe_next))
+
+#define QTAILQ_FOREACH_SAFE(var, head, field, next_var) \
+ for ((var) = ((head)->tqh_first); \
+ (var) && ((next_var) = ((var)->field.tqe_next), 1); \
+ (var) = (next_var))
+
+#define QTAILQ_FOREACH_REVERSE(var, head, field) \
+ for ((var) = QTAILQ_LAST(head); \
+ (var); \
+ (var) = QTAILQ_PREV(var, field))
+
+#define QTAILQ_FOREACH_REVERSE_SAFE(var, head, field, prev_var) \
+ for ((var) = QTAILQ_LAST(head); \
+ (var) && ((prev_var) = QTAILQ_PREV(var, field), 1); \
+ (var) = (prev_var))
/*
* Tail queue access methods.
@@ -134,9 +496,81 @@ union { \
#define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define QTAILQ_FIRST(head) ((head)->tqh_first)
#define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define QTAILQ_IN_USE(elm, field) ((elm)->field.tqe_circ.tql_prev != NULL)
-#define field_at_offset(base, offset, type) \
- ((type *) (((char *) (base)) + (offset)))
+#define QTAILQ_LINK_PREV(link) \
+ ((link).tql_prev->tql_prev->tql_next)
+#define QTAILQ_LAST(head) \
+ ((typeof((head)->tqh_first)) QTAILQ_LINK_PREV((head)->tqh_circ))
+#define QTAILQ_PREV(elm, field) \
+ ((typeof((elm)->field.tqe_next)) QTAILQ_LINK_PREV((elm)->field.tqe_circ))
+#define field_at_offset(base, offset, type) \
+ ((type *) (((char *) (base)) + (offset)))
+
+/*
+ * Raw access of elements of a tail queue head. Offsets are all zero
+ * because it's a union.
+ */
+#define QTAILQ_RAW_FIRST(head) \
+ field_at_offset(head, 0, void *)
+#define QTAILQ_RAW_TQH_CIRC(head) \
+ field_at_offset(head, 0, QTailQLink)
+
+/*
+ * Raw access of elements of a tail entry
+ */
+#define QTAILQ_RAW_NEXT(elm, entry) \
+ field_at_offset(elm, entry, void *)
+#define QTAILQ_RAW_TQE_CIRC(elm, entry) \
+ field_at_offset(elm, entry, QTailQLink)
+/*
+ * Tail queue traversal using pointer arithmetic.
+ */
+#define QTAILQ_RAW_FOREACH(elm, head, entry) \
+ for ((elm) = *QTAILQ_RAW_FIRST(head); \
+ (elm); \
+ (elm) = *QTAILQ_RAW_NEXT(elm, entry))
+/*
+ * Tail queue insertion using pointer arithmetic.
+ */
+#define QTAILQ_RAW_INSERT_TAIL(head, elm, entry) do { \
+ *QTAILQ_RAW_NEXT(elm, entry) = NULL; \
+ QTAILQ_RAW_TQE_CIRC(elm, entry)->tql_prev = QTAILQ_RAW_TQH_CIRC(head)->tql_prev; \
+ QTAILQ_RAW_TQH_CIRC(head)->tql_prev->tql_next = (elm); \
+ QTAILQ_RAW_TQH_CIRC(head)->tql_prev = QTAILQ_RAW_TQE_CIRC(elm, entry); \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_RAW_FIRST(head) \
+ field_at_offset(head, 0, void *)
+
+#define QLIST_RAW_NEXT(elm, entry) \
+ field_at_offset(elm, entry, void *)
+
+#define QLIST_RAW_PREVIOUS(elm, entry) \
+ field_at_offset(elm, entry + sizeof(void *), void *)
+
+#define QLIST_RAW_FOREACH(elm, head, entry) \
+ for ((elm) = *QLIST_RAW_FIRST(head); \
+ (elm); \
+ (elm) = *QLIST_RAW_NEXT(elm, entry))
+
+#define QLIST_RAW_INSERT_AFTER(head, prev, elem, entry) do { \
+ *QLIST_RAW_NEXT(prev, entry) = elem; \
+ *QLIST_RAW_PREVIOUS(elem, entry) = QLIST_RAW_NEXT(prev, entry); \
+ *QLIST_RAW_NEXT(elem, entry) = NULL; \
+} while (0)
+
+#define QLIST_RAW_INSERT_HEAD(head, elm, entry) do { \
+ void *first = *QLIST_RAW_FIRST(head); \
+ *QLIST_RAW_FIRST(head) = elm; \
+ *QLIST_RAW_PREVIOUS(elm, entry) = QLIST_RAW_FIRST(head); \
+ if (first) { \
+ *QLIST_RAW_NEXT(elm, entry) = first; \
+ *QLIST_RAW_PREVIOUS(first, entry) = QLIST_RAW_NEXT(elm, entry); \
+ } else { \
+ *QLIST_RAW_NEXT(elm, entry) = NULL; \
+ } \
+} while (0)
#endif /* QEMU_SYS_QUEUE_H */
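Since queue.h now carries the full QLIST/QSLIST/QSIMPLEQ/QTAILQ family from QEMU, here is a minimal usage sketch of the intrusive-link pattern these macros expect ('struct item' and its field names are hypothetical, not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include "queue.h"

    struct item {
        int value;
        QLIST_ENTRY(item) link;    /* embeds the le_next/le_prev pointers */
    };

    QLIST_HEAD(item_list, item);

    int main(void)
    {
        struct item_list head = QLIST_HEAD_INITIALIZER(head);
        struct item *it, *tmp;
        int i;

        for (i = 0; i < 3; i++) {
            it = malloc(sizeof(*it));
            it->value = i;
            QLIST_INSERT_HEAD(&head, it, link);
        }

        /* The _SAFE variant allows removing/freeing 'it' while iterating */
        QLIST_FOREACH_SAFE(it, &head, link, tmp) {
            printf("%d\n", it->value);
            QLIST_REMOVE(it, link);
            free(it);
        }
        return 0;
    }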
diff --git a/vhost_loopback.c b/vhost_loopback.c
index aed1cc5..a8b78b6 100644
--- a/vhost_loopback.c
+++ b/vhost_loopback.c
@@ -70,8 +70,6 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
int i, r, e;
- DBG("vhost_dev_enable_notifiers(...)\n");
-
/*
* We will pass the notifiers to the kernel, make sure that QEMU
* doesn't interfere.
@@ -84,6 +82,7 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
goto fail;
}
+
for (i = 0; i < hdev->nvqs; ++i) {
r = virtio_bus_set_host_notifier(vdev->vbus, hdev->vq_index + i,
true);
@@ -180,8 +179,6 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
}
file.index = vhost_user_get_vq_index(hdev, n);
- DBG("vhost_virtqueue_mask -> index: %d, n: %d, file.fd: %d\n",
- index, n, file.fd);
r = vhost_user_set_vring_call(&file);
if (r < 0) {
@@ -198,8 +195,6 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
uint64_t s, l, a;
int r;
- DBG("vhost_virtqueue_start()\n");
-
int vhost_vq_index = vhost_user_get_vq_index(dev, idx);
struct vhost_vring_file file = {
.index = vhost_vq_index
@@ -250,7 +245,6 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
- DBG("vdev->vq[n].vring.used: 0x%lx\n", a);
vq->used = (void *)a;
if (!vq->used || l != s) {
DBG("Error : vq->used = a\n");
@@ -264,6 +258,10 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
return r;
}
+ /* The next line has to be disabled for rng */
+ /* Clear and discard previous events if any. */
+ //event_notifier_test_and_clear(virtio_queue_get_host_notifier(vvq));
+
file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
r = vhost_user_set_vring_kick(&file);
if (r) {
@@ -288,6 +286,14 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
return 0;
}
+void update_mem_table(VirtIODevice *vdev)
+{
+ print_mem_table(vdev->vhdev);
+ vhost_commit_vqs(vdev->vhdev);
+ print_mem_table(vdev->vhdev);
+ (void)vhost_user_set_mem_table(vdev->vhdev);
+}
+
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
@@ -296,8 +302,6 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
hdev->started = true;
hdev->vdev = vdev;
- DBG("vhost_dev_start()\n");
-
r = vhost_dev_set_features(hdev, hdev->log_enabled);
if (r < 0) {
return r;
@@ -308,20 +312,9 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
DBG("memory_listener_register?\n");
}
- /* TODO: We might need this function in the next release */
- /*
- * r = vhost_user_set_mem_table(hdev);
- * if (r < 0) {
- * DBG("vhost_set_mem_table failed\n");
- * return r;
- * }
- */
-
/* This is used to exchange the loopback_fd with the vhost-user-device */
vhost_user_share_fd();
- DBG("hdev->nvqs: %d\n", hdev->nvqs);
-
for (i = 0; i < hdev->nvqs; ++i) {
r = vhost_virtqueue_start(hdev,
vdev,
@@ -333,8 +326,6 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
}
}
- DBG("vhost_dev_start return successfully\n");
-
return 0;
}
@@ -342,23 +333,18 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
uint32_t config_len)
{
- DBG("vhost_dev_get_config(...)\n");
-
return vhost_user_get_config(hdev, config, config_len);
}
int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
uint32_t offset, uint32_t size, uint32_t flags)
{
- DBG("vhost_dev_set_config(...)\n");
return vhost_user_set_config(hdev, data, offset, size, flags);
-
}
void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
const VhostDevConfigOps *ops)
{
- DBG("vhost_dev_set_config_notifier(...)\n");
hdev->config_ops = ops;
}
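The new update_mem_table() is exposed to device code through the update_mem_table hook added to VirtioDeviceClass later in this series (see the vhost_user_blk.c, vhost_user_input.c and vhost_user_rng.c hunks). A minimal sketch of a hypothetical caller:

    #include "virtio_loopback.h"
    #include "vhost_loopback.h"

    /* Sketch only: transport code can refresh the backend's memory table
     * through the class hook without knowing the concrete device type. */
    static void refresh_backend_mem_table(VirtIODevice *vdev)
    {
        if (vdev->vdev_class->update_mem_table) {
            /* Lands in update_mem_table(): vhost_commit_vqs() followed by
             * vhost_user_set_mem_table() on vdev->vhdev */
            vdev->vdev_class->update_mem_table(vdev);
        }
    }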
diff --git a/vhost_loopback.h b/vhost_loopback.h
index 69a3f0d..b73caeb 100644
--- a/vhost_loopback.h
+++ b/vhost_loopback.h
@@ -58,6 +58,8 @@ int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
int vhost_dev_set_inflight(struct vhost_dev *dev,
struct vhost_inflight *inflight);
+void update_mem_table(VirtIODevice *vdev);
+
struct vhost_inflight {
int fd;
diff --git a/vhost_user_blk.c b/vhost_user_blk.c
index eebd599..f92d1c7 100644
--- a/vhost_user_blk.c
+++ b/vhost_user_blk.c
@@ -98,11 +98,13 @@ static int vhost_user_blk_start(VirtIODevice *vdev)
DBG("After vhost_dev_set_inflight\n");
+
ret = vhost_dev_start(s->vhost_dev, vdev);
if (ret < 0) {
DBG("Error starting vhost\n");
return ret;
}
+
s->started_vu = true;
DBG("vhost_virtqueue_mask\n");
@@ -172,6 +174,8 @@ static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
virtio_add_feature(&features, VIRTIO_BLK_F_FLUSH);
virtio_add_feature(&features, VIRTIO_BLK_F_DISCARD);
virtio_add_feature(&features, VIRTIO_BLK_F_WRITE_ZEROES);
+ virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
+ virtio_add_feature(&features, VIRTIO_BLK_F_RO);
/*
* TODO: Delete if not needed
* virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
@@ -184,11 +188,16 @@ static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
*
*/
+ if (s->config_wce) {
+ DBG("Add config feature\n");
+ virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE);
+ }
+
if (s->num_queues > 1) {
virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
}
- return features;
+ return vhost_user_get_features(&features);
}
static int vhost_user_blk_connect(VirtIODevice *vdev)
@@ -320,6 +329,10 @@ static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
* return;
* }
*/
+ if (blkcfg->wce == s->blkcfg.wce) {
+ DBG("blkcfg->wce == s->blkcfg.wce\n");
+ return;
+ }
ret = vhost_dev_set_config(s->vhost_dev, &blkcfg->wce,
offsetof(struct virtio_blk_config, wce),
@@ -382,6 +395,7 @@ static void virtio_dev_class_init(VirtIODevice *vdev)
vdev->vdev_class->get_features = vhost_user_blk_get_features;
vdev->vdev_class->set_status = vhost_user_blk_set_status;
vdev->vdev_class->reset = vhost_user_blk_reset;
+ vdev->vdev_class->update_mem_table = update_mem_table;
}
@@ -392,6 +406,7 @@ void vhost_user_blk_init(VirtIODevice *vdev)
VHostUserBlk *vhublk = (VHostUserBlk *)malloc(sizeof(VHostUserBlk));
vdev->vhublk = vhublk;
+ vdev->nvqs = &dev->nvqs;
vhublk->parent = vdev;
vhublk->virtqs = vdev->vqs;
vhublk->vhost_dev = dev;
@@ -472,6 +487,7 @@ void vhost_user_blk_realize(void)
vhost_user_blk_init(global_vdev);
+ global_vdev->vhublk->config_wce = 1;
/* FIXME: We temporarily hardcoded the vrtqueues number */
global_vdev->vhublk->num_queues = 1;
diff --git a/vhost_user_blk.h b/vhost_user_blk.h
index ddb21fe..33e140a 100644
--- a/vhost_user_blk.h
+++ b/vhost_user_blk.h
@@ -46,6 +46,7 @@ struct VHostUserBlk {
uint16_t num_queues;
uint32_t queue_size;
/* uint32_t config_wce; //We will need it for the next release */
+ uint32_t config_wce;
struct vhost_inflight *inflight;
struct vhost_virtqueue *vhost_vqs;
struct virtio_blk_config blkcfg;
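Taken together, the blk hunks wire the new config_wce knob into feature negotiation; a condensed, hypothetical view (not a literal excerpt from the patch):

    #include "vhost_user_blk.h"

    /* vhost_user_blk_realize() sets config_wce = 1 before negotiation, so
     * vhost_user_blk_get_features() advertises writeback-cache control and
     * then folds in the backend's feature mask. */
    static uint64_t blk_features_sketch(VHostUserBlk *s, uint64_t features)
    {
        if (s->config_wce) {
            virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE);
        }
        return vhost_user_get_features(&features);
    }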
diff --git a/vhost_user_input.c b/vhost_user_input.c
index cf3fb2e..cd5cb4f 100644
--- a/vhost_user_input.c
+++ b/vhost_user_input.c
@@ -106,6 +106,7 @@ static void vhost_input_class_init(VirtIODevice *vdev)
vdev->vhuinput->vdev_input->input_class->realize = vhost_user_input_realize;
vdev->vhuinput->vdev_input->input_class->change_active =
vhost_input_change_active;
+ vdev->vdev_class->update_mem_table = update_mem_table;
}
@@ -123,6 +124,7 @@ void vhost_user_input_init(VirtIODevice *vdev)
vdev->vinput->input_class = input_class;
vdev->vhuinput = vhuinput;
+ vdev->nvqs = &dev->nvqs;
vhuinput->vdev = vdev;
vhuinput->vhost_dev = dev;
vhuinput->vdev_input = vinput;
@@ -158,6 +160,9 @@ void vhost_user_input_realize()
global_vdev->vhuinput->vhost_dev->num_queues = nvqs;
+ global_vdev->vq = (struct VirtQueue *)malloc(
+ sizeof(struct VirtQueue) * nvqs);
+
global_vdev->vhuinput->vhost_dev->nvqs = nvqs;
global_vdev->vhuinput->vhost_dev->vqs = (struct vhost_virtqueue *)malloc(
sizeof(struct vhost_virtqueue) * nvqs);
diff --git a/vhost_user_loopback.c b/vhost_user_loopback.c
index 368e699..8ea366f 100644
--- a/vhost_user_loopback.c
+++ b/vhost_user_loopback.c
@@ -225,6 +225,7 @@ int process_message_reply(const VhostUserMsg *msg)
VhostUserMsg msg_reply;
if ((msg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
+ DBG("Don't wait for any reply!\n");
return 0;
}
@@ -813,16 +814,304 @@ int vhost_user_set_inflight_fd(struct vhost_dev *dev,
}
-/* -------------------- Vring functions -------------------- */
+/* -------------------- Mem regions functions -------------------- */
+
+
+static MemoryRegion *vhost_user_get_mr_data(struct vhost_memory_region *reg,
+ ram_addr_t *offset, int *fd)
+{
+ MemoryRegion *mr = NULL;
+
+ *offset = reg->guest_phys_addr & (PAGE_SIZE - 1);
+
+ *fd = loopback_fd;
+
+ return mr;
+}
+
+static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
+ struct vhost_memory_region *src,
+ uint64_t mmap_offset)
+{
+ assert(src != NULL && dst != NULL);
+ dst->userspace_addr = src->userspace_addr;
+ dst->memory_size = src->memory_size;
+ dst->guest_phys_addr = src->guest_phys_addr;
+ dst->mmap_offset = mmap_offset;
+}
+
+
+static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
+ struct vhost_dev *dev,
+ VhostUserMsg *msg,
+ int *fds, size_t *fd_num,
+ bool track_ramblocks)
+{
+ DBG("vhost_user_fill_set_mem_table_msg(...) not yet implemented\n");
+ return 1;
+}
+
+static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
+ struct vhost_memory_region *vdev_reg)
+{
+ return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
+ shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
+ shadow_reg->memory_size == vdev_reg->memory_size;
+}
+
+
+/* Sync the two region lists (device / adapter) */
+static void scrub_shadow_regions(struct vhost_dev *dev,
+ struct scrub_regions *add_reg,
+ int *nr_add_reg,
+ struct scrub_regions *rem_reg,
+ int *nr_rem_reg, uint64_t *shadow_pcb,
+ bool track_ramblocks)
+{
+ struct vhost_user *u = adev->vudev;
+ bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
+ struct vhost_memory_region *reg, *shadow_reg;
+ int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
+ ram_addr_t offset;
+ MemoryRegion *mr;
+ bool matching;
+
+ /*
+ * Find memory regions present in our shadow state which are not in
+ * the device's current memory state.
+ *
+ * Mark regions in both the shadow and device state as "found".
+ */
+ for (i = 0; i < u->num_shadow_regions; i++) {
+ shadow_reg = &u->shadow_regions[i];
+ matching = false;
+
+ for (j = 0; j < dev->mem->nregions; j++) {
+ reg = &dev->mem->regions[j];
+
+ mr = vhost_user_get_mr_data(reg, &offset, &fd);
+
+ if (reg_equal(shadow_reg, reg)) {
+ matching = true;
+ found[j] = true;
+ break;
+ }
+ }
+
+ /*
+ * If the region was not found in the current device memory state
+ * create an entry for it in the removed list.
+ */
+ if (!matching) {
+ rem_reg[rm_idx].region = shadow_reg;
+ rem_reg[rm_idx++].reg_idx = i;
+ }
+ }
+
+ /*
+ * For regions not marked "found", create entries in the added list.
+ *
+ * Note their indexes in the device memory state and the indexes of their
+ * file descriptors.
+ */
+
+ DBG("For regions not marked 'found', create entries in the added list\n");
+ DBG("dev->mem->nregions: %d\n", dev->mem->nregions);
+
+ for (i = 0; i < dev->mem->nregions; i++) {
+
+ reg = &dev->mem->regions[i];
+
+ mr = vhost_user_get_mr_data(reg, &offset, &fd);
+
+ /*
+ * If the region was in both the shadow and device state we don't
+ * need to send a VHOST_USER_ADD_MEM_REG message for it.
+ */
+ if (found[i]) {
+ continue;
+ }
+
+ add_reg[add_idx].region = reg;
+ add_reg[add_idx].reg_idx = i;
+ add_reg[add_idx++].fd_idx = fd_num;
+
+ }
+ *nr_rem_reg = rm_idx;
+ *nr_add_reg = add_idx;
+
+ return;
+}
+
+
+static int send_remove_regions(struct vhost_dev *dev,
+ struct scrub_regions *remove_reg,
+ int nr_rem_reg, VhostUserMsg *msg,
+ bool reply_supported)
+{
+ struct vhost_user *u = adev->vudev;
+ struct vhost_memory_region *shadow_reg;
+ int i, fd, shadow_reg_idx, ret;
+ ram_addr_t offset;
+ VhostUserMemoryRegion region_buffer;
+
+ /*
+ * The regions in remove_reg appear in the same order they do in the
+ * shadow table. Therefore we can minimize memory copies by iterating
+ * through remove_reg backwards.
+ */
+ for (i = nr_rem_reg - 1; i >= 0; i--) {
+ shadow_reg = remove_reg[i].region;
+ shadow_reg_idx = remove_reg[i].reg_idx;
+
+ DBG("Try to remove: 0x%llx\n", remove_reg[i].region->guest_phys_addr);
+
+ (void)vhost_user_get_mr_data(shadow_reg, &offset, &fd);
+
+ if (fd > 0) {
+ msg->request = VHOST_USER_REM_MEM_REG;
+ vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
+ msg->payload.memreg.region = region_buffer;
+
+ msg->fd_num = 1;
+ memcpy(msg->fds, &loopback_fd, sizeof(int));
+
+ if (vu_message_write(client_sock, msg) < 0) {
+ return -1;
+ }
+
+ if (reply_supported) {
+ msg->flags |= VHOST_USER_NEED_REPLY_MASK;
+ ret = process_message_reply(msg);
+
+ /*
+ * TODO: For this release do not process the message:
+ * if (ret) {
+ * return ret;
+ * }
+ */
+ }
+ }
+
+ }
+
+ return 0;
+}
+
+static int send_add_regions(struct vhost_dev *dev,
+ struct scrub_regions *add_reg, int nr_add_reg,
+ VhostUserMsg *msg, uint64_t *shadow_pcb,
+ bool reply_supported, bool track_ramblocks)
+{
+ struct vhost_user *u = adev->vudev;
+ int i, fd, ret, reg_idx, reg_fd_idx;
+ struct vhost_memory_region *reg;
+ MemoryRegion *mr;
+ ram_addr_t offset;
+ VhostUserMsg msg_reply;
+ VhostUserMemoryRegion region_buffer;
+
+ for (i = 0; i < nr_add_reg; i++) {
+ reg = add_reg[i].region;
+ reg_idx = add_reg[i].reg_idx;
+ reg_fd_idx = add_reg[i].fd_idx;
+
+ DBG("Try to add: 0x%llx\n", add_reg[i].region->guest_phys_addr);
+
+ mr = vhost_user_get_mr_data(reg, &offset, &fd);
+
+ if (fd > 0) {
+
+ msg->request = VHOST_USER_ADD_MEM_REG;
+ vhost_user_fill_msg_region(&region_buffer, reg, offset);
+ msg->payload.memreg.region = region_buffer;
+
+ msg->fd_num = 1;
+ memcpy(msg->fds, &loopback_fd, sizeof(int));
+
+ if (vu_message_write(client_sock, msg) < 0) {
+ DBG("send_add_regions -> write failed\n");
+ return -1;
+ }
+
+ if (reply_supported) {
+ msg->flags |= VHOST_USER_NEED_REPLY_MASK;
+ ret = process_message_reply(msg);
+
+ /*
+ * TODO: For this release do not process the message:
+ * if (ret) {
+ * return ret;
+ * }
+ */
+ }
+ } else if (track_ramblocks) {
+ u->region_rb_offset[reg_idx] = 0;
+ u->region_rb[reg_idx] = NULL;
+ }
+
+ }
+
+ return 0;
+}
+
+static int vhost_user_add_remove_regions(struct vhost_dev *dev,
+ VhostUserMsg *msg,
+ bool reply_supported,
+ bool track_ramblocks)
+{
+ struct vhost_user *u = adev->vudev;
+ struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
+ struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
+ uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
+ int nr_add_reg, nr_rem_reg;
+
+ msg->size = sizeof(msg->payload.memreg);
+
+ /* Find the regions which need to be removed or added. */
+ scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
+ shadow_pcb, track_ramblocks);
+
+ if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
+ reply_supported) < 0)
+ {
+ DBG("send_remove_regions failed\n");
+ goto err;
+ }
+
+ if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg,
+ shadow_pcb, reply_supported, track_ramblocks) < 0)
+ {
+ DBG("send_add_regions failed\n");
+ goto err;
+ }
+
+ /* TODO: At this point we need to update the shadow list */
+ u->num_shadow_regions = dev->mem->nregions;
+ memcpy(u->shadow_regions, dev->mem->regions,
+ dev->mem->nregions * sizeof(struct vhost_memory_region));
+
+ return 0;
+
+err:
+ DBG("vhost_user_add_remove_regions failed\n");
+ return -1;
+}
+
/* TODO: This function might be implemented in a later release */
static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
bool reply_supported,
bool config_mem_slots)
{
+ DBG("vhost_user_set_mem_table_postcopy(...)\n");
return 0;
}
+
/*
* TODO: This function is not yet fully optimized because in the current release
* it is not used. It will be implemented or deleted in a later release.
@@ -837,6 +1126,7 @@ int vhost_user_set_mem_table(struct vhost_dev *dev)
virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);
int ret;
+ struct vhost_user *u = adev->vudev;
bool do_postcopy = false;
if (do_postcopy) {
@@ -856,18 +1146,222 @@ int vhost_user_set_mem_table(struct vhost_dev *dev)
msg.flags |= VHOST_USER_NEED_REPLY_MASK;
}
+ if (config_mem_slots) {
+ DBG("vonfig_mem_slots is enabled\n");
+ if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
+ false) < 0) {
+ return -1;
+ }
+ } else {
+
+ DBG("To be implemented!\n");
+ exit(1);
+
+ if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
+ false) < 0) {
+ return -1;
+ }
+ if (vu_message_write(client_sock, &msg) < 0) {
+ DBG("vhost_user_set_mem_table failed write msg\n");
+ return -1;
+ }
+
+ if (reply_supported) {
+ DBG("reply is supported\n");
+ return process_message_reply(&msg);
+ }
+ }
+
return 0;
}
-/* ----------------- End of Vring functions ---------------- */
+
+
+void print_mem_table(struct vhost_dev *dev)
+{
+ struct vhost_memory_region *cur_vmr;
+ int i;
+
+ DBG("print_mem_table:\n");
+
+ for (i = 0; i < dev->n_mem_sections; i++) {
+
+ cur_vmr = dev->mem->regions + i;
+ DBG("regions[%d]->guest_phys_addr: 0x%llx\n",
+ i, cur_vmr->guest_phys_addr);
+ DBG("regions[%d]->memory_size: 0x%llu\n",
+ i, cur_vmr->memory_size);
+ DBG("regions[%d]->userspace_addr: 0x%llx\n",
+ i, cur_vmr->userspace_addr);
+ DBG("regions[%d]->flags_padding: 0x%llx\n",
+ i, cur_vmr->flags_padding);
+
+ }
+}
+
+static void vhost_add_reg(struct vhost_dev *dev, uint64_t hpa, uint64_t len)
+{
+ size_t regions_size, old_regions_size;
+ struct vhost_memory *temp_mem;
+ struct vhost_memory_region *cur_vmr;
+
+ DBG("vhost_add_reg (hpa: 0x%lx, len: %lu)\n", hpa, len);
+
+ /* Rebuild the regions list from the new sections list */
+ regions_size = offsetof(struct vhost_memory, regions) +
+ (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
+ temp_mem = (struct vhost_memory *)malloc(regions_size);
+
+ /* Copy the old mem structure */
+ old_regions_size = offsetof(struct vhost_memory, regions) +
+ (dev->mem->nregions) * sizeof dev->mem->regions[0];
+ memcpy(temp_mem, dev->mem, old_regions_size);
+
+ /* Increase the regions' counter */
+ temp_mem->nregions = dev->mem->nregions + 1;
+ dev->n_mem_sections = temp_mem->nregions;
+
+ /* Clear the previous structure */
+ free(dev->mem);
+
+ /* Point to the new one */
+ dev->mem = temp_mem;
+
+ /* Init the new region */
+ cur_vmr = dev->mem->regions + (dev->mem->nregions - 1);
+ cur_vmr->guest_phys_addr = hpa;
+ cur_vmr->memory_size = len;
+ cur_vmr->userspace_addr = 0;
+ cur_vmr->flags_padding = 0;
+}
+
+static bool find_reg(struct vhost_dev *dev, uint64_t hpa, uint64_t len)
+{
+ struct vhost_memory_region *cur_vmr;
+ int i;
+
+ DBG("Try to find hpa: 0x%lx\n", hpa);
+
+ for (i = dev->nvqs; i < dev->n_mem_sections; i++) {
+
+ cur_vmr = dev->mem->regions + i;
+ if ((hpa >= cur_vmr->guest_phys_addr) &&
+ ((hpa + len) <= (cur_vmr->guest_phys_addr
+ + cur_vmr->memory_size))) {
+ DBG("Find region with hpa: 0x%llx, and len: %lld\n",
+ cur_vmr->guest_phys_addr, cur_vmr->memory_size);
+ return true;
+ }
+ }
+
+ DBG("Did not find region with hpa: 0x%lx\n", hpa);
+ return false;
+}
+
+int last_avail = -1;
+
+void find_add_new_reg(struct vhost_dev *dev)
+{
+ int sglist_elem_num;
+
+ (void)ioctl(loopback_fd, BARRIER);
+
+ DBG("Total nvqs: %d\n", dev->nvqs);
+ for (int i = 0; i < dev->nvqs; i++) {
+
+ VRing *vring = &dev->vdev->vq[i].vring;
+ uint64_t vring_num = vring->num;
+
+ DBG("For vq[%d]:\n", i);
+ DBG("vqs[%u] hpa 0x%lx\n", i, vring_phys_addrs[i]);
+ DBG("vq[%d].vring.num: %ld\n", i, vring_num);
+ DBG("We got avail buf: %d\n",
+ ((VRingAvail *)(dev->vdev->vq[i].vring.avail))->idx);
+
+ int avail_diff = ((VRingAvail *)(dev->vdev->vq[i].vring.avail))->idx
+ - last_avail;
+
+ for (int j = 0; j < vring_num; j++) {
+
+ uint64_t desc_addr = dev->vdev->vq[i].vring.desc;
+ VRingDesc desc_p = ((VRingDesc *)desc_addr)[j];
+ uint64_t sg_addr = desc_p.addr;
+ uint64_t sg_len = desc_p.len;
+
+ if (desc_p.addr == 0) {
+ sglist_elem_num = j;
+ DBG("We got avail buf: %d\n",
+ ((VRingAvail *)(dev->vdev->vq[i].vring.avail))->idx);
+ DBG("We got sglist_ele_num: %d\n", sglist_elem_num);
+ break;
+ }
+
+
+ DBG("desc[%u] 0x%lx\n", j, desc_addr);
+ DBG("desc[%u].addr 0x%lx\n", j, sg_addr);
+ DBG("desc[%u].len 0x%lu\n", j, sg_len);
+ DBG("desc[%u].flags 0x%u\n", j, desc_p.flags);
+
+ if (!find_reg(dev, sg_addr, sg_len)) {
+ vhost_add_reg(dev, sg_addr, sg_len);
+ }
+
+ }
+ DBG("We got avail buf: %d\n",
+ ((VRingAvail *)(dev->vdev->vq[i].vring.avail))->idx);
+
+ last_avail = ((VRingAvail *)(dev->vdev->vq[i].vring.avail))->idx;
+ sglist_elem_num = 3 * avail_diff;
+ }
+}
+
+void vhost_commit_init_vqs(struct vhost_dev *dev)
+{
+ size_t regions_size;
+ int i;
+
+ dev->n_mem_sections = dev->nvqs;
+ DBG("dev->n_mem_sections: %d\n", dev->n_mem_sections);
+
+ /* Rebuild the regions list from the new sections list */
+ regions_size = offsetof(struct vhost_memory, regions) +
+ dev->n_mem_sections * sizeof dev->mem->regions[0];
+ dev->mem = (struct vhost_memory *)malloc(regions_size);
+ dev->mem->nregions = dev->n_mem_sections;
+
+ for (i = 0; i < dev->nvqs; i++) {
+ struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
+
+ cur_vmr->guest_phys_addr = vring_phys_addrs[i] << PAGE_SHIFT;
+ cur_vmr->memory_size = get_vqs_max_size(global_vdev);
+ cur_vmr->userspace_addr = 0;
+ cur_vmr->flags_padding = 0;
+ }
+}
+
+void vhost_commit_vqs(struct vhost_dev *dev)
+{
+ free(dev->mem);
+ vhost_commit_init_vqs(dev);
+ find_add_new_reg(dev);
+}
+
+/* -------------------- End of Mem regions functions -------------------- */
+
int vhost_user_backend_init(struct vhost_dev *vhdev)
{
uint64_t features, protocol_features, ram_slots;
int err;
- DBG("vhost_user_backend_init(...)\n");
-
err = vhost_user_get_features(&features);
if (err < 0) {
DBG("vhost_backend_init failed\n");
@@ -1024,6 +1518,17 @@ void vhost_dev_init(struct vhost_dev *vhdev)
}
}
+ vhdev->mem = (struct vhost_memory *)malloc(sizeof(struct vhost_memory));
+ vhdev->mem->nregions = 0;
+
+ vhdev->n_mem_sections = 0;
+ vhdev->mem_sections = NULL;
+ vhdev->log = NULL;
+ vhdev->log_size = 0;
+ vhdev->log_enabled = false;
+ vhdev->started = false;
+
/*
* TODO: busyloop == 0 in rng case, but we might need it for new devices:
*
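To make the scrub logic above concrete, a self-contained toy example (the region values are made up; the comparison mirrors reg_equal() from the patch) of how regions are classified into the remove and add lists:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct region {
        uint64_t guest_phys_addr;
        uint64_t memory_size;
        uint64_t userspace_addr;
    };

    static bool regions_match(struct region *a, struct region *b)
    {
        return a->guest_phys_addr == b->guest_phys_addr &&
               a->userspace_addr == b->userspace_addr &&
               a->memory_size == b->memory_size;
    }

    int main(void)
    {
        /* Shadow = last state sent to the backend, current = device state now */
        struct region shadow[] = { {0x1000, 0x1000, 0}, {0x4000, 0x2000, 0} };
        struct region current[] = { {0x4000, 0x2000, 0}, {0x8000, 0x1000, 0} };
        bool found[2] = { false, false };
        int i, j;

        /* Shadow-only regions produce VHOST_USER_REM_MEM_REG messages */
        for (i = 0; i < 2; i++) {
            bool matching = false;
            for (j = 0; j < 2; j++) {
                if (regions_match(&shadow[i], &current[j])) {
                    matching = true;
                    found[j] = true;
                }
            }
            if (!matching) {
                printf("remove 0x%lx\n", (unsigned long)shadow[i].guest_phys_addr);
            }
        }

        /* Device-only regions produce VHOST_USER_ADD_MEM_REG messages */
        for (j = 0; j < 2; j++) {
            if (!found[j]) {
                printf("add 0x%lx\n", (unsigned long)current[j].guest_phys_addr);
            }
        }
        return 0; /* prints: remove 0x1000, add 0x8000 */
    }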
diff --git a/vhost_user_loopback.h b/vhost_user_loopback.h
index a56a1d2..45b6206 100644
--- a/vhost_user_loopback.h
+++ b/vhost_user_loopback.h
@@ -34,6 +34,7 @@
#include <linux/vhost.h>
#include <pthread.h>
#include "virtio_loopback.h"
+#include "queue.h"
typedef struct adapter_dev {
struct vhost_dev *vdev;
@@ -42,6 +43,15 @@ typedef struct adapter_dev {
VirtioBus *vbus;
} AdapterDev;
+
+struct scrub_regions {
+ struct vhost_memory_region *region;
+ int reg_idx;
+ int fd_idx;
+};
+
struct vhost_virtqueue {
int kick;
int call;
@@ -64,8 +74,25 @@ typedef struct VhostDevConfigOps {
int (*vhost_dev_config_notifier)(struct vhost_dev *dev);
} VhostDevConfigOps;
+
+typedef struct MemoryRegion MemoryRegion;
+
+typedef struct MemoryRegionSection {
+ uint64_t size;
+ MemoryRegion *mr;
+ uint64_t offset_within_region;
+ uint64_t offset_within_address_space;
+ bool readonly;
+ bool nonvolatile;
+} MemoryRegionSection;
+
struct vhost_dev {
VirtIODevice *vdev;
+ struct vhost_memory *mem;
+ int n_mem_sections;
+ MemoryRegionSection *mem_sections;
+ int n_tmp_sections;
+ MemoryRegionSection *tmp_sections;
struct vhost_virtqueue *vqs;
unsigned int nvqs;
/* the first virtqueue which would be used by this vhost dev */
@@ -73,7 +100,7 @@ struct vhost_dev {
/* one past the last vq index for the virtio device (not vhost) */
int vq_index_end;
/* if non-zero, minimum required value for max_queues */
- uint64_t num_queues;
+ int num_queues;
uint64_t features;
uint64_t acked_features;
uint64_t backend_features;
@@ -84,19 +111,128 @@ struct vhost_dev {
bool log_enabled;
uint64_t log_size;
void *migration_blocker;
- /* Vhost-user struct */
+ void *opaque;
+ struct vhost_log *log;
+ QLIST_ENTRY(vhost_dev) entry;
uint64_t memory_slots;
const VhostDevConfigOps *config_ops;
};
+
+#define VHOST_USER_MAX_RAM_SLOTS 512
+
+typedef uint64_t ram_addr_t;
+typedef struct RAMBlock RAMBlock;
+
+typedef struct RAMBlock {
+ struct MemoryRegion *mr;
+ uint8_t *host;
+ uint8_t *colo_cache; /* For colo, VM's ram cache */
+ ram_addr_t offset;
+ ram_addr_t used_length;
+ ram_addr_t max_length;
+ void (*resized)(const char*, uint64_t length, void *host);
+ uint32_t flags;
+ /* Protected by iothread lock. */
+ char idstr[256];
+ /* RCU-enabled, writes protected by the ramlist lock */
+ int fd;
+ size_t page_size;
+ /* dirty bitmap used during migration */
+ unsigned long *bmap;
+ /* bitmap of already received pages in postcopy */
+ unsigned long *receivedmap;
+
+ /*
+ * bitmap to track already cleared dirty bitmap. When the bit is
+ * set, it means the corresponding memory chunk needs a log-clear.
+ * Set this up to non-NULL to enable the capability to postpone
+ * and split clearing of dirty bitmap on the remote node (e.g.,
+ * KVM). The bitmap will be set only when doing global sync.
+ *
+ * NOTE: this bitmap is different comparing to the other bitmaps
+ * in that one bit can represent multiple guest pages (which is
+ * decided by the `clear_bmap_shift' variable below). On
+ * destination side, this should always be NULL, and the variable
+ * `clear_bmap_shift' is meaningless.
+ */
+ unsigned long *clear_bmap;
+ uint8_t clear_bmap_shift;
+
+ /*
+ * RAM block length that corresponds to the used_length on the migration
+ * source (after RAM block sizes were synchronized). Especially, after
+ * starting to run the guest, used_length and postcopy_length can differ.
+ * Used to register/unregister uffd handlers and as the size of the received
+ * bitmap. Receiving any page beyond this length will bail out, as it
+ * could not have been valid on the source.
+ */
+ ram_addr_t postcopy_length;
+} RAMBlock;
+
+/*
+ * MemoryRegion:
+ *
+ * A struct representing a memory region.
+ */
+typedef struct MemoryRegion {
+ /* private: */
+
+ /* The following fields should fit in a cache line */
+ bool romd_mode;
+ bool ram;
+ bool subpage;
+ bool readonly; /* For RAM regions */
+ bool nonvolatile;
+ bool rom_device;
+ bool flush_coalesced_mmio;
+ uint8_t dirty_log_mask;
+ bool is_iommu;
+ RAMBlock *ram_block;
+
+ void *opaque;
+ MemoryRegion *container;
+ uint64_t size;
+ uint64_t addr;
+ void (*destructor)(MemoryRegion *mr);
+ uint64_t align;
+ bool terminates;
+ bool ram_device;
+ bool enabled;
+ bool warning_printed; /* For reservations */
+ uint8_t vga_logging_count;
+ MemoryRegion *alias;
+ uint64_t alias_offset;
+ int32_t priority;
+ QTAILQ_HEAD(, MemoryRegion) subregions;
+ QTAILQ_ENTRY(MemoryRegion) subregions_link;
+ const char *name;
+ unsigned ioeventfd_nb;
+} MemoryRegion;
+
struct vhost_user {
struct vhost_dev *dev;
+
+ /* Shared between vhost devs of the same virtio device */
+
+ uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
/* Length of the region_rb and region_rb_offset arrays */
size_t region_rb_len;
+ /* RAMBlock associated with a given region */
+ RAMBlock **region_rb;
+ /*
+ * The offset from the start of the RAMBlock to the start of the
+ * vhost region.
+ */
+ ram_addr_t *region_rb_offset;
+
/* True once we've entered postcopy_listen */
bool postcopy_listen;
+
/* Our current regions */
int num_shadow_regions;
+ struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};
/* Global variables */
@@ -118,7 +254,6 @@ extern struct vhost_user *vudev;
* Set a reasonable maximum number of ram slots, which will be supported by
* any architecture.
*/
-#define VHOST_USER_MAX_RAM_SLOTS 32
#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
/*
@@ -809,6 +944,12 @@ int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
uint32_t offset, uint32_t size, uint32_t flags);
+void vhost_commit_init_vqs(struct vhost_dev *dev);
+void vhost_commit_vqs(struct vhost_dev *dev);
+void find_add_new_reg(struct vhost_dev *dev);
+void print_mem_table(struct vhost_dev *dev);
+
/* FIXME: This needs to move to a better place */
struct vhost_inflight;
int vhost_user_get_inflight_fd(struct vhost_dev *dev,
diff --git a/vhost_user_rng.c b/vhost_user_rng.c
index f674ef6..860c33e 100644
--- a/vhost_user_rng.c
+++ b/vhost_user_rng.c
@@ -139,6 +139,7 @@ static void virtio_dev_class_init(VirtIODevice *vdev)
vdev->vdev_class->get_features = vu_rng_get_features;
vdev->vdev_class->guest_notifier_mask = vu_rng_guest_notifier_mask;
vdev->vdev_class->guest_notifier_pending = vu_rng_guest_notifier_pending;
+ vdev->vdev_class->update_mem_table = update_mem_table;
}
@@ -146,6 +147,7 @@ void vhost_user_rng_init(VirtIODevice *vdev)
{
VHostUserRNG *vhrng = (VHostUserRNG *)malloc(sizeof(VHostUserRNG));
vdev->vhrng = vhrng;
+ vdev->nvqs = &dev->nvqs;
vhrng->parent = vdev;
vhrng->req_vq = vdev->vq;
vhrng->vhost_dev = dev;
@@ -166,6 +168,7 @@ static void vu_rng_handle_output(VirtIODevice *vdev, VirtQueue *vq)
void vhost_user_rng_realize(void)
{
+ /* Initialize virtio_dev data structures */
virtio_dev_init(global_vdev, "virtio-rng", 4, 0);
/* This needs to be changed to vhost-user-rng init */
@@ -185,5 +188,6 @@ void vhost_user_rng_realize(void)
dev->vqs = (struct vhost_virtqueue *)malloc(dev->nvqs *
sizeof(struct vhost_virtqueue));
+ /* Initialize vhost-user communication */
vhost_dev_init(dev);
}
diff --git a/virtio_input.c b/virtio_input.c
index 793806c..18ef3d4 100644
--- a/virtio_input.c
+++ b/virtio_input.c
@@ -175,6 +175,8 @@ static void virtio_input_set_status(VirtIODevice *vdev, uint8_t val)
VirtIOInput *vinput = vdev->vinput;
bool should_start = virtio_device_started(vdev, val);
+ DBG("virtio_input_set_status(...): %u\n", val);
+
if (should_start) {
if (!vinput->active) {
vinput->active = true;
@@ -252,7 +254,7 @@ void virtio_input_device_realize()
/* FIXME: do we need that? */
memcpy(global_vdev->vq, vinput->evt, sizeof(VirtQueue));
- memcpy(global_vdev->vq, vinput->sts, sizeof(VirtQueue));
+ memcpy(&global_vdev->vq[1], vinput->sts, sizeof(VirtQueue));
DBG("global_vdev->guest_features: 0x%lx\n", global_vdev->guest_features);
}
diff --git a/virtio_loopback.c b/virtio_loopback.c
index 90458cc..b9fd353 100644
--- a/virtio_loopback.c
+++ b/virtio_loopback.c
@@ -71,10 +71,10 @@
/* Global variables */
+int s; /* To be deleted */
int efd; /* Eventfd file descriptor */
uint64_t eftd_ctr;
fd_set rfds;
-int s;
int fd;
int loopback_fd;
@@ -146,6 +146,8 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val)
virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
}
+ DBG("set vdev->status :%u \n", vdev->status);
+
if (k->set_status) {
DBG("k->set_status\n");
k->set_status(vdev, val);
@@ -407,6 +409,8 @@ void virtio_set_isr(VirtIODevice *vdev, int value)
if ((old & value) != value) {
vdev->isr |= value;
}
+
+ DBG("Update isr: %d\n", vdev->isr);
}
static void virtio_irq(VirtQueue *vq)
@@ -439,6 +443,7 @@ void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
DBG("Do not notify!\n");
return;
}
+ DBG("Go on and notify!\n");
virtio_irq(vq);
}
@@ -749,6 +754,44 @@ bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
return true;
}
+uint32_t get_vqs_max_size(VirtIODevice *vdev)
+{
+ uint32_t vq_max_size = VIRTQUEUE_MAX_SIZE;
+ uint32_t total_size, temp_size, total_p2 = 1;
+ int i, log_res = 0;
+
+ total_size = VIRTQUEUE_MAX_SIZE * sizeof(VRingDesc);
+ total_size += offsetof(VRingAvail, ring) +
+ VIRTQUEUE_MAX_SIZE * sizeof(uint16_t);
+ total_size += offsetof(VRingUsed, ring) +
+ VIRTQUEUE_MAX_SIZE * sizeof(uint16_t);
+
+ temp_size = total_size;
+
+ /* Compute log2 of total_size (Needs to be power of 2) */
+ while ((temp_size /= 2) > 0) {
+ log_res++;
+ total_p2 *= 2;
+ }
+
+ /* If total_size is not a power of 2, round up to the next one (e.g. 9 -> 16) */
+ if (total_size > total_p2) {
+ total_size = 2 * total_p2;
+ }
+
+ /*
+ * Align to page size: this is needed only when total_size
+ * is less than 4096 (PAGE_SIZE)
+ */
+ if (total_size % PAGE_SIZE > 0) {
+ total_size = (total_size / PAGE_SIZE) * PAGE_SIZE + PAGE_SIZE;
+ }
+
+ DBG("Total vqs size to mmap is: %u\n", total_size);
+
+ return total_size;
+}
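Worked example for the sizing above (assuming VIRTQUEUE_MAX_SIZE == 1024 and sizeof(VRingDesc) == 16; both values are assumptions, not taken from this patch):

    /*
     * desc:  1024 * 16                    = 16384
     * avail: 4 (flags + idx) + 1024 * 2   =  2052
     * used:  4 (flags + idx) + 1024 * 2   =  2052
     * total                               = 20488
     * 20488 is not a power of two -> rounded up to 2 * 16384 = 32768,
     * which is already a PAGE_SIZE multiple, so 32768 is returned.
     */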
+
int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
uint16_t num_heads = vring_avail_idx(vq) - idx;
@@ -1037,6 +1080,8 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
int vhost_user_loopback_eventfd = 0;
+int eventfd_count = 0;
+
void *loopback_event_select(void *wfd)
{
int retval;
@@ -1066,7 +1111,10 @@ void *loopback_event_select(void *wfd)
exit(1);
} else {
DBG("\n\nEvent has come from the vhost-user-device "
- "(eventfd: %d)\n\n", *(int *)wfd);
+ "(eventfd: %d) -> event_count: %d\n\n",
+ *(int *)wfd, eventfd_count);
+
+ eventfd_count++;
virtio_irq(global_vdev->vq);
}
@@ -1243,6 +1291,17 @@ bool virtio_device_disabled(VirtIODevice *vdev)
int prev_level = 0;
+void *my_notify(void *data)
+{
+ int irq_num = 44;
+ (void) data;
+ (void) ioctl(fd, IRQ, &irq_num);
+
+ pthread_exit(NULL);
+}
+
+int int_count = 0;
+
void virtio_loopback_update_irq(VirtIODevice *vdev)
{
int level, irq_num = 44;
@@ -1254,6 +1313,9 @@ void virtio_loopback_update_irq(VirtIODevice *vdev)
level = (vdev->isr != 0);
+ DBG("level: %d\n", level);
+ DBG("prev_level: %d\n", prev_level);
+
if (!((level == 1) && (prev_level == 0))) {
prev_level = level;
return;
@@ -1261,9 +1323,13 @@ void virtio_loopback_update_irq(VirtIODevice *vdev)
prev_level = level;
DBG("Trigger interrupt (ioctl)\n");
- ioctl(fd, IRQ, &irq_num);
+ DBG("Interrupt counter: %d\n", int_count++);
+
+ (void)pthread_create(&my_thread_id, NULL, my_notify, NULL);
+
}
+bool enable_virtio_interrupt = false;
/* virtio device */
void virtio_notify_vector(VirtIODevice *vdev)
@@ -1296,15 +1362,17 @@ void virtio_queue_notify(VirtIODevice *vdev, int n)
{
VirtQueue *vq = &vdev->vq[n];
- DBG("virtio_queue_notify(...)\n");
+ DBG("virtio_queue_notify(..., vq_n: %d)\n", n);
if (!vq->vring.desc || vdev->broken) {
+ DBG("virtio_queue_notify: broken\n");
return;
}
if (vq->host_notifier_enabled) {
event_notifier_set(&vq->host_notifier);
} else if (vq->handle_output) {
+ DBG("vq->handle_output\n");
vq->handle_output(vdev, vq);
if (vdev->start_on_kick) {
@@ -1569,6 +1637,8 @@ static uint64_t virtio_loopback_read(VirtIODevice *vdev, uint64_t offset,
return 0;
}
+uint64_t vring_phys_addrs[2] = {0};
+uint32_t vring_phys_addrs_idx = 0;
void virtio_loopback_write(VirtIODevice *vdev, uint64_t offset,
uint64_t value, unsigned size)
@@ -1688,9 +1758,14 @@ void virtio_loopback_write(VirtIODevice *vdev, uint64_t offset,
if (value == 0) {
/* TODO: To be implemented */
} else {
- (void)value;
+
+ DBG("desc_addr: 0x%lx\n", value);
+ vring_phys_addrs[vring_phys_addrs_idx++] = value;
+
uint64_t desc_addr;
- desc_addr = (uint64_t)mmap(NULL, 10 * PAGE_SIZE,
+ uint32_t vqs_size = get_vqs_max_size(global_vdev);
+
+ desc_addr = (uint64_t)mmap(NULL, vqs_size,
PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0);
@@ -1733,6 +1808,8 @@ void virtio_loopback_write(VirtIODevice *vdev, uint64_t offset,
virtio_set_status(vdev, value & 0xff);
+ DBG("STATUS -> %ld\n", value);
+
/*
* TODO: Check if this is still needed
*
@@ -1843,7 +1920,6 @@ void *driver_event_select(void *data)
(void) data;
DBG("\nWaiting for loopback read/write events\n");
- fflush(stdout);
FD_ZERO(&rfds);
FD_SET(efd, &rfds);
@@ -2021,9 +2097,6 @@ int virtio_loopback_start(void)
exit(1);
}
- /* Fille the device info */
- create_rng_struct();
-
/* Start loopback transport */
(void)ioctl(fd, START_LOOPBACK, &device_info);
diff --git a/virtio_loopback.h b/virtio_loopback.h
index 779da70..1bd2d79 100644
--- a/virtio_loopback.h
+++ b/virtio_loopback.h
@@ -155,7 +155,9 @@
/* Loopback negotiation code */
+#define PAGE_SHIFT 12
#define PAGE_SIZE 4096
+
#define EFD_INIT _IOC(_IOC_WRITE, 'k', 1, sizeof(efd_data_t))
#define WAKEUP _IOC(_IOC_WRITE, 'k', 2, 0)
#define START_LOOPBACK _IOC(_IOC_WRITE, 'k', 3, \
@@ -166,6 +168,7 @@
#define USED_INFO _IOC(_IOC_WRITE, 'k', 7, 0)
#define DATA_INFO _IOC(_IOC_WRITE, 'k', 8, 0)
#define MAP_BLK _IOC(_IOC_WRITE, 'k', 9, 0)
+#define BARRIER _IOC(_IOC_WRITE, 'k', 10, 0)
#define VIRTIO_PCI_VRING_ALIGN 4096
@@ -251,6 +254,10 @@ typedef struct VirtIOMMIOProxy {
#define VRING_DESC_ALIGN_SIZE 16
/******************/
+
+extern uint64_t vring_phys_addrs[2];
+extern uint32_t vring_phys_addrs_idx;
+
typedef struct VRing {
unsigned int num;
unsigned int num_default;
@@ -355,6 +362,7 @@ typedef struct VirtioBus VirtioBus;
typedef struct VirtIODevice {
VirtioBus *vbus;
VirtioDeviceClass *vdev_class;
+ struct vhost_dev *vhdev;
const char *name;
uint8_t status;
uint8_t isr;
@@ -369,6 +377,7 @@ typedef struct VirtIODevice {
int nvectors;
VirtQueue *vq;
VirtQueue **vqs;
+ int *nvqs;
uint16_t device_id;
bool vm_running;
bool broken; /* device in invalid state, needs reset */
@@ -549,6 +558,9 @@ typedef struct VirtioDeviceClass {
*/
int (*post_load)(VirtIODevice *vdev);
bool (*primary_unplug_pending)(void *opaque);
+
+ void (*update_mem_table)(VirtIODevice *vdev);
+
struct vhost_dev *(*get_vhost)(VirtIODevice *vdev);
} VirtioDeviceClass;
@@ -619,6 +631,7 @@ void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
unsigned int max, unsigned int *next);
void print_config(uint8_t *config);
+uint32_t get_vqs_max_size(VirtIODevice *vdev);
/*
* Do we get callbacks when the ring is completely used, even if we've