-rw-r--r--   Makefile               |  21
-rw-r--r--   adapter.c              |  11
-rw-r--r--   vhost_loopback.c       |  94
-rw-r--r--   vhost_loopback.h       |  10
-rw-r--r--   vhost_user_blk.c       |   2
-rw-r--r--   vhost_user_input.c     |   2
-rw-r--r--   vhost_user_input.h     |   2
-rw-r--r--   vhost_user_loopback.c  | 331
-rw-r--r--   vhost_user_loopback.h  |  14
-rw-r--r--   vhost_user_rng.c       |  10
-rw-r--r--   virtio_loopback.c      | 171
-rw-r--r--   virtio_loopback.h      |  23
12 files changed, 473 insertions, 218 deletions
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -21,7 +21,7 @@ #CFLAGS := -Wall -Wextra -Werror
 #CFLAGS := -Wall -Wextra -Wno-unused-variable -Wno-unused-function
 CFLAGS := -Wno-unused-variable -Wno-unused-function -D_GNU_SOURCE
-CFLAGS = -D_GNU_SOURCE -O2
+CFLAGS = -D_GNU_SOURCE -O2 -static
 
 CC ?=
 
 ifeq ($(ARCH), arm64)
@@ -31,25 +31,6 @@ else
 CC ?= gcc
 endif
 
-ifeq ($(VHOST_USER_RNG), 1)
-	CFLAGS += -DVHOST_USER_RNG_DEV
-	CFLAGS += -DVHOST_USER
-endif
-
-ifeq ($(VHOST_USER_BLK), 1)
-	CFLAGS += -DVHOST_USER_BLK_DEV
-	CFLAGS += -DVHOST_USER
-endif
-
-ifeq ($(VHOST_USER_INPUT), 1)
-	CFLAGS += -DVHOST_USER_INPUT_DEV
-	CFLAGS += -DVHOST_USER
-endif
-
-ifeq ($(VIRTIO_RNG), 1)
-	CFLAGS += -DVIRTIO_RNG
-endif
-
 INCL += -I .
 DEPS = adapter.h vhost_user_loopback.h event_notifier.h virtio_loopback.h
 SRC_C = event_notifier.c vhost_user_loopback.c virtio_loopback.c virtio_rng.c virtio_input.c vhost_user_input.c vhost_user_blk.c vhost_user_rng.c vhost_loopback.c adapter.c
diff --git a/adapter.c b/adapter.c
--- a/adapter.c
+++ b/adapter.c
@@ -3,6 +3,7 @@
  *
  * Authors:
  *  Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
+ *  Stefanos Gerangelos <s.gerangelos@virtualopensystems.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -47,7 +48,6 @@
 #include "vhost_user_blk.h"
 #include "vhost_user_input.h"
 
-
 #ifdef DEBUG
 #define DBG(...) printf("adapter: " __VA_ARGS__)
 #else
@@ -137,7 +137,7 @@ static void help_args(void)
            "\t\t [ -qn number of queues ]\n"
            "\t\t [ -qs size of queues ]\n"
            "The 'device_name' can be one of the following:\n"
-           "\tvrng, vhurng, vhublk, vhuinput\n");
+           "\tvrng, vhurng, vhublk, vhuinput, vhusnd, vhugpio\n");
 }
 
 int find_arg(int argc, char **argv, char *str)
@@ -221,14 +221,14 @@ int main(int argc, char **argv)
     int socket_idx, device_idx, device_id;
     bool vhost_user_enabled;
 
     /* Assign default queue num and size */
-    int queue_num = 1, queue_size = 1024;
+    int queue_num = 1, queue_size = 64;
 
     /*
     * Check if the user has provided all the required arguments.
     * If not, print the help messages.
     */
-    if (argc < 5) {
+    if (argc < 3) {
         goto error_args;
     }
 
@@ -249,9 +249,7 @@ int main(int argc, char **argv)
     /* Check if this is a vhost-user device */
     vhost_user_enabled = check_vhu_device(argv[device_idx]);
 
-    /* Check if a socket is needed and provided */
-
     socket_idx = find_arg(argc, argv, "-s");
 
     if ((socket_idx < 0) && (vhost_user_enabled)) {
@@ -270,7 +268,6 @@ int main(int argc, char **argv)
     /* Initialize the adapter data structures */
     vhost_user_adapter_init();
 
-
     /* Initialize the virtio/vhost-user device */
     switch (device_id) {
     case 1:
diff --git a/vhost_loopback.c b/vhost_loopback.c
index 0dc5c52..3e137eb 100644
--- a/vhost_loopback.c
+++ b/vhost_loopback.c
@@ -148,9 +148,14 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
     memset(&addr, 0, sizeof(struct vhost_vring_addr));
 
-    addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
-    addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
-    addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
+    addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
+    addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
+    addr.used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
+
+    DBG("Print physical addresses of vrings:\n");
+    DBG("\tvq->desc_phys: 0x%llx\n", vq->desc_phys);
+    DBG("\tvq->avail_phys: 0x%llx\n", vq->avail_phys);
+    DBG("\tvq->used_phys: 0x%llx\n", vq->used_phys);
 
     addr.index = idx;
     addr.log_guest_addr = vq->used_phys;
@@ -163,6 +168,34 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
     return r;
 }
 
+uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
+                            uint64_t features)
+{
+    const int *bit = feature_bits;
+    while (*bit != VHOST_INVALID_FEATURE_BIT) {
+        uint64_t bit_mask = (1ULL << *bit);
+        if (!(hdev->features & bit_mask)) {
+            features &= ~bit_mask;
+        }
+        bit++;
+    }
+    return features;
+}
+
+void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
+                        uint64_t features)
+{
+    const int *bit = feature_bits;
+    while (*bit != VHOST_INVALID_FEATURE_BIT) {
+        uint64_t bit_mask = (1ULL << *bit);
+        if (features & bit_mask) {
+            hdev->acked_features |= bit_mask;
+        }
+        bit++;
+    }
+}
+
+
 /* Mask/unmask events from this vq.
  */
 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
@@ -207,6 +240,7 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
     a = virtio_queue_get_desc_addr(vdev, idx);
     if (a == 0) {
         /* Queue might not be ready for start */
+        DBG("Error: Queue might not be ready for start\n");
         return 0;
     }
@@ -226,8 +260,8 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
     }
 
     vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
-    vq->desc_phys = a;
-    vq->desc = (void *)a;
+    vq->desc_phys = vring_phys_addrs[idx] << 12;
+    vq->desc = (void *)virtio_queue_get_desc_addr(vdev, idx);
     if (!vq->desc || l != s) {
         DBG("Error : vq->desc = a\n");
         r = -ENOMEM;
@@ -235,8 +269,9 @@
     }
 
     vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
-    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
-    vq->avail = (void *)a;
+    vq->avail_phys = vq->desc_phys + virtio_queue_get_avail_addr(vdev, idx)
+                     - virtio_queue_get_desc_addr(vdev, idx);
+    vq->avail = (void *)virtio_queue_get_avail_addr(vdev, idx);
     if (!vq->avail || l != s) {
         DBG("Error : vq->avail = a\n");
         r = -ENOMEM;
@@ -244,8 +279,9 @@
     }
 
     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
-    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
-    vq->used = (void *)a;
+    vq->used_phys = a = vq->avail_phys + virtio_queue_get_used_addr(vdev, idx)
+                        - virtio_queue_get_avail_addr(vdev, idx);
+    vq->used = (void *)virtio_queue_get_used_addr(vdev, idx);
     if (!vq->used || l != s) {
         DBG("Error : vq->used = a\n");
         r = -ENOMEM;
@@ -277,12 +313,10 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
      * will do it later.
      */
     if (!vdev->use_guest_notifier_mask) {
-        DBG("!vdev->use_guest_notifier_mask\n");
         /* TODO: check and handle errors. */
         vhost_virtqueue_mask(dev, vdev, idx, false);
     }
 
-    DBG("vhost_virtqueue_start return successfully\n");
     return 0;
 }
@@ -294,8 +328,27 @@ void update_mem_table(VirtIODevice *vdev)
     (void)vhost_user_set_mem_table(vdev->vhdev);
 }
 
+static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable)
+{
+    DBG("vhost_dev_set_vring_enable not yet implemented\n");
+
+    /*
+     * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not
+     * been negotiated, the rings start directly in the enabled state, and
+     * .vhost_set_vring_enable callback will fail since
+     * VHOST_USER_SET_VRING_ENABLE is not supported.
+     */
+    if (!virtio_has_feature(hdev->backend_features,
+                            VHOST_USER_F_PROTOCOL_FEATURES)) {
+        DBG("Does not have VHOST_USER_F_PROTOCOL_FEATURES\n");
+        return 0;
+    }
+
+    return vhost_user_set_vring_enable(hdev, enable);
+}
+
 /* Host notifiers must be enabled at this point.
  */
-int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
 {
     int i, r;
@@ -312,8 +365,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         DBG("memory_listener_register?\n");
     }
 
-    /* This is used to exhange the loopback_fd to the vhost-user-device */
-    vhost_user_share_fd();
+    vhost_commit_mem_regions(hdev);
 
     for (i = 0; i < hdev->nvqs; ++i) {
         r = vhost_virtqueue_start(hdev,
@@ -326,6 +378,20 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         }
     }
 
+    if (vrings) {
+        r = vhost_dev_set_vring_enable(hdev, true);
+        if (r) {
+            DBG("Fail vhost_dev_set_vring_enable\n");
+            return r;
+        }
+    }
+
+    r = vhost_user_dev_start(hdev, true);
+    if (r) {
+        DBG("Fail vhost_dev_set_vring_enable\n");
+        return r;
+    }
+
     return 0;
 }
diff --git a/vhost_loopback.h b/vhost_loopback.h
index b73caeb..78c2755 100644
--- a/vhost_loopback.h
+++ b/vhost_loopback.h
@@ -29,14 +29,22 @@
 #include "vhost_user_loopback.h"
 #include "virtio_loopback.h"
 
+#define VHOST_INVALID_FEATURE_BIT (0xff)
+#define VHOST_QUEUE_NUM_CONFIG_INR 0
+
 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
-int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                           bool mask);
 int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                          uint32_t config_len);
 int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                          uint32_t offset, uint32_t size, uint32_t flags);
+uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
+                            uint64_t features);
+void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
+                        uint64_t features);
+
 /**
  * vhost_dev_set_config_notifier() - register VhostDevConfigOps
  * @hdev: common vhost_dev_structure
diff --git a/vhost_user_blk.c b/vhost_user_blk.c
index 084710b..756b0a5 100644
--- a/vhost_user_blk.c
+++ b/vhost_user_blk.c
@@ -102,7 +102,7 @@ static int vhost_user_blk_start(VirtIODevice *vdev)
 
     DBG("After vhost_dev_set_inflight\n");
 
-    ret = vhost_dev_start(s->vhost_dev, vdev);
+    ret = vhost_dev_start(s->vhost_dev, vdev, false);
     if (ret < 0) {
         DBG("Error starting vhost\n");
         return ret;
diff --git a/vhost_user_input.c b/vhost_user_input.c
index e33d22c..d6764cc 100644
--- a/vhost_user_input.c
+++ b/vhost_user_input.c
@@ -208,7 +208,7 @@ void vhost_user_backend_start(VirtIODevice *vdev)
     }
 
     vdev->vhuinput->vhost_dev->acked_features = vdev->guest_features;
-    ret = vhost_dev_start(vdev->vhuinput->vhost_dev, vdev);
+    ret = vhost_dev_start(vdev->vhuinput->vhost_dev, vdev, false);
     if (ret < 0) {
         DBG("Error start vhost dev\n");
         return;
diff --git a/vhost_user_input.h b/vhost_user_input.h
index bbed7f2..0d5c900 100644
--- a/vhost_user_input.h
+++ b/vhost_user_input.h
@@ -182,4 +182,4 @@ void virtio_input_device_realize();
 void vhost_user_input_init(VirtIODevice *vdev);
 void vhost_user_input_realize();
 
-#endif /* VHOST_USER_INPU */
+#endif /* VHOST_USER_INPUT */
diff --git a/vhost_user_loopback.c b/vhost_user_loopback.c
index d448e55..47ce1cd 100644
--- a/vhost_user_loopback.c
+++ b/vhost_user_loopback.c
@@ -252,6 +252,8 @@ int vhost_user_get_u64(int request, uint64_t *u64)
         .flags = VHOST_USER_VERSION,
     };
 
+    print_vhost_user_messages(request);
+
     if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
         return 0;
     }
@@ -278,6 +280,7 @@
     }
 
     *u64 = msg.payload.u64;
+    DBG("\tGet value: 0x%lx\n", msg.payload.u64);
 
     return 0;
 }
@@ -319,6 +322,9 @@ int vhost_user_set_u64(int request, uint64_t u64, bool wait_for_reply)
     };
     int ret;
 
+    print_vhost_user_messages(request);
+    DBG("\tSet value: 0x%lx\n", u64);
+
     if (wait_for_reply) {
         bool reply_supported = virtio_has_feature(dev->protocol_features,
                                           VHOST_USER_PROTOCOL_F_REPLY_ACK);
@@ -352,9 +358,9 @@ int vhost_user_set_features(struct vhost_dev *dev,
     (void) dev; /* Pass hdev as parameter! */
 
-    DBG("vhost_user_set_features: 0x%lx\n", features);
-    return vhost_user_set_u64(VHOST_USER_SET_FEATURES, features,
-                              log_enabled);
+    DBG("vhost_user_set_features: 0x%lx\n", features | dev->backend_features);
+    return vhost_user_set_u64(VHOST_USER_SET_FEATURES,
+                              features | dev->backend_features, log_enabled);
 }
 
 int vhost_user_set_protocol_features(uint64_t features)
@@ -447,36 +453,6 @@ int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
     return idx;
 }
 
-void vhost_user_share_fd(void)
-{
-    size_t fd_num = 1;
-    VhostUserMsg msg = {
-        .request = (VhostUserRequest) VHOST_USER_SHARE_LOOPBACK_FD,
-        .flags = VHOST_USER_VERSION,
-        .payload.u64 = ((uint64_t)getpid() << 32) | (uint64_t)loopback_fd,
-        .size = sizeof(msg.payload.u64),
-    };
-
-    msg.fd_num = 1;
-    memcpy(msg.fds, &loopback_fd, fd_num * sizeof(int));
-
-    /*
-     * TODO: Check if we need to remove the VHOST_USER_NEED_REPLY_MASK flag
-     *
-     * msg.flags &= ~VHOST_USER_NEED_REPLY_MASK;
-     */
-
-    if (vu_message_write(client_sock, &msg) < 0) {
-        DBG("vhost_user_share_fd -> write failed\n");
-        exit(1);
-    }
-
-    if (msg.flags & VHOST_USER_NEED_REPLY_MASK) {
-        process_message_reply(&msg);
-    }
-
-}
-
 int vhost_set_vring_file(VhostUserRequest request,
                          struct vhost_vring_file *file)
 {
@@ -831,8 +807,7 @@ static MemoryRegion *vhost_user_get_mr_data(struct vhost_memory_region *reg,
 {
     MemoryRegion *mr;
 
-    *offset = reg->guest_phys_addr & (PAGE_SIZE - 1);
-
+    *offset = 0;
     *fd = loopback_fd;
 
     return mr;
@@ -849,16 +824,52 @@ static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
     dst->mmap_offset = mmap_offset;
 }
 
-
-
-
 static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
                                              struct vhost_dev *dev,
                                              VhostUserMsg *msg, int *fds,
                                              size_t *fd_num,
                                              bool track_ramblocks)
 {
-    DBG("vhost_user_fill_set_mem_table_msg(...) not yet implemented\n");
+    int i, fd;
+    ram_addr_t offset;
+    MemoryRegion *mr;
+    struct vhost_memory_region *reg;
+    VhostUserMemoryRegion region_buffer;
+
+    msg->request = VHOST_USER_SET_MEM_TABLE;
+
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        reg = dev->mem->regions + i;
+
+        mr = vhost_user_get_mr_data(reg, &offset, &fd);
+        if (fd > 0) {
+            if (track_ramblocks) {
+                u->region_rb_offset[i] = offset;
+                u->region_rb[i] = mr->ram_block;
+            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
+                DBG("Failed preparing vhost-user memory table msg: %d\n", *fd_num);
+                return -1;
+            }
+            vhost_user_fill_msg_region(&region_buffer, reg, offset);
+            msg->payload.memory.regions[*fd_num] = region_buffer;
+            fds[(*fd_num)++] = fd;
+        } else if (track_ramblocks) {
+            u->region_rb_offset[i] = 0;
+            u->region_rb[i] = NULL;
+        }
+    }
+
+    msg->payload.memory.nregions = *fd_num;
+    if (!*fd_num) {
+        DBG("Failed initializing vhost-user memory map, "
+            "consider using -object memory-backend-file share=on\n");
+        return -1;
+    }
+
+    msg->size = sizeof(msg->payload.memory.nregions);
+    msg->size += sizeof(msg->payload.memory.padding);
+    msg->size += *fd_num * sizeof(VhostUserMemoryRegion);
 
     return 1;
 }
@@ -981,7 +992,7 @@ static int send_remove_regions(struct vhost_dev *dev,
         if (fd > 0) {
             msg->request = VHOST_USER_REM_MEM_REG;
-            vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
+            vhost_user_fill_msg_region(&region_buffer, shadow_reg, offset);
             msg->payload.memreg.region = region_buffer;
 
             msg->fd_num = 1;
@@ -1060,7 +1071,6 @@ static int send_add_regions(struct vhost_dev *dev,
             u->region_rb_offset[reg_idx] = 0;
             u->region_rb[reg_idx] = NULL;
         }
-
     }
 
     return 0;
@@ -1116,7 +1126,7 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                              bool reply_supported,
                                              bool config_mem_slots)
 {
-    DBG("vhost_user_set_mem_table_postcopy(...)\n");
+    DBG("vhost_user_set_mem_table_postcopy(...) not yet implemented\n");
 
     return 0;
 }
@@ -1163,20 +1173,23 @@ int vhost_user_set_mem_table(struct vhost_dev *dev)
         }
     } else {
-        DBG("To be implemented!\n");
-        exit(1);
-
         if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds,
                                               &fd_num, false) < 0) {
+            DBG("vhost_user_fill_set_mem_table_msg error\n");
             return -1;
         }
+
+        /* Update message parameters */
+        DBG("\nfd_num: %d\n", fd_num);
+        msg.fd_num = fd_num;
+        memcpy(msg.fds, fds, fd_num * sizeof(int));
+
         if (vu_message_write(client_sock, &msg) < 0) {
             DBG("vhost_user_set_mem_table failed write msg\n");
             return -1;
         }
 
         if (reply_supported) {
-            DBG("reply is supported\n");
             return process_message_reply(&msg);
         }
     }
@@ -1240,7 +1253,7 @@ static void vhost_add_reg(struct vhost_dev *dev, uint64_t hpa, uint64_t len)
     cur_vmr = dev->mem->regions + (dev->mem->nregions - 1);
     cur_vmr->guest_phys_addr = hpa;
     cur_vmr->memory_size = len;
-    cur_vmr->userspace_addr = 0;
+    cur_vmr->userspace_addr = hpa;
     cur_vmr->flags_padding = 0;
 }
@@ -1304,7 +1317,6 @@ void find_add_new_reg(struct vhost_dev *dev)
             break;
         }
 
-        DBG("desc[%u] 0x%lx\n", j, desc_addr);
         DBG("desc[%u].addr 0x%lx\n", j, sg_addr);
         DBG("desc[%u].len 0x%lu\n", j, sg_len);
@@ -1335,7 +1347,6 @@ void vhost_commit_init_vqs(struct vhost_dev *dev)
     int sglist_elem_num;
 
     dev->n_mem_sections = dev->nvqs;
-    DBG("dev->n_mem_sections: %d\n", dev->n_mem_sections);
 
     /* Rebuild the regions list from the new sections list */
     regions_size = offsetof(struct vhost_memory, regions) +
@@ -1360,15 +1371,37 @@ void vhost_commit_vqs(struct vhost_dev *dev)
     find_add_new_reg(dev);
 }
 
+void vhost_commit_mem_regions(struct vhost_dev *dev)
+{
+    uint64_t mmap_pa_req;
+    int i;
 
-/* -------------------- End of Mem regions functions -------------------- */
+    /* Create and add all ram memory regions */
+    for (i = 0; i < VHOST_USER_MAX_RAM_SLOTS; i++) {
+        /* Calculate new Physical Address */
+        mmap_pa_req = INIT_PA + i * 1 * OFFSET_1GB;
+
+        /* Add a new region */
+        vhost_add_reg(dev, mmap_pa_req, 1 * OFFSET_1GB);
+    }
+
+    /* Send new region */
+    if (vhost_user_set_mem_table(dev) < 0) {
+        DBG("vhost_user_set_mem_table -> Error\n");
+        exit(1);
+    }
+}
+
+/* -------------------- End of Mem regions functions -------------------- */
 
 int vhost_user_backend_init(struct vhost_dev *vhdev)
 {
     uint64_t features, protocol_features, ram_slots;
     int err;
 
+    DBG("vhost_user_backend_init (...)\n");
+
     err = vhost_user_get_features(&features);
     if (err < 0) {
         DBG("vhost_backend_init failed\n");
@@ -1400,6 +1433,7 @@
         if (!vhdev->config_ops ||
             !vhdev->config_ops->vhost_dev_config_notifier) {
+            DBG("There is no config_ops or vhost_dev_config_notifier\n");
             /* Don't acknowledge CONFIG feature if device doesn't support it */
             dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
         } else if (!(protocol_features &
@@ -1409,7 +1443,6 @@
             return -EINVAL;
         }
 
-
         err = vhost_user_set_protocol_features(vhdev->protocol_features);
         if (err < 0) {
             DBG("vhost_backend_init failed\n");
@@ -1492,7 +1525,7 @@
     return 0;
 }
 
-
+/* TODO: Return an error code */
 void vhost_dev_init(struct vhost_dev *vhdev)
 {
     uint64_t features;
@@ -1552,4 +1585,198 @@
      */
     vhdev->features = features;
+
+    DBG("vhdev->backend_features 0x%llx\n", vhdev->backend_features);
+    DBG("vhdev->features 0x%llx\n", vhdev->features);
+}
+
+int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
+{
+    int i;
+
+    DBG("vhost_user_set_vring_enable not yet implemented\n");
+
+    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
+        DBG("Does not have VHOST_USER_F_PROTOCOL_FEATURES\n");
+        return -EINVAL;
+    }
+
+    for (i = 0; i < dev->nvqs; ++i) {
+        int ret;
+        struct vhost_vring_state state = {
+            .index = dev->vq_index + i,
+            .num = enable,
+        };
+
+        ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
+        if (ret < 0) {
+            /*
+             * Restoring the previous state is likely infeasible, as well as
+             * proceeding regardless the error, so just bail out and hope for
+             * the device-level recovery.
+             */
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status)
+{
+    return vhost_user_set_u64(VHOST_USER_SET_STATUS, status, false);
+}
+
+static int vhost_user_get_status(struct vhost_dev *dev, uint8_t *status)
+{
+    uint64_t value;
+    int ret;
+
+    ret = vhost_user_get_u64(VHOST_USER_GET_STATUS, &value);
+    if (ret < 0) {
+        return ret;
+    }
+    *status = value;
+
+    return 0;
+}
+
+static int vhost_user_add_status(struct vhost_dev *dev, uint8_t status)
+{
+    uint8_t s;
+    int ret;
+
+    ret = vhost_user_get_status(dev, &s);
+    if (ret < 0) {
+        return ret;
+    }
+
+    if ((s & status) == status) {
+        return 0;
+    }
+    s |= status;
+
+    return vhost_user_set_status(dev, s);
+}
+
+int vhost_user_dev_start(struct vhost_dev *dev, bool started)
+{
+    DBG("vhost_user_dev_start(...)\n");
+    if (!virtio_has_feature(dev->protocol_features,
+                            VHOST_USER_PROTOCOL_F_STATUS)) {
+        DBG("VHOST_USER_PROTOCOL_F_STATUS not in features\n");
+        return 0;
+    }
+
+    /* Set device status only for last queue pair */
+    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
+        return 0;
+    }
+
+    if (started) {
+        return vhost_user_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
+                                          VIRTIO_CONFIG_S_DRIVER |
+                                          VIRTIO_CONFIG_S_DRIVER_OK);
+    } else {
+        return 0;
+    }
+}
+
+void print_vhost_user_messages(int request)
+{
+    switch (request) {
+    case VHOST_USER_GET_FEATURES:
+        DBG("VHOST_USER_GET_FEATURES\n");
+        break;
+    case VHOST_USER_SET_FEATURES:
+        DBG("VHOST_USER_SET_FEATURES\n");
+        break;
+    case VHOST_USER_GET_PROTOCOL_FEATURES:
+        DBG("VHOST_USER_GET_PROTOCOL_FEATURES\n");
+        break;
+    case VHOST_USER_SET_PROTOCOL_FEATURES:
+        DBG("VHOST_USER_SET_PROTOCOL_FEATURES\n");
+        break;
+    case VHOST_USER_SET_OWNER:
+        DBG("VHOST_USER_SET_OWNER\n");
+        break;
+    case VHOST_USER_RESET_OWNER:
+        DBG("VHOST_USER_RESET_OWNER\n");
+        break;
+    case VHOST_USER_SET_MEM_TABLE:
+        DBG("VHOST_USER_SET_MEM_TABLE\n");
+        break;
+    case VHOST_USER_SET_LOG_BASE:
+        DBG("VHOST_USER_SET_LOG_BASE\n");
+        break;
+    case VHOST_USER_SET_LOG_FD:
+        DBG("VHOST_USER_SET_LOG_FD\n");
+        break;
+    case VHOST_USER_SET_VRING_NUM:
+        DBG("VHOST_USER_SET_VRING_NUM\n");
+        break;
+    case VHOST_USER_SET_VRING_ADDR:
+        DBG("VHOST_USER_SET_VRING_ADDR\n");
+        break;
+    case VHOST_USER_SET_VRING_BASE:
+        DBG("VHOST_USER_SET_VRING_BASE\n");
+        break;
+    case VHOST_USER_GET_VRING_BASE:
+        DBG("VHOST_USER_GET_VRING_BASE\n");
+        break;
+    case VHOST_USER_SET_VRING_KICK:
+        DBG("VHOST_USER_SET_VRING_KICK\n");
+        break;
+    case VHOST_USER_SET_VRING_CALL:
+        DBG("VHOST_USER_SET_VRING_CALL\n");
+        break;
+    case VHOST_USER_SET_VRING_ERR:
+        DBG("VHOST_USER_SET_VRING_ERR\n");
+        break;
+    case VHOST_USER_GET_QUEUE_NUM:
+        DBG("VHOST_USER_GET_QUEUE_NUM\n");
+        break;
+    case VHOST_USER_SET_VRING_ENABLE:
+        DBG("VHOST_USER_SET_VRING_ENABLE\n");
+        break;
+    case VHOST_USER_SET_SLAVE_REQ_FD:
+        DBG("VHOST_USER_SET_SLAVE_REQ_FD\n");
+        break;
+    case VHOST_USER_GET_CONFIG:
+        DBG("VHOST_USER_GET_CONFIG\n");
+        break;
+    case VHOST_USER_SET_CONFIG:
+        DBG("VHOST_USER_SET_CONFIG\n");
+        break;
+    case VHOST_USER_NONE:
+        DBG("VHOST_USER_NONE\n");
+        break;
+    case VHOST_USER_POSTCOPY_ADVISE:
+        DBG("VHOST_USER_POSTCOPY_ADVISE\n");
+        break;
+    case VHOST_USER_POSTCOPY_LISTEN:
+        DBG("VHOST_USER_POSTCOPY_LISTEN\n");
+        break;
+    case VHOST_USER_POSTCOPY_END:
+        DBG("VHOST_USER_POSTCOPY_END\n");
+        break;
+    case VHOST_USER_GET_INFLIGHT_FD:
+        DBG("VHOST_USER_GET_INFLIGHT_FD\n");
+        break;
+    case VHOST_USER_SET_INFLIGHT_FD:
+        DBG("VHOST_USER_SET_INFLIGHT_FD\n");
+        break;
+    case VHOST_USER_VRING_KICK:
+        DBG("VHOST_USER_VRING_KICK\n");
+        break;
+    case VHOST_USER_GET_MAX_MEM_SLOTS:
+        DBG("VHOST_USER_GET_MAX_MEM_SLOTS\n");
+        break;
+    case VHOST_USER_ADD_MEM_REG:
+        DBG("VHOST_USER_ADD_MEM_REG\n");
+        break;
+    case VHOST_USER_REM_MEM_REG:
+        DBG("VHOST_USER_REM_MEM_REG\n");
+        break;
+    default:
+        DBG("Unhandled request: %d\n", request);
+    }
 }
diff --git a/vhost_user_loopback.h b/vhost_user_loopback.h
index 82d0c6c..2ced983 100644
--- a/vhost_user_loopback.h
+++ b/vhost_user_loopback.h
@@ -66,6 +66,7 @@ struct vhost_virtqueue {
     unsigned long long used_phys;
     unsigned used_size;
     EventNotifier masked_notifier;
+    EventNotifier masked_config_notifier;
     struct vhost_dev *dev;
 };
 
@@ -119,7 +120,7 @@ struct vhost_dev {
 };
 
-#define VHOST_USER_MAX_RAM_SLOTS 512
+#define VHOST_USER_MAX_RAM_SLOTS 8
 
 typedef uint64_t ram_addr_t;
 typedef struct RAMBlock RAMBlock;
@@ -244,7 +245,7 @@ extern struct vhost_user *vudev;
 /* Based on qemu/hw/virtio/vhost-user.c */
 #define VHOST_USER_F_PROTOCOL_FEATURES 30
 #define VHOST_LOG_PAGE 4096
-#define VHOST_MEMORY_BASELINE_NREGIONS 8
+#define VHOST_MEMORY_BASELINE_NREGIONS VHOST_USER_MAX_RAM_SLOTS
 
 /* The version of the protocol we support */
 #define VHOST_USER_VERSION (0x1)
@@ -281,6 +282,7 @@ enum VhostUserProtocolFeature {
     VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
     VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
     VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
+    VHOST_USER_PROTOCOL_F_STATUS = 16,
     VHOST_USER_PROTOCOL_F_MAX
 };
 
@@ -325,7 +327,8 @@ typedef enum VhostUserRequest {
     VHOST_USER_GET_MAX_MEM_SLOTS = 36,
     VHOST_USER_ADD_MEM_REG = 37,
     VHOST_USER_REM_MEM_REG = 38,
-    VHOST_USER_SHARE_LOOPBACK_FD = 39,
+    VHOST_USER_SET_STATUS = 39,
+    VHOST_USER_GET_STATUS = 40,
     VHOST_USER_MAX
 } VhostUserRequest;
 
@@ -436,6 +439,7 @@ typedef struct VuDevRegion {
     uint64_t mmap_addr;
 } VuDevRegion;
 
+
 typedef struct VuDev VuDev;
 typedef uint64_t (*vu_get_features_cb) (VuDev *dev);
 typedef void (*vu_set_features_cb) (VuDev *dev, uint64_t features);
@@ -942,11 +946,15 @@ int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
                           uint32_t config_len);
 int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
                           uint32_t offset, uint32_t size, uint32_t flags);
+int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable);
+int vhost_user_dev_start(struct vhost_dev *dev, bool started);
 
 void vhost_commit_init_vqs(struct vhost_dev *dev);
+void vhost_commit_mem_regions(struct vhost_dev *dev);
 void vhost_commit_vqs(struct vhost_dev *dev);
 void find_add_new_reg(struct vhost_dev *dev);
 void print_mem_table(struct vhost_dev *dev);
+void print_vhost_user_messages(int request);
 
 /* FIXME: This need to move in a better place */
diff --git a/vhost_user_rng.c b/vhost_user_rng.c
index 860c33e..6d4fd5c 100644
--- a/vhost_user_rng.c
+++ b/vhost_user_rng.c
@@ -64,8 +64,9 @@ static void vu_rng_start(VirtIODevice *vdev)
     }
 
     rng->vhost_dev->acked_features = vdev->guest_features;
+    DBG("rng->vhost_dev->acked_features: 0x%lx\n", vdev->guest_features);
 
-    ret = vhost_dev_start(rng->vhost_dev, vdev);
+    ret = vhost_dev_start(rng->vhost_dev, vdev, true);
     if (ret < 0) {
         DBG("Error starting vhost-user-rng: %d\n", ret);
         return;
@@ -80,6 +81,9 @@ static void vu_rng_start(VirtIODevice *vdev)
         vhost_virtqueue_mask(rng->vhost_dev, vdev, i, false);
     }
 
+    /* Wait a bit for the vrings to be set in vhost-user-device */
+    sleep(1);
+
 }
 
 /* TODO: We need to implement this function in a future release */
@@ -190,4 +194,8 @@ void vhost_user_rng_realize(void)
 
     /* Initiale vhost-user communication */
     vhost_dev_init(dev);
+
+    /* Write the final features */
+    global_vdev->host_features = dev->features;
+    DBG("dev->host_features: 0x%lx\n", dev->features);
 }
diff --git a/virtio_loopback.c b/virtio_loopback.c
index 91110c4..f9e5464 100644
--- a/virtio_loopback.c
+++ b/virtio_loopback.c
@@ -71,7 +71,6 @@
 int s; /* To be deleted */
 int efd; /* Eventfd file descriptor */
 int efd_notify; /* Eventfd file descriptor */
-uint64_t eftd_ctr;
 fd_set rfds;
 int fd;
 int loopback_fd;
@@ -101,6 +100,15 @@ static int virtio_validate_features(VirtIODevice *vdev)
     return 0;
 }
 
+bool virtio_device_should_start(VirtIODevice *vdev, uint8_t status)
+{
+    if (!vdev->vm_running) {
+        return false;
+    }
+
+    return virtio_device_started(vdev, status);
+}
+
 bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
 {
 
@@ -144,7 +152,7 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val)
         virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
     }
 
-    DBG("set vdev->status :%u \n", vdev->status);
+    DBG("set vdev->status:%u\n", vdev->status);
 
     if (k->set_status) {
         DBG("k->set_status\n");
@@ -524,88 +532,7 @@ static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                                unsigned int max_num_sg, bool is_write,
                                uint64_t pa, size_t sz)
 {
-    unsigned num_sg = *p_num_sg;
-    bool ok = false;
-    uint64_t mmap_addr;
-    int ioctl_res;
-
-    if (!sz) {
-        DBG("virtio: zero sized buffers are not allowed\n");
-        goto out;
-    }
-
-    while (sz) {
-        uint64_t len = sz;
-
-        if (num_sg == max_num_sg) {
-            DBG("virtio: too many write descriptors in\n"
-                "indirect table");
-            goto out;
-        }
-
-        DBG("\tpa address is: 0x%lx\n", pa);
-
-        memcpy(&mmap_addr, &pa, sizeof(uint64_t));
-        ioctl_res = ioctl(loopback_fd, SHARE_BUF, &mmap_addr);
-
-        /* Notify the loopback driver what you want to' mmap' */
-        if (ioctl_res < 0) {
-            DBG("SHARE_BUF failed\n");
-            exit(1);
-        } else {
-            if (mmap_addr == 0) {
-
-                if ((pa & 0xff) == 0) {
-                    ioctl(loopback_fd, MAP_BLK);
-                }
-
-                DBG("Try to mmap pa: 0x%lx, size: %lx\n", pa, len);
-                iov[num_sg].iov_base = mmap(NULL, len, PROT_READ | PROT_WRITE,
-                                            MAP_SHARED, loopback_fd, 0);
-                int retries = 5;
-                while ((retries > 0) && ((int64_t)iov[num_sg].iov_base < 0)) {
-                    iov[num_sg].iov_base = mmap(NULL, len,
-                                                PROT_READ | PROT_WRITE,
-                                                MAP_SHARED, loopback_fd, 0);
-                    retries--;
-                }
-
-                if ((int64_t)iov[num_sg].iov_base < 0) {
-                    DBG("Bad mapping\n");
-                    exit(1);
-                }
-            } else {
-                iov[num_sg].iov_base = (void *)mmap_addr;
-            }
-        }
-
-        /* Fix the offset */
-        iov[num_sg].iov_base += pa & 0xfff;
-        DBG("\tMMap address (iov_base): 0x%lx\n",
-            (uint64_t)iov[num_sg].iov_base);
-
-        /* Update len: Remaining size in the current page */
-        if (sz > PAGE_SIZE - (pa & 0xfff)) {
-            len = PAGE_SIZE - (pa & 0xfff);
-        }
-
-        if (!iov[num_sg].iov_base) {
-            DBG("virtio: bogus descriptor or out of resources\n");
-            goto out;
-        }
-
-        iov[num_sg].iov_len = len;
-        addr[num_sg] = pa;
-
-        sz -= len;
-        pa += len;
-        num_sg++;
-    }
-    ok = true;
-
-out:
-    *p_num_sg = num_sg;
-    return ok;
+    DBG("Not implemented\n");
 }
 
 static void *virtqueue_alloc_element(size_t sz, unsigned out_num,
@@ -1078,16 +1005,11 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
     }
 }
 
-
-int eventfd_count = 0;
-
-
-
+int eventfd_count;
 
 void *loopback_event_select(void *_e)
 {
     int retval;
-    uint64_t eftd_ctr;
     fd_set rfds;
     int s;
     EventNotifier *e = (EventNotifier *)_e;
@@ -1286,18 +1208,8 @@ bool virtio_device_disabled(VirtIODevice *vdev)
     return vdev->disabled || vdev->broken;
 }
 
-int prev_level = 0;
-
-void *my_notify(void *data) {
-
-    int irq_num = 44;
-    (void) data;
-    (void) ioctl(fd, IRQ, &irq_num);
-
-    pthread_exit(NULL);
-}
-
-int int_count = 0;
+static int prev_level;
+static int int_count;
 
 void virtio_loopback_update_irq(VirtIODevice *vdev)
 {
@@ -1314,6 +1226,7 @@ void virtio_loopback_update_irq(VirtIODevice *vdev)
     DBG("prev_level: %d\n", prev_level);
 
     if (!((level == 1) && (prev_level == 0))) {
+        DBG("!((level == 1) && (prev_level == 0))\n");
         prev_level = level;
         return;
     }
@@ -1322,12 +1235,11 @@ void virtio_loopback_update_irq(VirtIODevice *vdev)
     DBG("Trigger interrupt (ioctl)\n");
     DBG("Interrupt counter: %d\n", int_count++);
 
-    //(void)pthread_create(&my_thread_id, NULL, my_notify, NULL);
     (void) ioctl(fd, IRQ, &irq_num);
 }
 
-bool enable_virtio_interrupt = false;
+bool enable_virtio_interrupt;
 
 /* virtio device */
 void virtio_notify_vector(VirtIODevice *vdev)
@@ -1485,6 +1397,8 @@ static uint64_t virtio_loopback_read(VirtIODevice *vdev, uint64_t offset,
 
     uint64_t ret;
 
+    DBG("READ\n");
+
     if (!vdev) {
         /*
          * If no backend is present, we treat most registers as
@@ -1544,26 +1458,28 @@ static uint64_t virtio_loopback_read(VirtIODevice *vdev, uint64_t offset,
     case VIRTIO_MMIO_MAGIC_VALUE:
         return VIRT_MAGIC;
     case VIRTIO_MMIO_VERSION:
+        DBG("VIRTIO_MMIO_VERSION ->\n");
        if (proxy->legacy) {
+            DBG("VIRTIO_MMIO_VERSION -> legacy\n");
             return VIRT_VERSION_LEGACY;
         } else {
+            DBG("VIRTIO_MMIO_VERSION -> version\n");
             return VIRT_VERSION;
         }
     case VIRTIO_MMIO_DEVICE_ID:
         return vdev->device_id;
     case VIRTIO_MMIO_VENDOR_ID:
+        DBG("READ\n");
         return VIRT_VENDOR;
     case VIRTIO_MMIO_DEVICE_FEATURES:
         if (proxy->legacy) {
             if (proxy->host_features_sel) {
-                DBG("vdev->host_features: 0x%lx\n", (vdev->host_features >> 32));
-                return (vdev->host_features >> 32);
+                return vdev->host_features >> 32;
             } else {
-                DBG("vdev->host_features: 0x%lx\n", vdev->host_features & (uint64_t)(((1ULL << 32) - 1)));
-                return (vdev->host_features & (uint64_t)(((1ULL << 32) - 1)));
+                return vdev->host_features & (uint64_t)(((1ULL << 32) - 1));
             }
         } else {
-            /* TODO: To be implemented */
+            /* TODO: To be implemented */
         }
     case VIRTIO_MMIO_QUEUE_NUM_MAX:
         /* TODO: To be implemented */
@@ -1587,6 +1503,7 @@
     case VIRTIO_MMIO_INTERRUPT_STATUS:
         return vdev->isr;
     case VIRTIO_MMIO_STATUS:
+        DBG("Read VIRTIO_MMIO_STATUS: %d\n", vdev->status);
         return vdev->status;
     case VIRTIO_MMIO_CONFIG_GENERATION:
         if (proxy->legacy) {
@@ -1629,7 +1546,8 @@
 }
 
 uint64_t vring_phys_addrs[2] = {0};
-uint32_t vring_phys_addrs_idx = 0;
+uint32_t vring_phys_addrs_idx;
+static int notify_cnt;
 
 void virtio_loopback_write(VirtIODevice *vdev, uint64_t offset,
                            uint64_t value, unsigned size)
@@ -1673,6 +1591,7 @@
     }
     switch (offset) {
     case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
+        DBG("VIRTIO_MMIO_DEVICE_FEATURES_SEL: 0x%lx\n", value);
         if (value) {
             proxy->host_features_sel = 1;
         } else {
@@ -1771,6 +1690,8 @@
         /* TODO: To be implemented */
         break;
     case VIRTIO_MMIO_QUEUE_NOTIFY:
+        DBG("\nVIRTIO_MMIO_QUEUE_NOTIFY: vq_index -> %d, notify_cnt: %d\n",
+            value, notify_cnt++);
         if (value < VIRTIO_QUEUE_MAX) {
             virtio_queue_notify(vdev, value);
         }
@@ -1883,7 +1804,7 @@ void adapter_read_write_cb(void)
      *
      * print_neg_flag (address->notification, address->read);
      */
-    print_neg_flag (address->notification, address->read);
+    print_neg_flag(address->notification, address->read);
 
     if (address->read) {
         address->data = virtio_loopback_read(global_vdev,
@@ -1907,7 +1828,9 @@
 void *notify_select(void *data)
 {
     int retval;
     fd_set rfds;
+    uint64_t eftd_ctr;
     int efd = *(int *)data;
+    int32_t vq_index;
 
     DBG("\nWaiting for loopback notify events\n");
@@ -1924,9 +1847,11 @@ void *notify_select(void *data)
                 DBG("Eventfd read error\n");
                 exit(1);
             } else {
-                //rcu_read_lock();
-                virtio_queue_notify(global_vdev, 0);
-                //rcu_read_unlock();
+                DBG("\nnotify select\n");
+                (void)ioctl(fd, SHARE_NOTIFIED_VQ_INDEX, &vq_index);
+                DBG("\nnotify_select: vq_index -> %d, notify_cnt: %d,"
+                    "eventfd_val: %lu\n\n", vq_index, notify_cnt++, eftd_ctr);
+                virtio_queue_notify(global_vdev, vq_index);
             }
         }
     }
@@ -1935,6 +1860,7 @@
 void *driver_event_select(void *data)
 {
     int retval;
+    uint64_t eftd_ctr;
     int efd = *(int *)data;
 
     DBG("\nWaiting for loopback read/write events\n");
@@ -1990,6 +1916,7 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
     }
 
     if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
+        DBG("Error: queue_size > VIRTQUEUE_MAX_SIZE\n");
         exit(1);
     }
 
@@ -2008,12 +1935,15 @@ void virtio_dev_init(VirtIODevice *vdev, const char *name,
 {
     int i;
 
+    DBG("virtio_dev_init\n");
+
     vdev->start_on_kick = false;
     vdev->started = false;
     vdev->device_id = device_id;
     vdev->status = 0;
     vdev->queue_sel = 0;
     vdev->config_vector = VIRTIO_NO_VECTOR;
+    /* TODO: check malloc return value */
     vdev->vq = (VirtQueue *) malloc(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
     vdev->vm_running = false;
     vdev->broken = false;
@@ -2033,6 +1963,7 @@
     }
 
     vdev->use_guest_notifier_mask = true;
+    DBG("virtio_dev_init return\n");
 }
 
 static bool virtio_loopback_ioeventfd_enabled(VirtIODevice *d)
@@ -2065,6 +1996,7 @@ void virtio_loopback_bus_init(VirtioBus *k)
     k->set_guest_notifiers = virtio_loopback_set_guest_notifiers;
     k->ioeventfd_enabled = virtio_loopback_ioeventfd_enabled;
     k->ioeventfd_assign = virtio_loopback_ioeventfd_assign;
+    DBG("virtio_loopback_bus_init(...) return\n");
 }
 
@@ -2076,6 +2008,14 @@ int virtio_loopback_start(void)
     int ret = -1;
     int flags;
 
+    /* Initialize global variables */
+    prev_level = 0;
+    int_count = 0;
+    eventfd_count = 0;
+    enable_virtio_interrupt = false;
+    vring_phys_addrs_idx = 0;
+    notify_cnt = 0;
+
     fd = open("/dev/loopback", O_RDWR);
     if (fd < 0) {
         perror("Open call failed");
@@ -2121,7 +2061,8 @@ int virtio_loopback_start(void)
     }
 
     /* Wait the eventfd */
-    ret = pthread_create(&thread_id_notify, NULL, notify_select, (void *)&efd_notify);
+    ret = pthread_create(&thread_id_notify, NULL, notify_select,
+                         (void *)&efd_notify);
     if (ret != 0) {
         exit(1);
     }
diff --git a/virtio_loopback.h b/virtio_loopback.h
index 25f116f..849f3a9 100644
--- a/virtio_loopback.h
+++ b/virtio_loopback.h
@@ -148,7 +148,7 @@
 #define VIRT_VERSION_LEGACY 1
 #define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
 
-#define VIRTQUEUE_MAX_SIZE 1024
+#define VIRTQUEUE_MAX_SIZE 64
 #define VIRTIO_QUEUE_MAX VIRTQUEUE_MAX_SIZE
 
 #define VIRTIO_NO_VECTOR 0xffff
@@ -167,7 +167,7 @@
 #define SHARE_VQS _IOC(_IOC_WRITE, 'k', 5, sizeof(uint32_t))
 #define SHARE_BUF _IOC(_IOC_WRITE, 'k', 6, sizeof(uint64_t))
 #define SHARE_COM_STRUCT _IOC(_IOC_WRITE, 'k', 7, 0)
-#define MAP_BLK _IOC(_IOC_WRITE, 'k', 9, 0)
+#define SHARE_NOTIFIED_VQ_INDEX _IOC(_IOC_WRITE, 'k', 8, sizeof(int32_t))
 
 #define VIRTIO_PCI_VRING_ALIGN 4096
 
@@ -193,6 +193,9 @@ typedef struct VirtIOMMIOProxy {
 #define VRING_DESC_F_WRITE 2
 /* This means the buffer contains a list of buffer descriptors. */
 #define VRING_DESC_F_INDIRECT 4
+/* Reset vrings value */
+#define VIRTIO_F_RING_RESET 40
+
 
 /*
  * Mark a descriptor as available or used in packed ring.
@@ -393,6 +396,7 @@ typedef struct VirtIODevice {
     char *bus_name;
     uint8_t device_endian;
     bool use_guest_notifier_mask;
+    /* TODO: Switch to union? */
     VirtIORNG *vrng;
     VirtIOInput *vinput;
     VHostUserRNG *vhrng;
@@ -588,6 +592,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
 void virtio_add_feature(uint64_t *features, unsigned int fbit);
 bool virtio_has_feature(uint64_t features, unsigned int fbit);
 bool virtio_device_started(VirtIODevice *vdev, uint8_t status);
+bool virtio_device_should_start(VirtIODevice *vdev, uint8_t status);
 
 int virtio_queue_empty(VirtQueue *vq);
 void *virtqueue_pop(VirtQueue *vq, size_t sz);
@@ -646,6 +651,7 @@ uint32_t get_vqs_max_size(VirtIODevice *vdev);
 #define VIRTIO_CONFIG_S_DRIVER_OK 4
 #define VIRTIO_F_VERSION_1 32
 #define VIRTIO_F_ACCESS_PLATFORM 33
+
 /*
  * Legacy name for VIRTIO_F_ACCESS_PLATFORM
  * (for compatibility with old userspace)
@@ -679,6 +685,19 @@ uint32_t get_vqs_max_size(VirtIODevice *vdev);
 /* Check if pointer p is n-bytes aligned */
 #define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n))
 
+/*
+ * Define 1 GB offset in order to request big enough
+ * memory blocks from the kernel:
+ * 0x40000000 = 1024 * 1024 * 1024 = 64 * 4096 * 4096 = 1G
+ */
+#define OFFSET_1GB 64ULL * PAGE_SIZE * PAGE_SIZE
+
+/*
+ * Define starting physical address of host memory address space
+ */
+#define INIT_PA 0
+
+
 extern VirtIODevice *global_vdev;
 extern VirtIOMMIOProxy *proxy;
 extern VirtioBus *global_vbus;