author    Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>  2023-10-10 11:40:56 +0000
committer Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>  2023-10-10 11:40:56 +0000
commit    e02cda008591317b1625707ff8e115a4841aa889 (patch)
tree      aee302e3cf8b59ec2d32ec481be3d1afddfc8968 /hw/rdma/vmw/pvrdma_dev_ring.c
parent    cc668e6b7e0ffd8c9d130513d12053cf5eda1d3b (diff)
Introduce Virtio-loopback epsilon release:

The epsilon release introduces a new compatibility layer which makes the virtio-loopback design work with QEMU and the rust-vmm vhost-user backend without requiring any changes.

Signed-off-by: Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
Change-Id: I52e57563e08a7d0bdc002f8e928ee61ba0c53dd9
Diffstat (limited to 'hw/rdma/vmw/pvrdma_dev_ring.c')
-rw-r--r--  hw/rdma/vmw/pvrdma_dev_ring.c  142
1 file changed, 142 insertions(+), 0 deletions(-)
diff --git a/hw/rdma/vmw/pvrdma_dev_ring.c b/hw/rdma/vmw/pvrdma_dev_ring.c
new file mode 100644
index 000000000..42130667a
--- /dev/null
+++ b/hw/rdma/vmw/pvrdma_dev_ring.c
@@ -0,0 +1,142 @@
+/*
+ * QEMU paravirtual RDMA - Device rings
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "hw/pci/pci.h"
+#include "cpu.h"
+
+#include "trace.h"
+
+#include "../rdma_utils.h"
+#include "pvrdma_dev_ring.h"
+
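+/*
+ * Map the guest memory pages that back a device ring and initialize the
+ * ring descriptor. The ring spans npages pages of TARGET_PAGE_SIZE bytes
+ * each; tbl holds the DMA address of every page.
+ */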
+int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
+ PvrdmaRingState *ring_state, uint32_t max_elems,
+ size_t elem_sz, dma_addr_t *tbl, uint32_t npages)
+{
+ int i;
+ int rc = 0;
+
+ pstrcpy(ring->name, MAX_RING_NAME_SZ, name);
+ ring->dev = dev;
+ ring->ring_state = ring_state;
+ ring->max_elems = max_elems;
+ ring->elem_sz = elem_sz;
+    /* TODO: Decide whether we want to reset the driver's ring state here:
+     * qatomic_set(&ring->ring_state->prod_tail, 0);
+     * qatomic_set(&ring->ring_state->cons_head, 0);
+     */
+ ring->npages = npages;
+ ring->pages = g_malloc0(npages * sizeof(void *));
+
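+    /* Map each page listed in the page table into host memory. */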
+ for (i = 0; i < npages; i++) {
+ if (!tbl[i]) {
+ rdma_error_report("npages=%d but tbl[%d] is NULL", npages, i);
+ continue;
+ }
+
+ ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE);
+ if (!ring->pages[i]) {
+ rc = -ENOMEM;
+            rdma_error_report("Failed to map page %d in ring %s", i, name);
+ goto out_free;
+ }
+ memset(ring->pages[i], 0, TARGET_PAGE_SIZE);
+ }
+
+ goto out;
+
+out_free:
+ while (i--) {
+ rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE);
+ }
+ g_free(ring->pages);
+
+out:
+ return rc;
+}
+
+void *pvrdma_ring_next_elem_read(PvrdmaRing *ring)
+{
+ unsigned int idx, offset;
+ const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail);
+ const uint32_t head = qatomic_read(&ring->ring_state->cons_head);
+
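+    /*
+     * Ring indices run over [0, 2 * max_elems): the extra wrap bit lets
+     * head == tail mean "empty" while head ^ max_elems == tail means
+     * "full". Bits outside that range indicate a corrupted ring state.
+     */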
+ if (tail & ~((ring->max_elems << 1) - 1) ||
+ head & ~((ring->max_elems << 1) - 1) ||
+ tail == head) {
+ trace_pvrdma_ring_next_elem_read_no_data(ring->name);
+ return NULL;
+ }
+
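+    /* Drop the wrap bit, then locate the element within its page. */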
+ idx = head & (ring->max_elems - 1);
+ offset = idx * ring->elem_sz;
+ return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
+}
+
+void pvrdma_ring_read_inc(PvrdmaRing *ring)
+{
+ uint32_t idx = qatomic_read(&ring->ring_state->cons_head);
+
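+    /* Advance the consumer head, wrapping modulo 2 * max_elems. */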
+ idx = (idx + 1) & ((ring->max_elems << 1) - 1);
+ qatomic_set(&ring->ring_state->cons_head, idx);
+}
+
+void *pvrdma_ring_next_elem_write(PvrdmaRing *ring)
+{
+ unsigned int idx, offset;
+ const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail);
+ const uint32_t head = qatomic_read(&ring->ring_state->cons_head);
+
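+    /*
+     * The ring is full when the producer tail is exactly one lap ahead
+     * of the consumer head, i.e. the indices differ only in the wrap bit.
+     */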
+ if (tail & ~((ring->max_elems << 1) - 1) ||
+ head & ~((ring->max_elems << 1) - 1) ||
+ tail == (head ^ ring->max_elems)) {
+ rdma_error_report("CQ is full");
+ return NULL;
+ }
+
+ idx = tail & (ring->max_elems - 1);
+ offset = idx * ring->elem_sz;
+ return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
+}
+
+void pvrdma_ring_write_inc(PvrdmaRing *ring)
+{
+ uint32_t idx = qatomic_read(&ring->ring_state->prod_tail);
+
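+    /* Advance the producer tail, wrapping modulo 2 * max_elems. */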
+ idx = (idx + 1) & ((ring->max_elems << 1) - 1);
+ qatomic_set(&ring->ring_state->prod_tail, idx);
+}
+
+void pvrdma_ring_free(PvrdmaRing *ring)
+{
+ if (!ring) {
+ return;
+ }
+
+ if (!ring->pages) {
+ return;
+ }
+
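+    /* Unmap every guest page backing the ring before freeing the table. */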
+ while (ring->npages--) {
+ rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages],
+ TARGET_PAGE_SIZE);
+ }
+
+ g_free(ring->pages);
+ ring->pages = NULL;
+}