Diffstat (limited to 'hw/rdma')
-rw-r--r--  hw/rdma/Kconfig                    3
-rw-r--r--  hw/rdma/meson.build               10
-rw-r--r--  hw/rdma/rdma.c                    30
-rw-r--r--  hw/rdma/rdma_backend.c          1401
-rw-r--r--  hw/rdma/rdma_backend.h           129
-rw-r--r--  hw/rdma/rdma_backend_defs.h       76
-rw-r--r--  hw/rdma/rdma_rm.c                816
-rw-r--r--  hw/rdma/rdma_rm.h                 97
-rw-r--r--  hw/rdma/rdma_rm_defs.h           146
-rw-r--r--  hw/rdma/rdma_utils.c             125
-rw-r--r--  hw/rdma/rdma_utils.h              64
-rw-r--r--  hw/rdma/trace-events              31
-rw-r--r--  hw/rdma/trace.h                    1
-rw-r--r--  hw/rdma/vmw/pvrdma.h             144
-rw-r--r--  hw/rdma/vmw/pvrdma_cmd.c         825
-rw-r--r--  hw/rdma/vmw/pvrdma_dev_ring.c    142
-rw-r--r--  hw/rdma/vmw/pvrdma_dev_ring.h     46
-rw-r--r--  hw/rdma/vmw/pvrdma_main.c        723
-rw-r--r--  hw/rdma/vmw/pvrdma_qp_ops.c      298
-rw-r--r--  hw/rdma/vmw/pvrdma_qp_ops.h       28
-rw-r--r--  hw/rdma/vmw/trace-events          17
-rw-r--r--  hw/rdma/vmw/trace.h                1
22 files changed, 5153 insertions, 0 deletions
diff --git a/hw/rdma/Kconfig b/hw/rdma/Kconfig
new file mode 100644
index 000000000..8e2211288
--- /dev/null
+++ b/hw/rdma/Kconfig
@@ -0,0 +1,3 @@
+config VMW_PVRDMA
+ default y if PCI_DEVICES
+ depends on PVRDMA && PCI && MSI_NONBROKEN
diff --git a/hw/rdma/meson.build b/hw/rdma/meson.build
new file mode 100644
index 000000000..7325f40c3
--- /dev/null
+++ b/hw/rdma/meson.build
@@ -0,0 +1,10 @@
+specific_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files(
+ 'rdma.c',
+ 'rdma_backend.c',
+ 'rdma_rm.c',
+ 'rdma_utils.c',
+ 'vmw/pvrdma_cmd.c',
+ 'vmw/pvrdma_dev_ring.c',
+ 'vmw/pvrdma_main.c',
+ 'vmw/pvrdma_qp_ops.c',
+))
diff --git a/hw/rdma/rdma.c b/hw/rdma/rdma.c
new file mode 100644
index 000000000..7bec0d0d2
--- /dev/null
+++ b/hw/rdma/rdma.c
@@ -0,0 +1,30 @@
+/*
+ * RDMA device interface
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/rdma/rdma.h"
+#include "qemu/module.h"
+
+static const TypeInfo rdma_hmp_info = {
+ .name = INTERFACE_RDMA_PROVIDER,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(RdmaProviderClass),
+};
+
+static void rdma_register_types(void)
+{
+ type_register_static(&rdma_hmp_info);
+}
+
+type_init(rdma_register_types)
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
new file mode 100644
index 000000000..6dcdfbbbe
--- /dev/null
+++ b/hw/rdma/rdma_backend.c
@@ -0,0 +1,1401 @@
+/*
+ * QEMU paravirtual RDMA - Generic RDMA backend
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/qapi-events-rdma.h"
+
+#include <infiniband/verbs.h>
+
+#include "contrib/rdmacm-mux/rdmacm-mux.h"
+#include "trace.h"
+#include "rdma_utils.h"
+#include "rdma_rm.h"
+#include "rdma_backend.h"
+
+#define THR_NAME_LEN 16
+#define THR_POLL_TO 5000
+
+#define MAD_HDR_SIZE sizeof(struct ibv_grh)
+
+typedef struct BackendCtx {
+ void *up_ctx;
+ struct ibv_sge sge; /* Used to save MAD recv buffer */
+ RdmaBackendQP *backend_qp; /* To maintain recv buffers */
+ RdmaBackendSRQ *backend_srq;
+} BackendCtx;
+
+struct backend_umad {
+ struct ib_user_mad hdr;
+ char mad[RDMA_MAX_PRIVATE_DATA];
+};
+
+static void (*comp_handler)(void *ctx, struct ibv_wc *wc);
+
+static void dummy_comp_handler(void *ctx, struct ibv_wc *wc)
+{
+ rdma_error_report("No completion handler is registered");
+}
+
+static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
+ void *ctx)
+{
+ struct ibv_wc wc = {};
+
+ wc.status = status;
+ wc.vendor_err = vendor_err;
+
+ comp_handler(ctx, &wc);
+}
+
+static void free_cqe_ctx(gpointer data, gpointer user_data)
+{
+ BackendCtx *bctx;
+ RdmaDeviceResources *rdma_dev_res = user_data;
+ unsigned long cqe_ctx_id = GPOINTER_TO_INT(data);
+
+ bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id);
+ if (bctx) {
+ rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id);
+ qatomic_dec(&rdma_dev_res->stats.missing_cqe);
+ }
+ g_free(bctx);
+}
+
+static void clean_recv_mads(RdmaBackendDev *backend_dev)
+{
+ unsigned long cqe_ctx_id;
+
+ do {
+ cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev->
+ recv_mads_list);
+ if (cqe_ctx_id != -ENOENT) {
+ qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
+ free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id),
+ backend_dev->rdma_dev_res);
+ }
+ } while (cqe_ctx_id != -ENOENT);
+}
+
+static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
+{
+ int i, ne, total_ne = 0;
+ BackendCtx *bctx;
+ struct ibv_wc wc[2];
+ RdmaProtectedGSList *cqe_ctx_list;
+
+ WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
+ do {
+ ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
+
+ trace_rdma_poll_cq(ne, ibcq);
+
+ for (i = 0; i < ne; i++) {
+ bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+ if (unlikely(!bctx)) {
+ rdma_error_report("No matching ctx for req %"PRId64,
+ wc[i].wr_id);
+ continue;
+ }
+
+ comp_handler(bctx->up_ctx, &wc[i]);
+
+ if (bctx->backend_qp) {
+ cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
+ } else {
+ cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
+ }
+
+ rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
+ rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+ g_free(bctx);
+ }
+ total_ne += ne;
+ } while (ne > 0);
+ qatomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
+ }
+
+ if (ne < 0) {
+ rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
+ }
+
+ rdma_dev_res->stats.completions += total_ne;
+
+ return total_ne;
+}
+
+static void *comp_handler_thread(void *arg)
+{
+ RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
+ int rc;
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int flags;
+ GPollFD pfds[1];
+
+ /* Change to non-blocking mode */
+ flags = fcntl(backend_dev->channel->fd, F_GETFL);
+ rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
+ if (rc < 0) {
+ rdma_error_report("Failed to change backend channel FD to non-blocking");
+ return NULL;
+ }
+
+ pfds[0].fd = backend_dev->channel->fd;
+ pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;
+
+ backend_dev->comp_thread.is_running = true;
+
+ while (backend_dev->comp_thread.run) {
+ do {
+ rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
+ if (!rc) {
+ backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++;
+ }
+ } while (!rc && backend_dev->comp_thread.run);
+
+ if (backend_dev->comp_thread.run) {
+ rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
+ if (unlikely(rc)) {
+ rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc,
+ errno);
+ continue;
+ }
+
+ rc = ibv_req_notify_cq(ev_cq, 0);
+ if (unlikely(rc)) {
+ rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc,
+ errno);
+ }
+
+ backend_dev->rdma_dev_res->stats.poll_cq_from_bk++;
+ rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);
+
+ ibv_ack_cq_events(ev_cq, 1);
+ }
+ }
+
+ backend_dev->comp_thread.is_running = false;
+
+ qemu_thread_exit(0);
+
+ return NULL;
+}
+
+static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
+{
+ qatomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
+}
+
+static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
+{
+ qatomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
+}
+
+static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev)
+{
+ return qatomic_read(&backend_dev->rdmacm_mux.can_receive);
+}
+
+static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be)
+{
+ RdmaCmMuxMsg msg = {};
+ int ret;
+
+ ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg));
+ if (ret != sizeof(msg)) {
+ rdma_error_report("Got invalid message from mux: size %d, expecting %d",
+ ret, (int)sizeof(msg));
+ return -EIO;
+ }
+
+ trace_rdmacm_mux_check_op_status(msg.hdr.msg_type, msg.hdr.op_code,
+ msg.hdr.err_code);
+
+ if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) {
+ rdma_error_report("Got invalid message type %d", msg.hdr.msg_type);
+ return -EIO;
+ }
+
+ if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) {
+ rdma_error_report("Operation failed in mux, error code %d",
+ msg.hdr.err_code);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rdmacm_mux_send(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg)
+{
+ int rc = 0;
+
+ msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ;
+ trace_rdmacm_mux("send", msg->hdr.msg_type, msg->hdr.op_code);
+ disable_rdmacm_mux_async(backend_dev);
+ rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be,
+ (const uint8_t *)msg, sizeof(*msg));
+ if (rc != sizeof(*msg)) {
+ enable_rdmacm_mux_async(backend_dev);
+ rdma_error_report("Failed to send request to rdmacm_mux (rc=%d)", rc);
+ return -EIO;
+ }
+
+ rc = rdmacm_mux_check_op_status(backend_dev->rdmacm_mux.chr_be);
+ if (rc) {
+ rdma_error_report("Failed to execute rdmacm_mux request %d (rc=%d)",
+ msg->hdr.op_code, rc);
+ }
+
+ enable_rdmacm_mux_async(backend_dev);
+
+ return 0;
+}
+
+static void stop_backend_thread(RdmaBackendThread *thread)
+{
+ thread->run = false;
+ while (thread->is_running) {
+ sleep(THR_POLL_TO / SCALE_US / 2);
+ }
+}
+
+static void start_comp_thread(RdmaBackendDev *backend_dev)
+{
+ char thread_name[THR_NAME_LEN] = {};
+
+ stop_backend_thread(&backend_dev->comp_thread);
+
+ snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s",
+ ibv_get_device_name(backend_dev->ib_dev));
+ backend_dev->comp_thread.run = true;
+ qemu_thread_create(&backend_dev->comp_thread.thread, thread_name,
+ comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED);
+}
+
+void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
+ struct ibv_wc *wc))
+{
+ comp_handler = handler;
+}
+
+void rdma_backend_unregister_comp_handler(void)
+{
+ rdma_backend_register_comp_handler(dummy_comp_handler);
+}
+
+int rdma_backend_query_port(RdmaBackendDev *backend_dev,
+ struct ibv_port_attr *port_attr)
+{
+ int rc;
+
+ rc = ibv_query_port(backend_dev->context, backend_dev->port_num, port_attr);
+ if (rc) {
+ rdma_error_report("ibv_query_port fail, rc=%d, errno=%d", rc, errno);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq)
+{
+ int polled;
+
+ rdma_dev_res->stats.poll_cq_from_guest++;
+ polled = rdma_poll_cq(rdma_dev_res, cq->ibcq);
+ if (!polled) {
+ rdma_dev_res->stats.poll_cq_from_guest_empty++;
+ }
+}
+
+static GHashTable *ah_hash;
+
+static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd,
+ uint8_t sgid_idx, union ibv_gid *dgid)
+{
+ GBytes *ah_key = g_bytes_new(dgid, sizeof(*dgid));
+ struct ibv_ah *ah = g_hash_table_lookup(ah_hash, ah_key);
+
+ if (ah) {
+ trace_rdma_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix),
+ be64_to_cpu(dgid->global.interface_id));
+ g_bytes_unref(ah_key);
+ } else {
+ struct ibv_ah_attr ah_attr = {
+ .is_global = 1,
+ .port_num = backend_dev->port_num,
+ .grh.hop_limit = 1,
+ };
+
+ ah_attr.grh.dgid = *dgid;
+ ah_attr.grh.sgid_index = sgid_idx;
+
+ ah = ibv_create_ah(pd, &ah_attr);
+ if (ah) {
+ g_hash_table_insert(ah_hash, ah_key, ah);
+ } else {
+ g_bytes_unref(ah_key);
+ rdma_error_report("Failed to create AH for gid <0x%" PRIx64", 0x%"PRIx64">",
+ be64_to_cpu(dgid->global.subnet_prefix),
+ be64_to_cpu(dgid->global.interface_id));
+ }
+
+ trace_rdma_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix),
+ be64_to_cpu(dgid->global.interface_id));
+ }
+
+ return ah;
+}
+
+static void destroy_ah_hash_key(gpointer data)
+{
+ g_bytes_unref(data);
+}
+
+static void destroy_ah_hast_data(gpointer data)
+{
+ struct ibv_ah *ah = data;
+
+ ibv_destroy_ah(ah);
+}
+
+static void ah_cache_init(void)
+{
+ ah_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
+ destroy_ah_hash_key, destroy_ah_hast_data);
+}
+
+#ifdef LEGACY_RDMA_REG_MR
+static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
+ struct ibv_sge *sge, uint8_t num_sge,
+ uint64_t *total_length)
+{
+ RdmaRmMR *mr;
+ int idx;
+
+ for (idx = 0; idx < num_sge; idx++) {
+ mr = rdma_rm_get_mr(rdma_dev_res, sge[idx].lkey);
+ if (unlikely(!mr)) {
+ rdma_error_report("Invalid lkey 0x%x", sge[idx].lkey);
+ return VENDOR_ERR_INVLKEY | sge[idx].lkey;
+ }
+
+ sge[idx].addr = (uintptr_t)mr->virt + sge[idx].addr - mr->start;
+ sge[idx].lkey = rdma_backend_mr_lkey(&mr->backend_mr);
+
+ *total_length += sge[idx].length;
+ }
+
+ return 0;
+}
+#else
+static inline int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
+ struct ibv_sge *sge, uint8_t num_sge,
+ uint64_t *total_length)
+{
+ int idx;
+
+ for (idx = 0; idx < num_sge; idx++) {
+ *total_length += sge[idx].length;
+ }
+ return 0;
+}
+#endif
+
+static void trace_mad_message(const char *title, char *buf, int len)
+{
+ int i;
+ char *b = g_malloc0(len * 3 + 1);
+ char b1[4];
+
+ for (i = 0; i < len; i++) {
+ sprintf(b1, "%.2X ", buf[i] & 0x000000FF);
+ strcat(b, b1);
+ }
+
+ trace_rdma_mad_message(title, len, b);
+
+ g_free(b);
+}
+
+static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx,
+ union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge)
+{
+ RdmaCmMuxMsg msg = {};
+ char *hdr, *data;
+ int ret;
+
+ if (num_sge != 2) {
+ return -EINVAL;
+ }
+
+ msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD;
+ memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid));
+
+ msg.umad_len = sge[0].length + sge[1].length;
+
+ if (msg.umad_len > sizeof(msg.umad.mad)) {
+ return -ENOMEM;
+ }
+
+ msg.umad.hdr.addr.qpn = htobe32(1);
+ msg.umad.hdr.addr.grh_present = 1;
+ msg.umad.hdr.addr.gid_index = sgid_idx;
+ memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid));
+ msg.umad.hdr.addr.hop_limit = 0xFF;
+
+ hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length);
+ if (!hdr) {
+ return -ENOMEM;
+ }
+ data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length);
+ if (!data) {
+ rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
+ return -ENOMEM;
+ }
+
+ memcpy(&msg.umad.mad[0], hdr, sge[0].length);
+ memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length);
+
+ rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length);
+ rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
+
+ trace_mad_message("send", msg.umad.mad, msg.umad_len);
+
+ ret = rdmacm_mux_send(backend_dev, &msg);
+ if (ret) {
+ rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void rdma_backend_post_send(RdmaBackendDev *backend_dev,
+ RdmaBackendQP *qp, uint8_t qp_type,
+ struct ibv_sge *sge, uint32_t num_sge,
+ uint8_t sgid_idx, union ibv_gid *sgid,
+ union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
+ void *ctx)
+{
+ BackendCtx *bctx;
+ uint32_t bctx_id;
+ int rc;
+ struct ibv_send_wr wr = {}, *bad_wr;
+
+ if (!qp->ibqp) { /* This field is not initialized for QP0 and QP1 */
+ if (qp_type == IBV_QPT_SMI) {
+ rdma_error_report("Got QP0 request");
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
+ } else if (qp_type == IBV_QPT_GSI) {
+ rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge);
+ if (rc) {
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
+ backend_dev->rdma_dev_res->stats.mad_tx_err++;
+ } else {
+ complete_work(IBV_WC_SUCCESS, 0, ctx);
+ backend_dev->rdma_dev_res->stats.mad_tx++;
+ }
+ }
+ return;
+ }
+
+ bctx = g_malloc0(sizeof(*bctx));
+ bctx->up_ctx = ctx;
+ bctx->backend_qp = qp;
+
+ rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
+ if (unlikely(rc)) {
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
+ goto err_free_bctx;
+ }
+
+ rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);
+
+ rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
+ &backend_dev->rdma_dev_res->stats.tx_len);
+ if (rc) {
+ complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
+ goto err_dealloc_cqe_ctx;
+ }
+
+ if (qp_type == IBV_QPT_UD) {
+ wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid);
+ if (!wr.wr.ud.ah) {
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
+ goto err_dealloc_cqe_ctx;
+ }
+ wr.wr.ud.remote_qpn = dqpn;
+ wr.wr.ud.remote_qkey = dqkey;
+ }
+
+ wr.num_sge = num_sge;
+ wr.opcode = IBV_WR_SEND;
+ wr.send_flags = IBV_SEND_SIGNALED;
+ wr.sg_list = sge;
+ wr.wr_id = bctx_id;
+
+ rc = ibv_post_send(qp->ibqp, &wr, &bad_wr);
+ if (rc) {
+ rdma_error_report("ibv_post_send fail, qpn=0x%x, rc=%d, errno=%d",
+ qp->ibqp->qp_num, rc, errno);
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
+ goto err_dealloc_cqe_ctx;
+ }
+
+ qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
+ backend_dev->rdma_dev_res->stats.tx++;
+
+ return;
+
+err_dealloc_cqe_ctx:
+ backend_dev->rdma_dev_res->stats.tx_err++;
+ rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);
+
+err_free_bctx:
+ g_free(bctx);
+}
+
+static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
+ struct ibv_sge *sge, uint32_t num_sge,
+ void *ctx)
+{
+ BackendCtx *bctx;
+ int rc;
+ uint32_t bctx_id;
+
+ if (num_sge != 1) {
+ rdma_error_report("Invalid num_sge (%d), expecting 1", num_sge);
+ return VENDOR_ERR_INV_NUM_SGE;
+ }
+
+ if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) {
+ rdma_error_report("Too small buffer for MAD");
+ return VENDOR_ERR_INV_MAD_BUFF;
+ }
+
+ bctx = g_malloc0(sizeof(*bctx));
+
+ rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
+ if (unlikely(rc)) {
+ g_free(bctx);
+ return VENDOR_ERR_NOMEM;
+ }
+
+ bctx->up_ctx = ctx;
+ bctx->sge = *sge;
+
+ rdma_protected_gqueue_append_int64(&backend_dev->recv_mads_list, bctx_id);
+
+ return 0;
+}
+
+void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
+ RdmaBackendQP *qp, uint8_t qp_type,
+ struct ibv_sge *sge, uint32_t num_sge, void *ctx)
+{
+ BackendCtx *bctx;
+ uint32_t bctx_id;
+ int rc;
+ struct ibv_recv_wr wr = {}, *bad_wr;
+
+ if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */
+ if (qp_type == IBV_QPT_SMI) {
+ rdma_error_report("Got QP0 request");
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
+ }
+ if (qp_type == IBV_QPT_GSI) {
+ rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx);
+ if (rc) {
+ complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
+ backend_dev->rdma_dev_res->stats.mad_rx_bufs_err++;
+ } else {
+ backend_dev->rdma_dev_res->stats.mad_rx_bufs++;
+ }
+ }
+ return;
+ }
+
+ bctx = g_malloc0(sizeof(*bctx));
+ bctx->up_ctx = ctx;
+ bctx->backend_qp = qp;
+
+ rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
+ if (unlikely(rc)) {
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
+ goto err_free_bctx;
+ }
+
+ rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);
+
+ rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
+ &backend_dev->rdma_dev_res->stats.rx_bufs_len);
+ if (rc) {
+ complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
+ goto err_dealloc_cqe_ctx;
+ }
+
+ wr.num_sge = num_sge;
+ wr.sg_list = sge;
+ wr.wr_id = bctx_id;
+ rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr);
+ if (rc) {
+ rdma_error_report("ibv_post_recv fail, qpn=0x%x, rc=%d, errno=%d",
+ qp->ibqp->qp_num, rc, errno);
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
+ goto err_dealloc_cqe_ctx;
+ }
+
+ qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
+ backend_dev->rdma_dev_res->stats.rx_bufs++;
+
+ return;
+
+err_dealloc_cqe_ctx:
+ backend_dev->rdma_dev_res->stats.rx_bufs_err++;
+ rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);
+
+err_free_bctx:
+ g_free(bctx);
+}
+
+void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
+ RdmaBackendSRQ *srq, struct ibv_sge *sge,
+ uint32_t num_sge, void *ctx)
+{
+ BackendCtx *bctx;
+ uint32_t bctx_id;
+ int rc;
+ struct ibv_recv_wr wr = {}, *bad_wr;
+
+ bctx = g_malloc0(sizeof(*bctx));
+ bctx->up_ctx = ctx;
+ bctx->backend_srq = srq;
+
+ rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
+ if (unlikely(rc)) {
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
+ goto err_free_bctx;
+ }
+
+ rdma_protected_gslist_append_int32(&srq->cqe_ctx_list, bctx_id);
+
+ rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
+ &backend_dev->rdma_dev_res->stats.rx_bufs_len);
+ if (rc) {
+ complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
+ goto err_dealloc_cqe_ctx;
+ }
+
+ wr.num_sge = num_sge;
+ wr.sg_list = sge;
+ wr.wr_id = bctx_id;
+ rc = ibv_post_srq_recv(srq->ibsrq, &wr, &bad_wr);
+ if (rc) {
+ rdma_error_report("ibv_post_srq_recv fail, srqn=0x%x, rc=%d, errno=%d",
+ srq->ibsrq->handle, rc, errno);
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
+ goto err_dealloc_cqe_ctx;
+ }
+
+ qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
+ backend_dev->rdma_dev_res->stats.rx_bufs++;
+ backend_dev->rdma_dev_res->stats.rx_srq++;
+
+ return;
+
+err_dealloc_cqe_ctx:
+ backend_dev->rdma_dev_res->stats.rx_bufs_err++;
+ rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);
+
+err_free_bctx:
+ g_free(bctx);
+}
+
+int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd)
+{
+ pd->ibpd = ibv_alloc_pd(backend_dev->context);
+
+ if (!pd->ibpd) {
+ rdma_error_report("ibv_alloc_pd fail, errno=%d", errno);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void rdma_backend_destroy_pd(RdmaBackendPD *pd)
+{
+ if (pd->ibpd) {
+ ibv_dealloc_pd(pd->ibpd);
+ }
+}
+
+int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
+ size_t length, uint64_t guest_start, int access)
+{
+#ifdef LEGACY_RDMA_REG_MR
+ mr->ibmr = ibv_reg_mr(pd->ibpd, addr, length, access);
+#else
+ mr->ibmr = ibv_reg_mr_iova(pd->ibpd, addr, length, guest_start, access);
+#endif
+ if (!mr->ibmr) {
+ rdma_error_report("ibv_reg_mr fail, errno=%d", errno);
+ return -EIO;
+ }
+
+ mr->ibpd = pd->ibpd;
+
+ return 0;
+}
+
+void rdma_backend_destroy_mr(RdmaBackendMR *mr)
+{
+ if (mr->ibmr) {
+ ibv_dereg_mr(mr->ibmr);
+ }
+}
+
+int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
+ int cqe)
+{
+ int rc;
+
+ cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL,
+ backend_dev->channel, 0);
+ if (!cq->ibcq) {
+ rdma_error_report("ibv_create_cq fail, errno=%d", errno);
+ return -EIO;
+ }
+
+ rc = ibv_req_notify_cq(cq->ibcq, 0);
+ if (rc) {
+ rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno);
+ }
+
+ cq->backend_dev = backend_dev;
+
+ return 0;
+}
+
+void rdma_backend_destroy_cq(RdmaBackendCQ *cq)
+{
+ if (cq->ibcq) {
+ ibv_destroy_cq(cq->ibcq);
+ }
+}
+
+int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
+ RdmaBackendPD *pd, RdmaBackendCQ *scq,
+ RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
+ uint32_t max_send_wr, uint32_t max_recv_wr,
+ uint32_t max_send_sge, uint32_t max_recv_sge)
+{
+ struct ibv_qp_init_attr attr = {};
+
+ qp->ibqp = 0;
+
+ switch (qp_type) {
+ case IBV_QPT_GSI:
+ return 0;
+
+ case IBV_QPT_RC:
+ /* fall through */
+ case IBV_QPT_UD:
+ /* do nothing */
+ break;
+
+ default:
+ rdma_error_report("Unsupported QP type %d", qp_type);
+ return -EIO;
+ }
+
+ attr.qp_type = qp_type;
+ attr.send_cq = scq->ibcq;
+ attr.recv_cq = rcq->ibcq;
+ attr.cap.max_send_wr = max_send_wr;
+ attr.cap.max_recv_wr = max_recv_wr;
+ attr.cap.max_send_sge = max_send_sge;
+ attr.cap.max_recv_sge = max_recv_sge;
+ if (srq) {
+ attr.srq = srq->ibsrq;
+ }
+
+ qp->ibqp = ibv_create_qp(pd->ibpd, &attr);
+ if (!qp->ibqp) {
+ rdma_error_report("ibv_create_qp fail, errno=%d", errno);
+ return -EIO;
+ }
+
+ rdma_protected_gslist_init(&qp->cqe_ctx_list);
+
+ qp->ibpd = pd->ibpd;
+
+ /* TODO: Query QP to get max_inline_data and save it to be used in send */
+
+ return 0;
+}
+
+int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
+ uint8_t qp_type, uint32_t qkey)
+{
+ struct ibv_qp_attr attr = {};
+ int rc, attr_mask;
+
+ attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT;
+ attr.qp_state = IBV_QPS_INIT;
+ attr.pkey_index = 0;
+ attr.port_num = backend_dev->port_num;
+
+ switch (qp_type) {
+ case IBV_QPT_RC:
+ attr_mask |= IBV_QP_ACCESS_FLAGS;
+ trace_rdma_backend_rc_qp_state_init(qp->ibqp->qp_num);
+ break;
+
+ case IBV_QPT_UD:
+ attr.qkey = qkey;
+ attr_mask |= IBV_QP_QKEY;
+ trace_rdma_backend_ud_qp_state_init(qp->ibqp->qp_num, qkey);
+ break;
+
+ default:
+ rdma_error_report("Unsupported QP type %d", qp_type);
+ return -EIO;
+ }
+
+ rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
+ if (rc) {
+ rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
+ uint8_t qp_type, uint8_t sgid_idx,
+ union ibv_gid *dgid, uint32_t dqpn,
+ uint32_t rq_psn, uint32_t qkey, bool use_qkey)
+{
+ struct ibv_qp_attr attr = {};
+ union ibv_gid ibv_gid = {
+ .global.interface_id = dgid->global.interface_id,
+ .global.subnet_prefix = dgid->global.subnet_prefix
+ };
+ int rc, attr_mask;
+
+ attr.qp_state = IBV_QPS_RTR;
+ attr_mask = IBV_QP_STATE;
+
+ qp->sgid_idx = sgid_idx;
+
+ switch (qp_type) {
+ case IBV_QPT_RC:
+ attr.path_mtu = IBV_MTU_1024;
+ attr.dest_qp_num = dqpn;
+ attr.max_dest_rd_atomic = 1;
+ attr.min_rnr_timer = 12;
+ attr.ah_attr.port_num = backend_dev->port_num;
+ attr.ah_attr.is_global = 1;
+ attr.ah_attr.grh.hop_limit = 1;
+ attr.ah_attr.grh.dgid = ibv_gid;
+ attr.ah_attr.grh.sgid_index = qp->sgid_idx;
+ attr.rq_psn = rq_psn;
+
+ attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
+ IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC |
+ IBV_QP_MIN_RNR_TIMER;
+
+ trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num,
+ be64_to_cpu(ibv_gid.global.
+ subnet_prefix),
+ be64_to_cpu(ibv_gid.global.
+ interface_id),
+ qp->sgid_idx, dqpn, rq_psn);
+ break;
+
+ case IBV_QPT_UD:
+ if (use_qkey) {
+ attr.qkey = qkey;
+ attr_mask |= IBV_QP_QKEY;
+ }
+ trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num, use_qkey ? qkey :
+ 0);
+ break;
+ }
+
+ rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
+ if (rc) {
+ rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type,
+ uint32_t sq_psn, uint32_t qkey, bool use_qkey)
+{
+ struct ibv_qp_attr attr = {};
+ int rc, attr_mask;
+
+ attr.qp_state = IBV_QPS_RTS;
+ attr.sq_psn = sq_psn;
+ attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN;
+
+ switch (qp_type) {
+ case IBV_QPT_RC:
+ attr.timeout = 14;
+ attr.retry_cnt = 7;
+ attr.rnr_retry = 7;
+ attr.max_rd_atomic = 1;
+
+ attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY |
+ IBV_QP_MAX_QP_RD_ATOMIC;
+ trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn);
+ break;
+
+ case IBV_QPT_UD:
+ if (use_qkey) {
+ attr.qkey = qkey;
+ attr_mask |= IBV_QP_QKEY;
+ }
+ trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn,
+ use_qkey ? qkey : 0);
+ break;
+ }
+
+ rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
+ if (rc) {
+ rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
+ int attr_mask, struct ibv_qp_init_attr *init_attr)
+{
+ if (!qp->ibqp) {
+ attr->qp_state = IBV_QPS_RTS;
+ return 0;
+ }
+
+ return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr);
+}
+
+void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res)
+{
+ if (qp->ibqp) {
+ ibv_destroy_qp(qp->ibqp);
+ }
+ g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res);
+ rdma_protected_gslist_destroy(&qp->cqe_ctx_list);
+}
+
+int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd,
+ uint32_t max_wr, uint32_t max_sge,
+ uint32_t srq_limit)
+{
+ struct ibv_srq_init_attr srq_init_attr = {};
+
+ srq_init_attr.attr.max_wr = max_wr;
+ srq_init_attr.attr.max_sge = max_sge;
+ srq_init_attr.attr.srq_limit = srq_limit;
+
+ srq->ibsrq = ibv_create_srq(pd->ibpd, &srq_init_attr);
+ if (!srq->ibsrq) {
+ rdma_error_report("ibv_create_srq failed, errno=%d", errno);
+ return -EIO;
+ }
+
+ rdma_protected_gslist_init(&srq->cqe_ctx_list);
+
+ return 0;
+}
+
+int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr)
+{
+ if (!srq->ibsrq) {
+ return -EINVAL;
+ }
+
+ return ibv_query_srq(srq->ibsrq, srq_attr);
+}
+
+int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr,
+ int srq_attr_mask)
+{
+ if (!srq->ibsrq) {
+ return -EINVAL;
+ }
+
+ return ibv_modify_srq(srq->ibsrq, srq_attr, srq_attr_mask);
+}
+
+void rdma_backend_destroy_srq(RdmaBackendSRQ *srq, RdmaDeviceResources *dev_res)
+{
+ if (srq->ibsrq) {
+ ibv_destroy_srq(srq->ibsrq);
+ }
+ g_slist_foreach(srq->cqe_ctx_list.list, free_cqe_ctx, dev_res);
+ rdma_protected_gslist_destroy(&srq->cqe_ctx_list);
+}
+
+#define CHK_ATTR(req, dev, member, fmt) ({ \
+ trace_rdma_check_dev_attr(#member, dev.member, req->member); \
+ if (req->member > dev.member) { \
+ rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \
+ #member, req->member, dev.member); \
+ req->member = dev.member; \
+ } \
+})
+
+static int init_device_caps(RdmaBackendDev *backend_dev,
+ struct ibv_device_attr *dev_attr)
+{
+ struct ibv_device_attr bk_dev_attr;
+ int rc;
+
+ rc = ibv_query_device(backend_dev->context, &bk_dev_attr);
+ if (rc) {
+ rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno);
+ return -EIO;
+ }
+
+ dev_attr->max_sge = MAX_SGE;
+ dev_attr->max_srq_sge = MAX_SGE;
+
+ CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64);
+ CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_srq, "%d");
+
+ return 0;
+}
+
+static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
+ union ibv_gid *my_gid, int paylen)
+{
+ grh->paylen = htons(paylen);
+ grh->sgid = *sgid;
+ grh->dgid = *my_gid;
+}
+
+static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
+ RdmaCmMuxMsg *msg)
+{
+ unsigned long cqe_ctx_id;
+ BackendCtx *bctx;
+ char *mad;
+
+ trace_mad_message("recv", msg->umad.mad, msg->umad_len);
+
+ cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev->recv_mads_list);
+ if (cqe_ctx_id == -ENOENT) {
+ rdma_warn_report("No more free MADs buffers, waiting for a while");
+ sleep(THR_POLL_TO);
+ return;
+ }
+
+ bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
+ if (unlikely(!bctx)) {
+ rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
+ backend_dev->rdma_dev_res->stats.mad_rx_err++;
+ return;
+ }
+
+ mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr,
+ bctx->sge.length);
+ if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) {
+ backend_dev->rdma_dev_res->stats.mad_rx_err++;
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF,
+ bctx->up_ctx);
+ } else {
+ struct ibv_wc wc = {};
+ memset(mad, 0, bctx->sge.length);
+ build_mad_hdr((struct ibv_grh *)mad,
+ (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid,
+ msg->umad_len);
+ memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len);
+ rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);
+
+ wc.byte_len = msg->umad_len;
+ wc.status = IBV_WC_SUCCESS;
+ wc.wc_flags = IBV_WC_GRH;
+ backend_dev->rdma_dev_res->stats.mad_rx++;
+ comp_handler(bctx->up_ctx, &wc);
+ }
+
+ g_free(bctx);
+ rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
+}
+
+static inline int rdmacm_mux_can_receive(void *opaque)
+{
+ RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
+
+ return rdmacm_mux_can_process_async(backend_dev);
+}
+
+static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size)
+{
+ RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
+ RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf;
+
+ trace_rdmacm_mux("read", msg->hdr.msg_type, msg->hdr.op_code);
+
+ if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ &&
+ msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) {
+ rdma_error_report("Error: Not a MAD request, skipping");
+ return;
+ }
+ process_incoming_mad_req(backend_dev, msg);
+}
+
+static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
+{
+ int ret;
+
+ backend_dev->rdmacm_mux.chr_be = mad_chr_be;
+
+ ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be);
+ if (!ret) {
+ rdma_error_report("Missing chardev for MAD multiplexer");
+ return -EIO;
+ }
+
+ rdma_protected_gqueue_init(&backend_dev->recv_mads_list);
+
+ enable_rdmacm_mux_async(backend_dev);
+
+ qemu_chr_fe_set_handlers(backend_dev->rdmacm_mux.chr_be,
+ rdmacm_mux_can_receive, rdmacm_mux_read, NULL,
+ NULL, backend_dev, NULL, true);
+
+ return 0;
+}
+
+static void mad_stop(RdmaBackendDev *backend_dev)
+{
+ clean_recv_mads(backend_dev);
+}
+
+static void mad_fini(RdmaBackendDev *backend_dev)
+{
+ disable_rdmacm_mux_async(backend_dev);
+ qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
+ rdma_protected_gqueue_destroy(&backend_dev->recv_mads_list);
+}
+
+int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
+ union ibv_gid *gid)
+{
+ union ibv_gid sgid;
+ int ret;
+ int i = 0;
+
+ do {
+ ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i,
+ &sgid);
+ i++;
+ } while (!ret && (memcmp(&sgid, gid, sizeof(*gid))));
+
+ trace_rdma_backend_get_gid_index(be64_to_cpu(gid->global.subnet_prefix),
+ be64_to_cpu(gid->global.interface_id),
+ i - 1);
+
+ return ret ? ret : i - 1;
+}
+
+int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
+ union ibv_gid *gid)
+{
+ RdmaCmMuxMsg msg = {};
+ int ret;
+
+ trace_rdma_backend_gid_change("add", be64_to_cpu(gid->global.subnet_prefix),
+ be64_to_cpu(gid->global.interface_id));
+
+ msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG;
+ memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));
+
+ ret = rdmacm_mux_send(backend_dev, &msg);
+ if (ret) {
+ rdma_error_report("Failed to register GID to rdma_umadmux (%d)", ret);
+ return -EIO;
+ }
+
+ qapi_event_send_rdma_gid_status_changed(ifname, true,
+ gid->global.subnet_prefix,
+ gid->global.interface_id);
+
+ return ret;
+}
+
+int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
+ union ibv_gid *gid)
+{
+ RdmaCmMuxMsg msg = {};
+ int ret;
+
+ trace_rdma_backend_gid_change("del", be64_to_cpu(gid->global.subnet_prefix),
+ be64_to_cpu(gid->global.interface_id));
+
+ msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG;
+ memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));
+
+ ret = rdmacm_mux_send(backend_dev, &msg);
+ if (ret) {
+ rdma_error_report("Failed to unregister GID from rdma_umadmux (%d)",
+ ret);
+ return -EIO;
+ }
+
+ qapi_event_send_rdma_gid_status_changed(ifname, false,
+ gid->global.subnet_prefix,
+ gid->global.interface_id);
+
+ return 0;
+}
+
+int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
+ RdmaDeviceResources *rdma_dev_res,
+ const char *backend_device_name, uint8_t port_num,
+ struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be)
+{
+ int i;
+ int ret = 0;
+ int num_ibv_devices;
+ struct ibv_device **dev_list;
+
+ memset(backend_dev, 0, sizeof(*backend_dev));
+
+ backend_dev->dev = pdev;
+ backend_dev->port_num = port_num;
+ backend_dev->rdma_dev_res = rdma_dev_res;
+
+ rdma_backend_register_comp_handler(dummy_comp_handler);
+
+ dev_list = ibv_get_device_list(&num_ibv_devices);
+ if (!dev_list) {
+ rdma_error_report("Failed to get IB devices list");
+ return -EIO;
+ }
+
+ if (num_ibv_devices == 0) {
+ rdma_error_report("No IB devices were found");
+ ret = -ENXIO;
+ goto out_free_dev_list;
+ }
+
+ if (backend_device_name) {
+ for (i = 0; dev_list[i]; ++i) {
+ if (!strcmp(ibv_get_device_name(dev_list[i]),
+ backend_device_name)) {
+ break;
+ }
+ }
+
+ backend_dev->ib_dev = dev_list[i];
+ if (!backend_dev->ib_dev) {
+ rdma_error_report("Failed to find IB device %s",
+ backend_device_name);
+ ret = -EIO;
+ goto out_free_dev_list;
+ }
+ } else {
+ backend_dev->ib_dev = *dev_list;
+ }
+
+ rdma_info_report("uverb device %s", backend_dev->ib_dev->dev_name);
+
+ backend_dev->context = ibv_open_device(backend_dev->ib_dev);
+ if (!backend_dev->context) {
+ rdma_error_report("Failed to open IB device %s",
+ ibv_get_device_name(backend_dev->ib_dev));
+ ret = -EIO;
+ goto out;
+ }
+
+ backend_dev->channel = ibv_create_comp_channel(backend_dev->context);
+ if (!backend_dev->channel) {
+ rdma_error_report("Failed to create IB communication channel");
+ ret = -EIO;
+ goto out_close_device;
+ }
+
+ ret = init_device_caps(backend_dev, dev_attr);
+ if (ret) {
+ rdma_error_report("Failed to initialize device capabilities");
+ ret = -EIO;
+ goto out_destroy_comm_channel;
+ }
+
+
+ ret = mad_init(backend_dev, mad_chr_be);
+ if (ret) {
+ rdma_error_report("Failed to initialize mad");
+ ret = -EIO;
+ goto out_destroy_comm_channel;
+ }
+
+ backend_dev->comp_thread.run = false;
+ backend_dev->comp_thread.is_running = false;
+
+ ah_cache_init();
+
+ goto out_free_dev_list;
+
+out_destroy_comm_channel:
+ ibv_destroy_comp_channel(backend_dev->channel);
+
+out_close_device:
+ ibv_close_device(backend_dev->context);
+
+out_free_dev_list:
+ ibv_free_device_list(dev_list);
+
+out:
+ return ret;
+}
+
+
+void rdma_backend_start(RdmaBackendDev *backend_dev)
+{
+ start_comp_thread(backend_dev);
+}
+
+void rdma_backend_stop(RdmaBackendDev *backend_dev)
+{
+ mad_stop(backend_dev);
+ stop_backend_thread(&backend_dev->comp_thread);
+}
+
+void rdma_backend_fini(RdmaBackendDev *backend_dev)
+{
+ mad_fini(backend_dev);
+ g_hash_table_destroy(ah_hash);
+ ibv_destroy_comp_channel(backend_dev->channel);
+ ibv_close_device(backend_dev->context);
+}
diff --git a/hw/rdma/rdma_backend.h b/hw/rdma/rdma_backend.h
new file mode 100644
index 000000000..225af481e
--- /dev/null
+++ b/hw/rdma/rdma_backend.h
@@ -0,0 +1,129 @@
+/*
+ * RDMA device: Definitions of Backend Device functions
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef RDMA_BACKEND_H
+#define RDMA_BACKEND_H
+
+#include "qapi/error.h"
+#include "chardev/char-fe.h"
+
+#include "rdma_rm_defs.h"
+#include "rdma_backend_defs.h"
+
+/* Vendor Errors */
+#define VENDOR_ERR_FAIL_BACKEND 0x201
+#define VENDOR_ERR_TOO_MANY_SGES 0x202
+#define VENDOR_ERR_NOMEM 0x203
+#define VENDOR_ERR_QP0 0x204
+#define VENDOR_ERR_INV_NUM_SGE 0x205
+#define VENDOR_ERR_MAD_SEND 0x206
+#define VENDOR_ERR_INVLKEY 0x207
+#define VENDOR_ERR_MR_SMALL 0x208
+#define VENDOR_ERR_INV_MAD_BUFF 0x209
+#define VENDOR_ERR_INV_GID_IDX 0x210
+
+/* Add definitions for QP0 and QP1 as there are no userspace enums for them */
+enum ibv_special_qp_type {
+ IBV_QPT_SMI = 0,
+ IBV_QPT_GSI = 1,
+};
+
+static inline uint32_t rdma_backend_qpn(const RdmaBackendQP *qp)
+{
+ return qp->ibqp ? qp->ibqp->qp_num : 1;
+}
+
+static inline uint32_t rdma_backend_mr_lkey(const RdmaBackendMR *mr)
+{
+ return mr->ibmr ? mr->ibmr->lkey : 0;
+}
+
+static inline uint32_t rdma_backend_mr_rkey(const RdmaBackendMR *mr)
+{
+ return mr->ibmr ? mr->ibmr->rkey : 0;
+}
+
+int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
+ RdmaDeviceResources *rdma_dev_res,
+ const char *backend_device_name, uint8_t port_num,
+ struct ibv_device_attr *dev_attr,
+ CharBackend *mad_chr_be);
+void rdma_backend_fini(RdmaBackendDev *backend_dev);
+int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
+ union ibv_gid *gid);
+int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
+ union ibv_gid *gid);
+int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
+ union ibv_gid *gid);
+void rdma_backend_start(RdmaBackendDev *backend_dev);
+void rdma_backend_stop(RdmaBackendDev *backend_dev);
+void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
+ struct ibv_wc *wc));
+void rdma_backend_unregister_comp_handler(void);
+
+int rdma_backend_query_port(RdmaBackendDev *backend_dev,
+ struct ibv_port_attr *port_attr);
+int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd);
+void rdma_backend_destroy_pd(RdmaBackendPD *pd);
+
+int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
+ size_t length, uint64_t guest_start, int access);
+void rdma_backend_destroy_mr(RdmaBackendMR *mr);
+
+int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
+ int cqe);
+void rdma_backend_destroy_cq(RdmaBackendCQ *cq);
+void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq);
+
+int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
+ RdmaBackendPD *pd, RdmaBackendCQ *scq,
+ RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
+ uint32_t max_send_wr, uint32_t max_recv_wr,
+ uint32_t max_send_sge, uint32_t max_recv_sge);
+int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
+ uint8_t qp_type, uint32_t qkey);
+int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
+ uint8_t qp_type, uint8_t sgid_idx,
+ union ibv_gid *dgid, uint32_t dqpn,
+ uint32_t rq_psn, uint32_t qkey, bool use_qkey);
+int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type,
+ uint32_t sq_psn, uint32_t qkey, bool use_qkey);
+int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
+ int attr_mask, struct ibv_qp_init_attr *init_attr);
+void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res);
+
+void rdma_backend_post_send(RdmaBackendDev *backend_dev,
+ RdmaBackendQP *qp, uint8_t qp_type,
+ struct ibv_sge *sge, uint32_t num_sge,
+ uint8_t sgid_idx, union ibv_gid *sgid,
+ union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
+ void *ctx);
+void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
+ RdmaBackendQP *qp, uint8_t qp_type,
+ struct ibv_sge *sge, uint32_t num_sge, void *ctx);
+
+int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd,
+ uint32_t max_wr, uint32_t max_sge,
+ uint32_t srq_limit);
+int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr);
+int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr,
+ int srq_attr_mask);
+void rdma_backend_destroy_srq(RdmaBackendSRQ *srq,
+ RdmaDeviceResources *dev_res);
+void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
+ RdmaBackendSRQ *srq, struct ibv_sge *sge,
+ uint32_t num_sge, void *ctx);
+
+#endif
diff --git a/hw/rdma/rdma_backend_defs.h b/hw/rdma/rdma_backend_defs.h
new file mode 100644
index 000000000..4e6c0ad69
--- /dev/null
+++ b/hw/rdma/rdma_backend_defs.h
@@ -0,0 +1,76 @@
+/*
+ * RDMA device: Definitions of Backend Device structures
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef RDMA_BACKEND_DEFS_H
+#define RDMA_BACKEND_DEFS_H
+
+#include "qemu/thread.h"
+#include "chardev/char-fe.h"
+#include <infiniband/verbs.h>
+#include "contrib/rdmacm-mux/rdmacm-mux.h"
+#include "rdma_utils.h"
+
+typedef struct RdmaDeviceResources RdmaDeviceResources;
+
+typedef struct RdmaBackendThread {
+ QemuThread thread;
+ bool run; /* Cleared by the thread manager to tell the thread to exit */
+ bool is_running; /* Set by the thread to report its status */
+} RdmaBackendThread;
+
+typedef struct RdmaCmMux {
+ CharBackend *chr_be;
+ int can_receive;
+} RdmaCmMux;
+
+typedef struct RdmaBackendDev {
+ RdmaBackendThread comp_thread;
+ PCIDevice *dev;
+ RdmaDeviceResources *rdma_dev_res;
+ struct ibv_device *ib_dev;
+ struct ibv_context *context;
+ struct ibv_comp_channel *channel;
+ uint8_t port_num;
+ RdmaProtectedGQueue recv_mads_list;
+ RdmaCmMux rdmacm_mux;
+} RdmaBackendDev;
+
+typedef struct RdmaBackendPD {
+ struct ibv_pd *ibpd;
+} RdmaBackendPD;
+
+typedef struct RdmaBackendMR {
+ struct ibv_pd *ibpd;
+ struct ibv_mr *ibmr;
+} RdmaBackendMR;
+
+typedef struct RdmaBackendCQ {
+ RdmaBackendDev *backend_dev;
+ struct ibv_cq *ibcq;
+} RdmaBackendCQ;
+
+typedef struct RdmaBackendQP {
+ struct ibv_pd *ibpd;
+ struct ibv_qp *ibqp;
+ uint8_t sgid_idx;
+ RdmaProtectedGSList cqe_ctx_list;
+} RdmaBackendQP;
+
+typedef struct RdmaBackendSRQ {
+ struct ibv_srq *ibsrq;
+ RdmaProtectedGSList cqe_ctx_list;
+} RdmaBackendSRQ;
+
+#endif
diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
new file mode 100644
index 000000000..cfd85de3e
--- /dev/null
+++ b/hw/rdma/rdma_rm.c
@@ -0,0 +1,816 @@
+/*
+ * QEMU paravirtual RDMA - Resource Manager Implementation
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+
+#include "trace.h"
+#include "rdma_utils.h"
+#include "rdma_backend.h"
+#include "rdma_rm.h"
+
+/* Page directory and page tables */
+#define PG_DIR_SZ { TARGET_PAGE_SIZE / sizeof(__u64) }
+#define PG_TBL_SZ { TARGET_PAGE_SIZE / sizeof(__u64) }
+
+void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf)
+{
+ g_string_append_printf(buf, "\ttx : %" PRId64 "\n",
+ dev_res->stats.tx);
+ g_string_append_printf(buf, "\ttx_len : %" PRId64 "\n",
+ dev_res->stats.tx_len);
+ g_string_append_printf(buf, "\ttx_err : %" PRId64 "\n",
+ dev_res->stats.tx_err);
+ g_string_append_printf(buf, "\trx_bufs : %" PRId64 "\n",
+ dev_res->stats.rx_bufs);
+ g_string_append_printf(buf, "\trx_srq : %" PRId64 "\n",
+ dev_res->stats.rx_srq);
+ g_string_append_printf(buf, "\trx_bufs_len : %" PRId64 "\n",
+ dev_res->stats.rx_bufs_len);
+ g_string_append_printf(buf, "\trx_bufs_err : %" PRId64 "\n",
+ dev_res->stats.rx_bufs_err);
+ g_string_append_printf(buf, "\tcomps : %" PRId64 "\n",
+ dev_res->stats.completions);
+ g_string_append_printf(buf, "\tmissing_comps : %" PRId32 "\n",
+ dev_res->stats.missing_cqe);
+ g_string_append_printf(buf, "\tpoll_cq (bk) : %" PRId64 "\n",
+ dev_res->stats.poll_cq_from_bk);
+ g_string_append_printf(buf, "\tpoll_cq_ppoll_to : %" PRId64 "\n",
+ dev_res->stats.poll_cq_ppoll_to);
+ g_string_append_printf(buf, "\tpoll_cq (fe) : %" PRId64 "\n",
+ dev_res->stats.poll_cq_from_guest);
+ g_string_append_printf(buf, "\tpoll_cq_empty : %" PRId64 "\n",
+ dev_res->stats.poll_cq_from_guest_empty);
+ g_string_append_printf(buf, "\tmad_tx : %" PRId64 "\n",
+ dev_res->stats.mad_tx);
+ g_string_append_printf(buf, "\tmad_tx_err : %" PRId64 "\n",
+ dev_res->stats.mad_tx_err);
+ g_string_append_printf(buf, "\tmad_rx : %" PRId64 "\n",
+ dev_res->stats.mad_rx);
+ g_string_append_printf(buf, "\tmad_rx_err : %" PRId64 "\n",
+ dev_res->stats.mad_rx_err);
+ g_string_append_printf(buf, "\tmad_rx_bufs : %" PRId64 "\n",
+ dev_res->stats.mad_rx_bufs);
+ g_string_append_printf(buf, "\tmad_rx_bufs_err : %" PRId64 "\n",
+ dev_res->stats.mad_rx_bufs_err);
+ g_string_append_printf(buf, "\tPDs : %" PRId32 "\n",
+ dev_res->pd_tbl.used);
+ g_string_append_printf(buf, "\tMRs : %" PRId32 "\n",
+ dev_res->mr_tbl.used);
+ g_string_append_printf(buf, "\tUCs : %" PRId32 "\n",
+ dev_res->uc_tbl.used);
+ g_string_append_printf(buf, "\tQPs : %" PRId32 "\n",
+ dev_res->qp_tbl.used);
+ g_string_append_printf(buf, "\tCQs : %" PRId32 "\n",
+ dev_res->cq_tbl.used);
+ g_string_append_printf(buf, "\tCEQ_CTXs : %" PRId32 "\n",
+ dev_res->cqe_ctx_tbl.used);
+}
+
+static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl,
+ uint32_t tbl_sz, uint32_t res_sz)
+{
+ tbl->tbl = g_malloc(tbl_sz * res_sz);
+
+ strncpy(tbl->name, name, MAX_RM_TBL_NAME);
+ tbl->name[MAX_RM_TBL_NAME - 1] = 0;
+
+ tbl->bitmap = bitmap_new(tbl_sz);
+ tbl->tbl_sz = tbl_sz;
+ tbl->res_sz = res_sz;
+ tbl->used = 0;
+ qemu_mutex_init(&tbl->lock);
+}
+
+static inline void res_tbl_free(RdmaRmResTbl *tbl)
+{
+ if (!tbl->bitmap) {
+ return;
+ }
+ qemu_mutex_destroy(&tbl->lock);
+ g_free(tbl->tbl);
+ g_free(tbl->bitmap);
+}
+
+static inline void *rdma_res_tbl_get(RdmaRmResTbl *tbl, uint32_t handle)
+{
+ trace_rdma_res_tbl_get(tbl->name, handle);
+
+ if ((handle < tbl->tbl_sz) && (test_bit(handle, tbl->bitmap))) {
+ return tbl->tbl + handle * tbl->res_sz;
+ } else {
+ rdma_error_report("Table %s, invalid handle %d", tbl->name, handle);
+ return NULL;
+ }
+}
+
+static inline void *rdma_res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle)
+{
+ qemu_mutex_lock(&tbl->lock);
+
+ *handle = find_first_zero_bit(tbl->bitmap, tbl->tbl_sz);
+ if (*handle >= tbl->tbl_sz) {
+ rdma_error_report("Table %s, failed to allocate, bitmap is full",
+ tbl->name);
+ qemu_mutex_unlock(&tbl->lock);
+ return NULL;
+ }
+
+ set_bit(*handle, tbl->bitmap);
+
+ tbl->used++;
+
+ qemu_mutex_unlock(&tbl->lock);
+
+ memset(tbl->tbl + *handle * tbl->res_sz, 0, tbl->res_sz);
+
+ trace_rdma_res_tbl_alloc(tbl->name, *handle);
+
+ return tbl->tbl + *handle * tbl->res_sz;
+}
+
+static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
+{
+ trace_rdma_res_tbl_dealloc(tbl->name, handle);
+
+ QEMU_LOCK_GUARD(&tbl->lock);
+
+ if (handle < tbl->tbl_sz) {
+ clear_bit(handle, tbl->bitmap);
+ tbl->used--;
+ }
+
+}
+
+int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ uint32_t *pd_handle, uint32_t ctx_handle)
+{
+ RdmaRmPD *pd;
+ int ret = -ENOMEM;
+
+ pd = rdma_res_tbl_alloc(&dev_res->pd_tbl, pd_handle);
+ if (!pd) {
+ goto out;
+ }
+
+ ret = rdma_backend_create_pd(backend_dev, &pd->backend_pd);
+ if (ret) {
+ ret = -EIO;
+ goto out_tbl_dealloc;
+ }
+
+ pd->ctx_handle = ctx_handle;
+
+ return 0;
+
+out_tbl_dealloc:
+ rdma_res_tbl_dealloc(&dev_res->pd_tbl, *pd_handle);
+
+out:
+ return ret;
+}
+
+RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle)
+{
+ return rdma_res_tbl_get(&dev_res->pd_tbl, pd_handle);
+}
+
+void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle)
+{
+ RdmaRmPD *pd = rdma_rm_get_pd(dev_res, pd_handle);
+
+ if (pd) {
+ rdma_backend_destroy_pd(&pd->backend_pd);
+ rdma_res_tbl_dealloc(&dev_res->pd_tbl, pd_handle);
+ }
+}
+
+int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle,
+ uint64_t guest_start, uint64_t guest_length,
+ void *host_virt, int access_flags, uint32_t *mr_handle,
+ uint32_t *lkey, uint32_t *rkey)
+{
+ RdmaRmMR *mr;
+ int ret = 0;
+ RdmaRmPD *pd;
+
+ pd = rdma_rm_get_pd(dev_res, pd_handle);
+ if (!pd) {
+ return -EINVAL;
+ }
+
+ mr = rdma_res_tbl_alloc(&dev_res->mr_tbl, mr_handle);
+ if (!mr) {
+ return -ENOMEM;
+ }
+ trace_rdma_rm_alloc_mr(*mr_handle, host_virt, guest_start, guest_length,
+ access_flags);
+
+ if (host_virt) {
+ mr->virt = host_virt;
+ mr->start = guest_start;
+ mr->length = guest_length;
+ mr->virt += (mr->start & (TARGET_PAGE_SIZE - 1));
+
+ ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, mr->virt,
+ mr->length, guest_start, access_flags);
+ if (ret) {
+ ret = -EIO;
+ goto out_dealloc_mr;
+ }
+#ifdef LEGACY_RDMA_REG_MR
+ /* We keep mr_handle in lkey so send and recv can get the mr ptr */
+ *lkey = *mr_handle;
+#else
+ *lkey = rdma_backend_mr_lkey(&mr->backend_mr);
+#endif
+ }
+
+ *rkey = -1;
+
+ mr->pd_handle = pd_handle;
+
+ return 0;
+
+out_dealloc_mr:
+ rdma_res_tbl_dealloc(&dev_res->mr_tbl, *mr_handle);
+
+ return ret;
+}
+
+RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
+{
+ return rdma_res_tbl_get(&dev_res->mr_tbl, mr_handle);
+}
+
+void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
+{
+ RdmaRmMR *mr = rdma_rm_get_mr(dev_res, mr_handle);
+
+ if (mr) {
+ rdma_backend_destroy_mr(&mr->backend_mr);
+ trace_rdma_rm_dealloc_mr(mr_handle, mr->start);
+ if (mr->start) {
+ mr->virt -= (mr->start & (TARGET_PAGE_SIZE - 1));
+ munmap(mr->virt, mr->length);
+ }
+ rdma_res_tbl_dealloc(&dev_res->mr_tbl, mr_handle);
+ }
+}
+
+int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn,
+ uint32_t *uc_handle)
+{
+ RdmaRmUC *uc;
+
+ /* TODO: Need to make sure pfn is between the BAR start address and
+ * BAR start + RDMA_BAR2_UAR_SIZE
+ if (pfn > RDMA_BAR2_UAR_SIZE) {
+ rdma_error_report("pfn out of range (%d > %d)", pfn,
+ RDMA_BAR2_UAR_SIZE);
+ return -ENOMEM;
+ }
+ */
+
+ uc = rdma_res_tbl_alloc(&dev_res->uc_tbl, uc_handle);
+ if (!uc) {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle)
+{
+ return rdma_res_tbl_get(&dev_res->uc_tbl, uc_handle);
+}
+
+void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle)
+{
+ RdmaRmUC *uc = rdma_rm_get_uc(dev_res, uc_handle);
+
+ if (uc) {
+ rdma_res_tbl_dealloc(&dev_res->uc_tbl, uc_handle);
+ }
+}
+
+RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle)
+{
+ return rdma_res_tbl_get(&dev_res->cq_tbl, cq_handle);
+}
+
+int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ uint32_t cqe, uint32_t *cq_handle, void *opaque)
+{
+ int rc;
+ RdmaRmCQ *cq;
+
+ cq = rdma_res_tbl_alloc(&dev_res->cq_tbl, cq_handle);
+ if (!cq) {
+ return -ENOMEM;
+ }
+
+ cq->opaque = opaque;
+ cq->notify = CNT_CLEAR;
+
+ rc = rdma_backend_create_cq(backend_dev, &cq->backend_cq, cqe);
+ if (rc) {
+ rc = -EIO;
+ goto out_dealloc_cq;
+ }
+
+ return 0;
+
+out_dealloc_cq:
+ rdma_rm_dealloc_cq(dev_res, *cq_handle);
+
+ return rc;
+}
+
+void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle,
+ bool notify)
+{
+ RdmaRmCQ *cq;
+
+ cq = rdma_rm_get_cq(dev_res, cq_handle);
+ if (!cq) {
+ return;
+ }
+
+ if (cq->notify != CNT_SET) {
+ cq->notify = notify ? CNT_ARM : CNT_CLEAR;
+ }
+}
+
+void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle)
+{
+ RdmaRmCQ *cq;
+
+ cq = rdma_rm_get_cq(dev_res, cq_handle);
+ if (!cq) {
+ return;
+ }
+
+ rdma_backend_destroy_cq(&cq->backend_cq);
+
+ rdma_res_tbl_dealloc(&dev_res->cq_tbl, cq_handle);
+}
+
+RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn)
+{
+ GBytes *key = g_bytes_new(&qpn, sizeof(qpn));
+
+ RdmaRmQP *qp = g_hash_table_lookup(dev_res->qp_hash, key);
+
+ g_bytes_unref(key);
+
+ if (!qp) {
+ rdma_error_report("Invalid QP handle %d", qpn);
+ }
+
+ return qp;
+}
+
+int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
+ uint8_t qp_type, uint32_t max_send_wr,
+ uint32_t max_send_sge, uint32_t send_cq_handle,
+ uint32_t max_recv_wr, uint32_t max_recv_sge,
+ uint32_t recv_cq_handle, void *opaque, uint32_t *qpn,
+ uint8_t is_srq, uint32_t srq_handle)
+{
+ int rc;
+ RdmaRmQP *qp;
+ RdmaRmCQ *scq, *rcq;
+ RdmaRmPD *pd;
+ RdmaRmSRQ *srq = NULL;
+ uint32_t rm_qpn;
+
+ pd = rdma_rm_get_pd(dev_res, pd_handle);
+ if (!pd) {
+ return -EINVAL;
+ }
+
+ scq = rdma_rm_get_cq(dev_res, send_cq_handle);
+ rcq = rdma_rm_get_cq(dev_res, recv_cq_handle);
+
+ if (!scq || !rcq) {
+ rdma_error_report("Invalid send_cqn or recv_cqn (%d, %d)",
+ send_cq_handle, recv_cq_handle);
+ return -EINVAL;
+ }
+
+ if (is_srq) {
+ srq = rdma_rm_get_srq(dev_res, srq_handle);
+ if (!srq) {
+ rdma_error_report("Invalid srqn %d", srq_handle);
+ return -EINVAL;
+ }
+
+ srq->recv_cq_handle = recv_cq_handle;
+ }
+
+ if (qp_type == IBV_QPT_GSI) {
+ scq->notify = CNT_SET;
+ rcq->notify = CNT_SET;
+ }
+
+ qp = rdma_res_tbl_alloc(&dev_res->qp_tbl, &rm_qpn);
+ if (!qp) {
+ return -ENOMEM;
+ }
+
+ qp->qpn = rm_qpn;
+ qp->qp_state = IBV_QPS_RESET;
+ qp->qp_type = qp_type;
+ qp->send_cq_handle = send_cq_handle;
+ qp->recv_cq_handle = recv_cq_handle;
+ qp->opaque = opaque;
+ qp->is_srq = is_srq;
+
+ rc = rdma_backend_create_qp(&qp->backend_qp, qp_type, &pd->backend_pd,
+ &scq->backend_cq, &rcq->backend_cq,
+ is_srq ? &srq->backend_srq : NULL,
+ max_send_wr, max_recv_wr, max_send_sge,
+ max_recv_sge);
+
+ if (rc) {
+ rc = -EIO;
+ goto out_dealloc_qp;
+ }
+
+ *qpn = rdma_backend_qpn(&qp->backend_qp);
+ trace_rdma_rm_alloc_qp(rm_qpn, *qpn, qp_type);
+ g_hash_table_insert(dev_res->qp_hash, g_bytes_new(qpn, sizeof(*qpn)), qp);
+
+ return 0;
+
+out_dealloc_qp:
+ rdma_res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);
+
+ return rc;
+}
+
+int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ uint32_t qp_handle, uint32_t attr_mask, uint8_t sgid_idx,
+ union ibv_gid *dgid, uint32_t dqpn,
+ enum ibv_qp_state qp_state, uint32_t qkey,
+ uint32_t rq_psn, uint32_t sq_psn)
+{
+ RdmaRmQP *qp;
+ int ret;
+
+ qp = rdma_rm_get_qp(dev_res, qp_handle);
+ if (!qp) {
+ return -EINVAL;
+ }
+
+ if (qp->qp_type == IBV_QPT_SMI) {
+ rdma_error_report("Got QP0 request");
+ return -EPERM;
+ } else if (qp->qp_type == IBV_QPT_GSI) {
+ return 0;
+ }
+
+ trace_rdma_rm_modify_qp(qp_handle, attr_mask, qp_state, sgid_idx);
+
+ if (attr_mask & IBV_QP_STATE) {
+ qp->qp_state = qp_state;
+
+ if (qp->qp_state == IBV_QPS_INIT) {
+ ret = rdma_backend_qp_state_init(backend_dev, &qp->backend_qp,
+ qp->qp_type, qkey);
+ if (ret) {
+ return -EIO;
+ }
+ }
+
+ if (qp->qp_state == IBV_QPS_RTR) {
+ /* Get backend gid index */
+ sgid_idx = rdma_rm_get_backend_gid_index(dev_res, backend_dev,
+ sgid_idx);
+ if (sgid_idx <= 0) { /* TODO check also less than bk.max_sgid */
+ rdma_error_report("Failed to get bk sgid_idx for sgid_idx %d",
+ sgid_idx);
+ return -EIO;
+ }
+
+ ret = rdma_backend_qp_state_rtr(backend_dev, &qp->backend_qp,
+ qp->qp_type, sgid_idx, dgid, dqpn,
+ rq_psn, qkey,
+ attr_mask & IBV_QP_QKEY);
+ if (ret) {
+ return -EIO;
+ }
+ }
+
+ if (qp->qp_state == IBV_QPS_RTS) {
+ ret = rdma_backend_qp_state_rts(&qp->backend_qp, qp->qp_type,
+ sq_psn, qkey,
+ attr_mask & IBV_QP_QKEY);
+ if (ret) {
+ return -EIO;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ uint32_t qp_handle, struct ibv_qp_attr *attr,
+ int attr_mask, struct ibv_qp_init_attr *init_attr)
+{
+ RdmaRmQP *qp;
+
+ qp = rdma_rm_get_qp(dev_res, qp_handle);
+ if (!qp) {
+ return -EINVAL;
+ }
+
+ return rdma_backend_query_qp(&qp->backend_qp, attr, attr_mask, init_attr);
+}
+
+void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle)
+{
+ RdmaRmQP *qp;
+ GBytes *key;
+
+ key = g_bytes_new(&qp_handle, sizeof(qp_handle));
+ qp = g_hash_table_lookup(dev_res->qp_hash, key);
+ g_hash_table_remove(dev_res->qp_hash, key);
+ g_bytes_unref(key);
+
+ if (!qp) {
+ return;
+ }
+
+ rdma_backend_destroy_qp(&qp->backend_qp, dev_res);
+
+ rdma_res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);
+}
+
+RdmaRmSRQ *rdma_rm_get_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle)
+{
+ return rdma_res_tbl_get(&dev_res->srq_tbl, srq_handle);
+}
+
+int rdma_rm_alloc_srq(RdmaDeviceResources *dev_res, uint32_t pd_handle,
+ uint32_t max_wr, uint32_t max_sge, uint32_t srq_limit,
+ uint32_t *srq_handle, void *opaque)
+{
+ RdmaRmSRQ *srq;
+ RdmaRmPD *pd;
+ int rc;
+
+ pd = rdma_rm_get_pd(dev_res, pd_handle);
+ if (!pd) {
+ return -EINVAL;
+ }
+
+ srq = rdma_res_tbl_alloc(&dev_res->srq_tbl, srq_handle);
+ if (!srq) {
+ return -ENOMEM;
+ }
+
+ rc = rdma_backend_create_srq(&srq->backend_srq, &pd->backend_pd,
+ max_wr, max_sge, srq_limit);
+ if (rc) {
+ rc = -EIO;
+ goto out_dealloc_srq;
+ }
+
+ srq->opaque = opaque;
+
+ return 0;
+
+out_dealloc_srq:
+ rdma_res_tbl_dealloc(&dev_res->srq_tbl, *srq_handle);
+
+ return rc;
+}
+
+int rdma_rm_query_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
+ struct ibv_srq_attr *srq_attr)
+{
+ RdmaRmSRQ *srq;
+
+ srq = rdma_rm_get_srq(dev_res, srq_handle);
+ if (!srq) {
+ return -EINVAL;
+ }
+
+ return rdma_backend_query_srq(&srq->backend_srq, srq_attr);
+}
+
+int rdma_rm_modify_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
+ struct ibv_srq_attr *srq_attr, int srq_attr_mask)
+{
+ RdmaRmSRQ *srq;
+
+ srq = rdma_rm_get_srq(dev_res, srq_handle);
+ if (!srq) {
+ return -EINVAL;
+ }
+
+ if ((srq_attr_mask & IBV_SRQ_LIMIT) &&
+ (srq_attr->srq_limit == 0)) {
+ return -EINVAL;
+ }
+
+ if ((srq_attr_mask & IBV_SRQ_MAX_WR) &&
+ (srq_attr->max_wr == 0)) {
+ return -EINVAL;
+ }
+
+ return rdma_backend_modify_srq(&srq->backend_srq, srq_attr,
+ srq_attr_mask);
+}
+
+void rdma_rm_dealloc_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle)
+{
+ RdmaRmSRQ *srq;
+
+ srq = rdma_rm_get_srq(dev_res, srq_handle);
+ if (!srq) {
+ return;
+ }
+
+ rdma_backend_destroy_srq(&srq->backend_srq, dev_res);
+ rdma_res_tbl_dealloc(&dev_res->srq_tbl, srq_handle);
+}
+
+void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
+{
+ void **cqe_ctx;
+
+ cqe_ctx = rdma_res_tbl_get(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
+ if (!cqe_ctx) {
+ return NULL;
+ }
+
+ return *cqe_ctx;
+}
+
+int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id,
+ void *ctx)
+{
+ void **cqe_ctx;
+
+ cqe_ctx = rdma_res_tbl_alloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
+ if (!cqe_ctx) {
+ return -ENOMEM;
+ }
+
+ *cqe_ctx = ctx;
+
+ return 0;
+}
+
+void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
+{
+ rdma_res_tbl_dealloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
+}
+
+int rdma_rm_add_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ const char *ifname, union ibv_gid *gid, int gid_idx)
+{
+ int rc;
+
+ rc = rdma_backend_add_gid(backend_dev, ifname, gid);
+ if (rc) {
+ return -EINVAL;
+ }
+
+ memcpy(&dev_res->port.gid_tbl[gid_idx].gid, gid, sizeof(*gid));
+
+ return 0;
+}
+
+int rdma_rm_del_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ const char *ifname, int gid_idx)
+{
+ int rc;
+
+ if (!dev_res->port.gid_tbl[gid_idx].gid.global.interface_id) {
+ return 0;
+ }
+
+ rc = rdma_backend_del_gid(backend_dev, ifname,
+ &dev_res->port.gid_tbl[gid_idx].gid);
+ if (rc) {
+ return -EINVAL;
+ }
+
+ memset(dev_res->port.gid_tbl[gid_idx].gid.raw, 0,
+ sizeof(dev_res->port.gid_tbl[gid_idx].gid));
+ dev_res->port.gid_tbl[gid_idx].backend_gid_index = -1;
+
+ return 0;
+}
+
+int rdma_rm_get_backend_gid_index(RdmaDeviceResources *dev_res,
+ RdmaBackendDev *backend_dev, int sgid_idx)
+{
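+    /* Translate the emulated sgid index into the backend device's gid index,
+     * caching the result in the port's gid table. */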
+ if (unlikely(sgid_idx < 0 || sgid_idx >= MAX_PORT_GIDS)) {
+ rdma_error_report("Got invalid sgid_idx %d", sgid_idx);
+ return -EINVAL;
+ }
+
+ if (unlikely(dev_res->port.gid_tbl[sgid_idx].backend_gid_index == -1)) {
+ dev_res->port.gid_tbl[sgid_idx].backend_gid_index =
+ rdma_backend_get_gid_index(backend_dev,
+ &dev_res->port.gid_tbl[sgid_idx].gid);
+ }
+
+ return dev_res->port.gid_tbl[sgid_idx].backend_gid_index;
+}
+
+static void destroy_qp_hash_key(gpointer data)
+{
+ g_bytes_unref(data);
+}
+
+static void init_ports(RdmaDeviceResources *dev_res)
+{
+ int i;
+
+ memset(&dev_res->port, 0, sizeof(dev_res->port));
+
+ dev_res->port.state = IBV_PORT_DOWN;
+ for (i = 0; i < MAX_PORT_GIDS; i++) {
+ dev_res->port.gid_tbl[i].backend_gid_index = -1;
+ }
+}
+
+static void fini_ports(RdmaDeviceResources *dev_res,
+ RdmaBackendDev *backend_dev, const char *ifname)
+{
+ int i;
+
+ dev_res->port.state = IBV_PORT_DOWN;
+ for (i = 0; i < MAX_PORT_GIDS; i++) {
+ rdma_rm_del_gid(dev_res, backend_dev, ifname, i);
+ }
+}
+
+int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr)
+{
+ dev_res->qp_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
+ destroy_qp_hash_key, NULL);
+ if (!dev_res->qp_hash) {
+ return -ENOMEM;
+ }
+
+ res_tbl_init("PD", &dev_res->pd_tbl, dev_attr->max_pd, sizeof(RdmaRmPD));
+ res_tbl_init("CQ", &dev_res->cq_tbl, dev_attr->max_cq, sizeof(RdmaRmCQ));
+ res_tbl_init("MR", &dev_res->mr_tbl, dev_attr->max_mr, sizeof(RdmaRmMR));
+ res_tbl_init("QP", &dev_res->qp_tbl, dev_attr->max_qp, sizeof(RdmaRmQP));
+ res_tbl_init("CQE_CTX", &dev_res->cqe_ctx_tbl, dev_attr->max_qp *
+ dev_attr->max_qp_wr, sizeof(void *));
+ res_tbl_init("UC", &dev_res->uc_tbl, MAX_UCS, sizeof(RdmaRmUC));
+ res_tbl_init("SRQ", &dev_res->srq_tbl, dev_attr->max_srq,
+ sizeof(RdmaRmSRQ));
+
+ init_ports(dev_res);
+
+ qemu_mutex_init(&dev_res->lock);
+
+ memset(&dev_res->stats, 0, sizeof(dev_res->stats));
+ qatomic_set(&dev_res->stats.missing_cqe, 0);
+
+ return 0;
+}
+
+void rdma_rm_fini(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ const char *ifname)
+{
+ qemu_mutex_destroy(&dev_res->lock);
+
+ fini_ports(dev_res, backend_dev, ifname);
+
+ res_tbl_free(&dev_res->srq_tbl);
+ res_tbl_free(&dev_res->uc_tbl);
+ res_tbl_free(&dev_res->cqe_ctx_tbl);
+ res_tbl_free(&dev_res->qp_tbl);
+ res_tbl_free(&dev_res->mr_tbl);
+ res_tbl_free(&dev_res->cq_tbl);
+ res_tbl_free(&dev_res->pd_tbl);
+
+ if (dev_res->qp_hash) {
+ g_hash_table_destroy(dev_res->qp_hash);
+ }
+}
diff --git a/hw/rdma/rdma_rm.h b/hw/rdma/rdma_rm.h
new file mode 100644
index 000000000..d69a91779
--- /dev/null
+++ b/hw/rdma/rdma_rm.h
@@ -0,0 +1,97 @@
+/*
+ * RDMA device: Definitions of Resource Manager functions
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef RDMA_RM_H
+#define RDMA_RM_H
+
+#include "qapi/error.h"
+#include "rdma_backend_defs.h"
+#include "rdma_rm_defs.h"
+
+int rdma_rm_init(RdmaDeviceResources *dev_res,
+ struct ibv_device_attr *dev_attr);
+void rdma_rm_fini(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ const char *ifname);
+
+int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ uint32_t *pd_handle, uint32_t ctx_handle);
+RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle);
+void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle);
+
+int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle,
+ uint64_t guest_start, uint64_t guest_length,
+ void *host_virt, int access_flags, uint32_t *mr_handle,
+ uint32_t *lkey, uint32_t *rkey);
+RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle);
+void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle);
+
+int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn,
+ uint32_t *uc_handle);
+RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle);
+void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle);
+
+int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ uint32_t cqe, uint32_t *cq_handle, void *opaque);
+RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle);
+void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle,
+ bool notify);
+void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle);
+
+int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
+ uint8_t qp_type, uint32_t max_send_wr,
+ uint32_t max_send_sge, uint32_t send_cq_handle,
+ uint32_t max_recv_wr, uint32_t max_recv_sge,
+ uint32_t recv_cq_handle, void *opaque, uint32_t *qpn,
+ uint8_t is_srq, uint32_t srq_handle);
+RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn);
+int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ uint32_t qp_handle, uint32_t attr_mask, uint8_t sgid_idx,
+ union ibv_gid *dgid, uint32_t dqpn,
+ enum ibv_qp_state qp_state, uint32_t qkey,
+ uint32_t rq_psn, uint32_t sq_psn);
+int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ uint32_t qp_handle, struct ibv_qp_attr *attr,
+ int attr_mask, struct ibv_qp_init_attr *init_attr);
+void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle);
+
+RdmaRmSRQ *rdma_rm_get_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle);
+int rdma_rm_alloc_srq(RdmaDeviceResources *dev_res, uint32_t pd_handle,
+ uint32_t max_wr, uint32_t max_sge, uint32_t srq_limit,
+ uint32_t *srq_handle, void *opaque);
+int rdma_rm_query_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
+ struct ibv_srq_attr *srq_attr);
+int rdma_rm_modify_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
+ struct ibv_srq_attr *srq_attr, int srq_attr_mask);
+void rdma_rm_dealloc_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle);
+
+int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id,
+ void *ctx);
+void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id);
+void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id);
+
+int rdma_rm_add_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ const char *ifname, union ibv_gid *gid, int gid_idx);
+int rdma_rm_del_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
+ const char *ifname, int gid_idx);
+int rdma_rm_get_backend_gid_index(RdmaDeviceResources *dev_res,
+ RdmaBackendDev *backend_dev, int sgid_idx);
+static inline union ibv_gid *rdma_rm_get_gid(RdmaDeviceResources *dev_res,
+ int sgid_idx)
+{
+ return &dev_res->port.gid_tbl[sgid_idx].gid;
+}
+void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf);
+
+#endif
diff --git a/hw/rdma/rdma_rm_defs.h b/hw/rdma/rdma_rm_defs.h
new file mode 100644
index 000000000..534f2f74d
--- /dev/null
+++ b/hw/rdma/rdma_rm_defs.h
@@ -0,0 +1,146 @@
+/*
+ * RDMA device: Definitions of Resource Manager structures
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef RDMA_RM_DEFS_H
+#define RDMA_RM_DEFS_H
+
+#include "rdma_backend_defs.h"
+
+#define MAX_PORTS 1 /* Do not change - we support only one port */
+#define MAX_PORT_GIDS 255
+#define MAX_GIDS MAX_PORT_GIDS
+#define MAX_PORT_PKEYS 1
+#define MAX_PKEYS MAX_PORT_PKEYS
+#define MAX_UCS 512
+#define MAX_MR_SIZE (1UL << 27)
+#define MAX_QP 1024
+#define MAX_SGE 4
+#define MAX_CQ 2048
+#define MAX_MR 1024
+#define MAX_PD 1024
+#define MAX_QP_RD_ATOM 16
+#define MAX_QP_INIT_RD_ATOM 16
+#define MAX_AH 64
+#define MAX_SRQ 512
+
+#define MAX_RM_TBL_NAME 16
+#define MAX_CONSEQ_EMPTY_POLL_CQ 4096 /* considered an error above this */
+
+typedef struct RdmaRmResTbl {
+ char name[MAX_RM_TBL_NAME];
+ QemuMutex lock;
+ unsigned long *bitmap;
+ size_t tbl_sz;
+ size_t res_sz;
+ void *tbl;
+ uint32_t used; /* number of used entries in the table */
+} RdmaRmResTbl;
+
+typedef struct RdmaRmPD {
+ RdmaBackendPD backend_pd;
+ uint32_t ctx_handle;
+} RdmaRmPD;
+
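+/*
+ * CQ completion-notification state: CNT_CLEAR - no notification requested,
+ * CNT_ARM - notify on the next completion, CNT_SET - always notify (used for
+ * the GSI QP's CQs and never downgraded by rdma_rm_req_notify_cq).
+ */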
+typedef enum CQNotificationType {
+ CNT_CLEAR,
+ CNT_ARM,
+ CNT_SET,
+} CQNotificationType;
+
+typedef struct RdmaRmCQ {
+ RdmaBackendCQ backend_cq;
+ void *opaque;
+ CQNotificationType notify;
+} RdmaRmCQ;
+
+/* MR (DMA region) */
+typedef struct RdmaRmMR {
+ RdmaBackendMR backend_mr;
+ void *virt;
+ uint64_t start;
+ size_t length;
+ uint32_t pd_handle;
+ uint32_t lkey;
+ uint32_t rkey;
+} RdmaRmMR;
+
+typedef struct RdmaRmUC {
+ uint64_t uc_handle;
+} RdmaRmUC;
+
+typedef struct RdmaRmQP {
+ RdmaBackendQP backend_qp;
+ void *opaque;
+ uint32_t qp_type;
+ uint32_t qpn;
+ uint32_t send_cq_handle;
+ uint32_t recv_cq_handle;
+ enum ibv_qp_state qp_state;
+ uint8_t is_srq;
+} RdmaRmQP;
+
+typedef struct RdmaRmSRQ {
+ RdmaBackendSRQ backend_srq;
+ uint32_t recv_cq_handle;
+ void *opaque;
+} RdmaRmSRQ;
+
+typedef struct RdmaRmGid {
+ union ibv_gid gid;
+ int backend_gid_index;
+} RdmaRmGid;
+
+typedef struct RdmaRmPort {
+ RdmaRmGid gid_tbl[MAX_PORT_GIDS];
+ enum ibv_port_state state;
+} RdmaRmPort;
+
+typedef struct RdmaRmStats {
+ uint64_t tx;
+ uint64_t tx_len;
+ uint64_t tx_err;
+ uint64_t rx_bufs;
+ uint64_t rx_bufs_len;
+ uint64_t rx_bufs_err;
+ uint64_t rx_srq;
+ uint64_t completions;
+ uint64_t mad_tx;
+ uint64_t mad_tx_err;
+ uint64_t mad_rx;
+ uint64_t mad_rx_err;
+ uint64_t mad_rx_bufs;
+ uint64_t mad_rx_bufs_err;
+ uint64_t poll_cq_from_bk;
+ uint64_t poll_cq_from_guest;
+ uint64_t poll_cq_from_guest_empty;
+ uint64_t poll_cq_ppoll_to;
+ uint32_t missing_cqe;
+} RdmaRmStats;
+
+struct RdmaDeviceResources {
+ RdmaRmPort port;
+ RdmaRmResTbl pd_tbl;
+ RdmaRmResTbl mr_tbl;
+ RdmaRmResTbl uc_tbl;
+ RdmaRmResTbl qp_tbl;
+ RdmaRmResTbl cq_tbl;
+ RdmaRmResTbl cqe_ctx_tbl;
+ RdmaRmResTbl srq_tbl;
+    GHashTable *qp_hash; /* Maps backend (real) QPNs to emulated QPs */
+ QemuMutex lock;
+ RdmaRmStats stats;
+};
+
+#endif
diff --git a/hw/rdma/rdma_utils.c b/hw/rdma/rdma_utils.c
new file mode 100644
index 000000000..98df58f68
--- /dev/null
+++ b/hw/rdma/rdma_utils.c
@@ -0,0 +1,125 @@
+/*
+ * QEMU paravirtual RDMA - Generic RDMA utilities
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "trace.h"
+#include "rdma_utils.h"
+
+void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen)
+{
+ void *p;
+ hwaddr len = plen;
+
+ if (!addr) {
+ rdma_error_report("addr is NULL");
+ return NULL;
+ }
+
+ p = pci_dma_map(dev, addr, &len, DMA_DIRECTION_TO_DEVICE);
+ if (!p) {
+ rdma_error_report("pci_dma_map fail, addr=0x%"PRIx64", len=%"PRId64,
+ addr, len);
+ return NULL;
+ }
+
+ if (len != plen) {
+ rdma_pci_dma_unmap(dev, p, len);
+ return NULL;
+ }
+
+ trace_rdma_pci_dma_map(addr, p, len);
+
+ return p;
+}
+
+void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len)
+{
+ trace_rdma_pci_dma_unmap(buffer);
+ if (buffer) {
+ pci_dma_unmap(dev, buffer, len, DMA_DIRECTION_TO_DEVICE, 0);
+ }
+}
+
+void rdma_protected_gqueue_init(RdmaProtectedGQueue *list)
+{
+ qemu_mutex_init(&list->lock);
+ list->list = g_queue_new();
+}
+
+void rdma_protected_gqueue_destroy(RdmaProtectedGQueue *list)
+{
+ if (list->list) {
+ g_queue_free_full(list->list, g_free);
+ qemu_mutex_destroy(&list->lock);
+ list->list = NULL;
+ }
+}
+
+void rdma_protected_gqueue_append_int64(RdmaProtectedGQueue *list,
+ int64_t value)
+{
+ qemu_mutex_lock(&list->lock);
+ g_queue_push_tail(list->list, g_memdup(&value, sizeof(value)));
+ qemu_mutex_unlock(&list->lock);
+}
+
+int64_t rdma_protected_gqueue_pop_int64(RdmaProtectedGQueue *list)
+{
+ int64_t *valp;
+ int64_t val;
+
+ qemu_mutex_lock(&list->lock);
+
+ valp = g_queue_pop_head(list->list);
+ qemu_mutex_unlock(&list->lock);
+
+ if (!valp) {
+ return -ENOENT;
+ }
+
+ val = *valp;
+ g_free(valp);
+ return val;
+}
+
+void rdma_protected_gslist_init(RdmaProtectedGSList *list)
+{
+ qemu_mutex_init(&list->lock);
+}
+
+void rdma_protected_gslist_destroy(RdmaProtectedGSList *list)
+{
+ if (list->list) {
+ g_slist_free(list->list);
+ qemu_mutex_destroy(&list->lock);
+ list->list = NULL;
+ }
+}
+
+void rdma_protected_gslist_append_int32(RdmaProtectedGSList *list,
+ int32_t value)
+{
+ qemu_mutex_lock(&list->lock);
+ list->list = g_slist_prepend(list->list, GINT_TO_POINTER(value));
+ qemu_mutex_unlock(&list->lock);
+}
+
+void rdma_protected_gslist_remove_int32(RdmaProtectedGSList *list,
+ int32_t value)
+{
+ qemu_mutex_lock(&list->lock);
+ list->list = g_slist_remove(list->list, GINT_TO_POINTER(value));
+ qemu_mutex_unlock(&list->lock);
+}
diff --git a/hw/rdma/rdma_utils.h b/hw/rdma/rdma_utils.h
new file mode 100644
index 000000000..9fd0efd94
--- /dev/null
+++ b/hw/rdma/rdma_utils.h
@@ -0,0 +1,64 @@
+/*
+ * RDMA device: Debug utilities
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef RDMA_UTILS_H
+#define RDMA_UTILS_H
+
+#include "qemu/error-report.h"
+#include "hw/pci/pci.h"
+#include "sysemu/dma.h"
+
+#define rdma_error_report(fmt, ...) \
+ error_report("%s: " fmt, "rdma", ## __VA_ARGS__)
+#define rdma_warn_report(fmt, ...) \
+ warn_report("%s: " fmt, "rdma", ## __VA_ARGS__)
+#define rdma_info_report(fmt, ...) \
+ info_report("%s: " fmt, "rdma", ## __VA_ARGS__)
+
+typedef struct RdmaProtectedGQueue {
+ QemuMutex lock;
+ GQueue *list;
+} RdmaProtectedGQueue;
+
+typedef struct RdmaProtectedGSList {
+ QemuMutex lock;
+ GSList *list;
+} RdmaProtectedGSList;
+
+void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen);
+void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len);
+void rdma_protected_gqueue_init(RdmaProtectedGQueue *list);
+void rdma_protected_gqueue_destroy(RdmaProtectedGQueue *list);
+void rdma_protected_gqueue_append_int64(RdmaProtectedGQueue *list,
+ int64_t value);
+int64_t rdma_protected_gqueue_pop_int64(RdmaProtectedGQueue *list);
+void rdma_protected_gslist_init(RdmaProtectedGSList *list);
+void rdma_protected_gslist_destroy(RdmaProtectedGSList *list);
+void rdma_protected_gslist_append_int32(RdmaProtectedGSList *list,
+ int32_t value);
+void rdma_protected_gslist_remove_int32(RdmaProtectedGSList *list,
+ int32_t value);
+
+static inline void addrconf_addr_eui48(uint8_t *eui, const char *addr)
+{
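+    /* Derive a modified EUI-64 interface ID from a 6-byte MAC address:
+     * insert FF:FE in the middle and flip the universal/local bit. */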
+ memcpy(eui, addr, 3);
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ memcpy(eui + 5, addr + 3, 3);
+ eui[0] ^= 2;
+}
+
+#endif
diff --git a/hw/rdma/trace-events b/hw/rdma/trace-events
new file mode 100644
index 000000000..9accb1497
--- /dev/null
+++ b/hw/rdma/trace-events
@@ -0,0 +1,31 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# rdma_backend.c
+rdma_check_dev_attr(const char *name, int max_bk, int max_fe) "%s: be=%d, fe=%d"
+rdma_create_ah_cache_hit(uint64_t subnet, uint64_t if_id) "subnet=0x%"PRIx64",if_id=0x%"PRIx64
+rdma_create_ah_cache_miss(uint64_t subnet, uint64_t if_id) "subnet=0x%"PRIx64",if_id=0x%"PRIx64
+rdma_poll_cq(int ne, void *ibcq) "Got %d completion(s) from cq %p"
+rdmacm_mux(const char *title, int msg_type, int op_code) "%s: msg_type=%d, op_code=%d"
+rdmacm_mux_check_op_status(int msg_type, int op_code, int err_code) "resp: msg_type=%d, op_code=%d, err_code=%d"
+rdma_mad_message(const char *title, int len, char *data) "mad %s (%d): %s"
+rdma_backend_rc_qp_state_init(uint32_t qpn) "RC QP 0x%x switch to INIT"
+rdma_backend_ud_qp_state_init(uint32_t qpn, uint32_t qkey) "UD QP 0x%x switch to INIT, qkey=0x%x"
+rdma_backend_rc_qp_state_rtr(uint32_t qpn, uint64_t subnet, uint64_t ifid, uint8_t sgid_idx, uint32_t dqpn, uint32_t rq_psn) "RC QP 0x%x switch to RTR, subnet = 0x%"PRIx64", ifid = 0x%"PRIx64 ", sgid_idx=%d, dqpn=0x%x, rq_psn=0x%x"
+rdma_backend_ud_qp_state_rtr(uint32_t qpn, uint32_t qkey) "UD QP 0x%x switch to RTR, qkey=0x%x"
+rdma_backend_rc_qp_state_rts(uint32_t qpn, uint32_t sq_psn) "RC QP 0x%x switch to RTS, sq_psn=0x%x"
+rdma_backend_ud_qp_state_rts(uint32_t qpn, uint32_t sq_psn, uint32_t qkey) "UD QP 0x%x switch to RTS, sq_psn=0x%x, qkey=0x%x"
+rdma_backend_get_gid_index(uint64_t subnet, uint64_t ifid, int gid_idx) "subnet=0x%"PRIx64", ifid=0x%"PRIx64 ", gid_idx=%d"
+rdma_backend_gid_change(const char *op, uint64_t subnet, uint64_t ifid) "%s subnet=0x%"PRIx64", ifid=0x%"PRIx64
+
+# rdma_rm.c
+rdma_res_tbl_get(char *name, uint32_t handle) "tbl %s, handle %d"
+rdma_res_tbl_alloc(char *name, uint32_t handle) "tbl %s, handle %d"
+rdma_res_tbl_dealloc(char *name, uint32_t handle) "tbl %s, handle %d"
+rdma_rm_alloc_mr(uint32_t mr_handle, void *host_virt, uint64_t guest_start, uint64_t guest_length, int access_flags) "mr_handle=%d, host_virt=%p, guest_start=0x%"PRIx64", length=%" PRId64", access_flags=0x%x"
+rdma_rm_dealloc_mr(uint32_t mr_handle, uint64_t guest_start) "mr_handle=%d, guest_start=0x%"PRIx64
+rdma_rm_alloc_qp(uint32_t rm_qpn, uint32_t backend_qpn, uint8_t qp_type) "rm_qpn=%d, backend_qpn=0x%x, qp_type=%d"
+rdma_rm_modify_qp(uint32_t qpn, uint32_t attr_mask, int qp_state, uint8_t sgid_idx) "qpn=0x%x, attr_mask=0x%x, qp_state=%d, sgid_idx=%d"
+
+# rdma_utils.c
+rdma_pci_dma_map(uint64_t addr, void *vaddr, uint64_t len) "0x%"PRIx64" -> %p (len=%" PRId64")"
+rdma_pci_dma_unmap(void *vaddr) "%p"
diff --git a/hw/rdma/trace.h b/hw/rdma/trace.h
new file mode 100644
index 000000000..b3fa8ebc5
--- /dev/null
+++ b/hw/rdma/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_rdma.h"
diff --git a/hw/rdma/vmw/pvrdma.h b/hw/rdma/vmw/pvrdma.h
new file mode 100644
index 000000000..d08965d3e
--- /dev/null
+++ b/hw/rdma/vmw/pvrdma.h
@@ -0,0 +1,144 @@
+/*
+ * QEMU VMWARE paravirtual RDMA device definitions
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PVRDMA_PVRDMA_H
+#define PVRDMA_PVRDMA_H
+
+#include "qemu/units.h"
+#include "qemu/notify.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/msix.h"
+#include "chardev/char-fe.h"
+#include "hw/net/vmxnet3_defs.h"
+
+#include "../rdma_backend_defs.h"
+#include "../rdma_rm_defs.h"
+
+#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h"
+#include "pvrdma_dev_ring.h"
+#include "qom/object.h"
+
+/* BARs */
+#define RDMA_MSIX_BAR_IDX 0
+#define RDMA_REG_BAR_IDX 1
+#define RDMA_UAR_BAR_IDX 2
+#define RDMA_BAR0_MSIX_SIZE (16 * KiB)
+#define RDMA_BAR1_REGS_SIZE 64
+#define RDMA_BAR2_UAR_SIZE (0x1000 * MAX_UCS) /* each uc gets a page */
+
+/* MSIX */
+#define RDMA_MAX_INTRS 3
+#define RDMA_MSIX_TABLE 0x0000
+#define RDMA_MSIX_PBA 0x2000
+
+/* Interrupts Vectors */
+#define INTR_VEC_CMD_RING 0
+#define INTR_VEC_CMD_ASYNC_EVENTS 1
+#define INTR_VEC_CMD_COMPLETION_Q 2
+
+/* HW attributes */
+#define PVRDMA_HW_NAME "pvrdma"
+#define PVRDMA_HW_VERSION 17
+#define PVRDMA_FW_VERSION 14
+
+/* Some defaults */
+#define PVRDMA_PKEY 0xFFFF
+
+typedef struct DSRInfo {
+ dma_addr_t dma;
+ struct pvrdma_device_shared_region *dsr;
+
+ union pvrdma_cmd_req *req;
+ union pvrdma_cmd_resp *rsp;
+
+ PvrdmaRingState *async_ring_state;
+ PvrdmaRing async;
+
+ PvrdmaRingState *cq_ring_state;
+ PvrdmaRing cq;
+} DSRInfo;
+
+typedef struct PVRDMADevStats {
+ uint64_t commands;
+ uint64_t regs_reads;
+ uint64_t regs_writes;
+ uint64_t uar_writes;
+ uint64_t interrupts;
+} PVRDMADevStats;
+
+struct PVRDMADev {
+ PCIDevice parent_obj;
+ MemoryRegion msix;
+ MemoryRegion regs;
+ uint32_t regs_data[RDMA_BAR1_REGS_SIZE];
+ MemoryRegion uar;
+ uint32_t uar_data[RDMA_BAR2_UAR_SIZE];
+ DSRInfo dsr_info;
+ int interrupt_mask;
+ struct ibv_device_attr dev_attr;
+ uint64_t node_guid;
+ char *backend_eth_device_name;
+ char *backend_device_name;
+ uint8_t backend_port_num;
+ RdmaBackendDev backend_dev;
+ RdmaDeviceResources rdma_dev_res;
+ CharBackend mad_chr;
+ VMXNET3State *func0;
+ Notifier shutdown_notifier;
+ PVRDMADevStats stats;
+};
+typedef struct PVRDMADev PVRDMADev;
+DECLARE_INSTANCE_CHECKER(PVRDMADev, PVRDMA_DEV,
+ PVRDMA_HW_NAME)
+
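+/* Accessors for the 32-bit device registers in BAR1; addr is the byte offset
+ * into the BAR and regs_data[] holds one entry per 32-bit register. */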
+static inline int get_reg_val(PVRDMADev *dev, hwaddr addr, uint32_t *val)
+{
+ int idx = addr >> 2;
+
+ if (idx >= RDMA_BAR1_REGS_SIZE) {
+ return -EINVAL;
+ }
+
+ *val = dev->regs_data[idx];
+
+ return 0;
+}
+
+static inline int set_reg_val(PVRDMADev *dev, hwaddr addr, uint32_t val)
+{
+ int idx = addr >> 2;
+
+ if (idx >= RDMA_BAR1_REGS_SIZE) {
+ return -EINVAL;
+ }
+
+ dev->regs_data[idx] = val;
+
+ return 0;
+}
+
+static inline void post_interrupt(PVRDMADev *dev, unsigned vector)
+{
+ PCIDevice *pci_dev = PCI_DEVICE(dev);
+
+ if (likely(!dev->interrupt_mask)) {
+ dev->stats.interrupts++;
+ msix_notify(pci_dev, vector);
+ }
+}
+
+int pvrdma_exec_cmd(PVRDMADev *dev);
+
+#endif
diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c
new file mode 100644
index 000000000..da7ddfa54
--- /dev/null
+++ b/hw/rdma/vmw/pvrdma_cmd.c
@@ -0,0 +1,825 @@
+/*
+ * QEMU paravirtual RDMA - Command channel
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_ids.h"
+
+#include "../rdma_backend.h"
+#include "../rdma_rm.h"
+#include "../rdma_utils.h"
+
+#include "trace.h"
+#include "pvrdma.h"
+#include "standard-headers/rdma/vmw_pvrdma-abi.h"
+
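+/*
+ * Map a guest page directory (pdir) that describes nchunks guest pages into
+ * one contiguous host-virtual region: the first page is mapped and mremap()ed
+ * to reserve the full length, then every remaining page is mremap()ed into
+ * place behind it so the result can be registered as a single MR.
+ */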
+static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
+ uint32_t nchunks, size_t length)
+{
+ uint64_t *dir, *tbl;
+ int tbl_idx, dir_idx, addr_idx;
+ void *host_virt = NULL, *curr_page;
+
+ if (!nchunks) {
+ rdma_error_report("Got nchunks=0");
+ return NULL;
+ }
+
+ length = ROUND_UP(length, TARGET_PAGE_SIZE);
+ if (nchunks * TARGET_PAGE_SIZE != length) {
+ rdma_error_report("Invalid nchunks/length (%u, %lu)", nchunks,
+ (unsigned long)length);
+ return NULL;
+ }
+
+ dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
+ if (!dir) {
+ rdma_error_report("Failed to map to page directory");
+ return NULL;
+ }
+
+ tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
+ if (!tbl) {
+ rdma_error_report("Failed to map to page table 0");
+ goto out_unmap_dir;
+ }
+
+ curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
+ if (!curr_page) {
+ rdma_error_report("Failed to map the page 0");
+ goto out_unmap_tbl;
+ }
+
+ host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
+ if (host_virt == MAP_FAILED) {
+ host_virt = NULL;
+ rdma_error_report("Failed to remap memory for host_virt");
+ goto out_unmap_tbl;
+ }
+ trace_pvrdma_map_to_pdir_host_virt(curr_page, host_virt);
+
+ rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
+
+ dir_idx = 0;
+ tbl_idx = 1;
+ addr_idx = 1;
+ while (addr_idx < nchunks) {
+ if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
+ tbl_idx = 0;
+ dir_idx++;
+ rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
+ tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
+ if (!tbl) {
+ rdma_error_report("Failed to map to page table %d", dir_idx);
+ goto out_unmap_host_virt;
+ }
+ }
+
+ curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
+ TARGET_PAGE_SIZE);
+ if (!curr_page) {
+ rdma_error_report("Failed to map to page %d, dir %d", tbl_idx,
+ dir_idx);
+ goto out_unmap_host_virt;
+ }
+
+ mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
+ host_virt + TARGET_PAGE_SIZE * addr_idx);
+
+ trace_pvrdma_map_to_pdir_next_page(addr_idx, curr_page, host_virt +
+ TARGET_PAGE_SIZE * addr_idx);
+
+ rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
+
+ addr_idx++;
+
+ tbl_idx++;
+ }
+
+ goto out_unmap_tbl;
+
+out_unmap_host_virt:
+ munmap(host_virt, length);
+ host_virt = NULL;
+
+out_unmap_tbl:
+ rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
+
+out_unmap_dir:
+ rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);
+
+ return host_virt;
+}
+
+static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_query_port *cmd = &req->query_port;
+ struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
+ struct pvrdma_port_attr attrs = {};
+
+ if (cmd->port_num > MAX_PORTS) {
+ return -EINVAL;
+ }
+
+ if (rdma_backend_query_port(&dev->backend_dev,
+ (struct ibv_port_attr *)&attrs)) {
+ return -ENOMEM;
+ }
+
+ memset(resp, 0, sizeof(*resp));
+
+ resp->attrs.state = dev->func0->device_active ? attrs.state :
+ PVRDMA_PORT_DOWN;
+ resp->attrs.max_mtu = attrs.max_mtu;
+ resp->attrs.active_mtu = attrs.active_mtu;
+ resp->attrs.phys_state = attrs.phys_state;
+ resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
+ resp->attrs.max_msg_sz = 1024;
+ resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
+ resp->attrs.active_width = 1;
+ resp->attrs.active_speed = 1;
+
+ return 0;
+}
+
+static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
+ struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;
+
+ if (cmd->port_num > MAX_PORTS) {
+ return -EINVAL;
+ }
+
+ if (cmd->index > MAX_PKEYS) {
+ return -EINVAL;
+ }
+
+ memset(resp, 0, sizeof(*resp));
+
+ resp->pkey = PVRDMA_PKEY;
+
+ return 0;
+}
+
+static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
+ struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;
+ int rc;
+
+ memset(resp, 0, sizeof(*resp));
+ rc = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
+ &resp->pd_handle, cmd->ctx_handle);
+
+ return rc;
+}
+
+static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;
+
+ rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);
+
+ return 0;
+}
+
+static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
+ struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
+ PCIDevice *pci_dev = PCI_DEVICE(dev);
+ void *host_virt = NULL;
+ int rc = 0;
+
+ memset(resp, 0, sizeof(*resp));
+
+ if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
+ host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
+ cmd->length);
+ if (!host_virt) {
+ rdma_error_report("Failed to map to pdir");
+ return -EINVAL;
+ }
+ }
+
+ rc = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle, cmd->start,
+ cmd->length, host_virt, cmd->access_flags,
+ &resp->mr_handle, &resp->lkey, &resp->rkey);
+ if (rc && host_virt) {
+ munmap(host_virt, cmd->length);
+ }
+
+ return rc;
+}
+
+static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;
+
+ rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);
+
+ return 0;
+}
+
+static int create_cq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
+ uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
+{
+ uint64_t *dir = NULL, *tbl = NULL;
+ PvrdmaRing *r;
+ int rc = -EINVAL;
+ char ring_name[MAX_RING_NAME_SZ];
+
+ if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
+ rdma_error_report("Got invalid nchunks: %d", nchunks);
+ return rc;
+ }
+
+ dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
+ if (!dir) {
+ rdma_error_report("Failed to map to CQ page directory");
+ goto out;
+ }
+
+ tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
+ if (!tbl) {
+ rdma_error_report("Failed to map to CQ page table");
+ goto out;
+ }
+
+ r = g_malloc(sizeof(*r));
+ *ring = r;
+
+ r->ring_state = (PvrdmaRingState *)
+ rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+
+ if (!r->ring_state) {
+ rdma_error_report("Failed to map to CQ ring state");
+ goto out_free_ring;
+ }
+
+ sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
+ rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
+ cqe, sizeof(struct pvrdma_cqe),
+ /* first page is ring state */
+ (dma_addr_t *)&tbl[1], nchunks - 1);
+ if (rc) {
+ goto out_unmap_ring_state;
+ }
+
+ goto out;
+
+out_unmap_ring_state:
+    /* ring_state was in slot 1, not 0, so we need to jump back */
+ rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);
+
+out_free_ring:
+ g_free(r);
+
+out:
+ rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
+ rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
+
+ return rc;
+}
+
+static void destroy_cq_ring(PvrdmaRing *ring)
+{
+ pvrdma_ring_free(ring);
+    /* ring_state was in slot 1, not 0, so we need to jump back */
+ rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE);
+ g_free(ring);
+}
+
+static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
+ struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
+ PvrdmaRing *ring = NULL;
+ int rc;
+
+ memset(resp, 0, sizeof(*resp));
+
+ resp->cqe = cmd->cqe;
+
+ rc = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, cmd->nchunks,
+ cmd->cqe);
+ if (rc) {
+ return rc;
+ }
+
+ rc = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev, cmd->cqe,
+ &resp->cq_handle, ring);
+ if (rc) {
+ destroy_cq_ring(ring);
+ }
+
+ resp->cqe = cmd->cqe;
+
+ return rc;
+}
+
+static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
+ RdmaRmCQ *cq;
+ PvrdmaRing *ring;
+
+ cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
+ if (!cq) {
+ rdma_error_report("Got invalid CQ handle");
+ return -EINVAL;
+ }
+
+ ring = (PvrdmaRing *)cq->opaque;
+ destroy_cq_ring(ring);
+
+ rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);
+
+ return 0;
+}
+
+static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
+ PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
+ uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
+ uint32_t rpages, uint8_t is_srq)
+{
+ uint64_t *dir = NULL, *tbl = NULL;
+ PvrdmaRing *sr, *rr;
+ int rc = -EINVAL;
+ char ring_name[MAX_RING_NAME_SZ];
+ uint32_t wqe_sz;
+
+ if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES) {
+ rdma_error_report("Got invalid send page count for QP ring: %d",
+ spages);
+ return rc;
+ }
+
+ if (!is_srq && (!rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES)) {
+ rdma_error_report("Got invalid recv page count for QP ring: %d",
+ rpages);
+ return rc;
+ }
+
+ dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
+ if (!dir) {
+ rdma_error_report("Failed to map to QP page directory");
+ goto out;
+ }
+
+ tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
+ if (!tbl) {
+ rdma_error_report("Failed to map to QP page table");
+ goto out;
+ }
+
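+    /* The send and recv rings share one allocation; when the QP is attached
+     * to an SRQ only the send ring is created here. */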
+ if (!is_srq) {
+ sr = g_malloc(2 * sizeof(*rr));
+ rr = &sr[1];
+ } else {
+ sr = g_malloc(sizeof(*sr));
+ }
+
+ *rings = sr;
+
+ /* Create send ring */
+ sr->ring_state = (PvrdmaRingState *)
+ rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+ if (!sr->ring_state) {
+ rdma_error_report("Failed to map to QP ring state");
+ goto out_free_sr_mem;
+ }
+
+ wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
+ sizeof(struct pvrdma_sge) * smax_sge - 1);
+
+ sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
+ rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
+ scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
+ if (rc) {
+ goto out_unmap_ring_state;
+ }
+
+ if (!is_srq) {
+ /* Create recv ring */
+ rr->ring_state = &sr->ring_state[1];
+ wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
+ sizeof(struct pvrdma_sge) * rmax_sge - 1);
+ sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
+ rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
+ rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages],
+ rpages);
+ if (rc) {
+ goto out_free_sr;
+ }
+ }
+
+ goto out;
+
+out_free_sr:
+ pvrdma_ring_free(sr);
+
+out_unmap_ring_state:
+ rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);
+
+out_free_sr_mem:
+ g_free(sr);
+
+out:
+ rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
+ rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
+
+ return rc;
+}
+
+static void destroy_qp_rings(PvrdmaRing *ring, uint8_t is_srq)
+{
+ pvrdma_ring_free(&ring[0]);
+ if (!is_srq) {
+ pvrdma_ring_free(&ring[1]);
+ }
+
+ rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
+ g_free(ring);
+}
+
+static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
+ struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
+ PvrdmaRing *rings = NULL;
+ int rc;
+
+ memset(resp, 0, sizeof(*resp));
+
+ rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
+ cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks,
+ cmd->max_recv_wr, cmd->max_recv_sge,
+ cmd->total_chunks - cmd->send_chunks - 1, cmd->is_srq);
+ if (rc) {
+ return rc;
+ }
+
+ rc = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle, cmd->qp_type,
+ cmd->max_send_wr, cmd->max_send_sge,
+ cmd->send_cq_handle, cmd->max_recv_wr,
+ cmd->max_recv_sge, cmd->recv_cq_handle, rings,
+ &resp->qpn, cmd->is_srq, cmd->srq_handle);
+ if (rc) {
+ destroy_qp_rings(rings, cmd->is_srq);
+ return rc;
+ }
+
+ resp->max_send_wr = cmd->max_send_wr;
+ resp->max_recv_wr = cmd->max_recv_wr;
+ resp->max_send_sge = cmd->max_send_sge;
+ resp->max_recv_sge = cmd->max_recv_sge;
+ resp->max_inline_data = cmd->max_inline_data;
+
+ return 0;
+}
+
+static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;
+ int rc;
+
+ /* No need to verify sgid_index since it is u8 */
+
+ rc = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev,
+ cmd->qp_handle, cmd->attr_mask,
+ cmd->attrs.ah_attr.grh.sgid_index,
+ (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
+ cmd->attrs.dest_qp_num,
+ (enum ibv_qp_state)cmd->attrs.qp_state,
+ cmd->attrs.qkey, cmd->attrs.rq_psn,
+ cmd->attrs.sq_psn);
+
+ return rc;
+}
+
+static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
+ struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
+ struct ibv_qp_init_attr init_attr;
+ int rc;
+
+ memset(resp, 0, sizeof(*resp));
+
+ rc = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle,
+ (struct ibv_qp_attr *)&resp->attrs, cmd->attr_mask,
+ &init_attr);
+
+ return rc;
+}
+
+static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
+ RdmaRmQP *qp;
+ PvrdmaRing *ring;
+
+ qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
+ if (!qp) {
+ return -EINVAL;
+ }
+
+ ring = (PvrdmaRing *)qp->opaque;
+ destroy_qp_rings(ring, qp->is_srq);
+ rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);
+
+ return 0;
+}
+
+static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
+ int rc;
+ union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid;
+
+ if (cmd->index >= MAX_PORT_GIDS) {
+ return -EINVAL;
+ }
+
+ rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev,
+ dev->backend_eth_device_name, gid, cmd->index);
+
+ return rc;
+}
+
+static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ int rc;
+
+ struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;
+
+ if (cmd->index >= MAX_PORT_GIDS) {
+ return -EINVAL;
+ }
+
+ rc = rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev,
+ dev->backend_eth_device_name, cmd->index);
+
+ return rc;
+}
+
+static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
+ struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;
+ int rc;
+
+ memset(resp, 0, sizeof(*resp));
+ rc = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle);
+
+ return rc;
+}
+
+static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;
+
+ rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);
+
+ return 0;
+}
+
+static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
+ uint64_t pdir_dma, uint32_t max_wr,
+ uint32_t max_sge, uint32_t nchunks)
+{
+ uint64_t *dir = NULL, *tbl = NULL;
+ PvrdmaRing *r;
+ int rc = -EINVAL;
+ char ring_name[MAX_RING_NAME_SZ];
+ uint32_t wqe_sz;
+
+ if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
+ rdma_error_report("Got invalid page count for SRQ ring: %d",
+ nchunks);
+ return rc;
+ }
+
+ dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
+ if (!dir) {
+ rdma_error_report("Failed to map to SRQ page directory");
+ goto out;
+ }
+
+ tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
+ if (!tbl) {
+ rdma_error_report("Failed to map to SRQ page table");
+ goto out;
+ }
+
+ r = g_malloc(sizeof(*r));
+ *ring = r;
+
+ r->ring_state = (PvrdmaRingState *)
+ rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+ if (!r->ring_state) {
+ rdma_error_report("Failed to map tp SRQ ring state");
+ goto out_free_ring_mem;
+ }
+
+ wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
+ sizeof(struct pvrdma_sge) * max_sge - 1);
+ sprintf(ring_name, "srq_ring_%" PRIx64, pdir_dma);
+ rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], max_wr,
+ wqe_sz, (dma_addr_t *)&tbl[1], nchunks - 1);
+ if (rc) {
+ goto out_unmap_ring_state;
+ }
+
+ goto out;
+
+out_unmap_ring_state:
+ rdma_pci_dma_unmap(pci_dev, r->ring_state, TARGET_PAGE_SIZE);
+
+out_free_ring_mem:
+ g_free(r);
+
+out:
+ rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
+ rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
+
+ return rc;
+}
+
+static void destroy_srq_ring(PvrdmaRing *ring)
+{
+ pvrdma_ring_free(ring);
+ rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
+ g_free(ring);
+}
+
+static int create_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_create_srq *cmd = &req->create_srq;
+ struct pvrdma_cmd_create_srq_resp *resp = &rsp->create_srq_resp;
+ PvrdmaRing *ring = NULL;
+ int rc;
+
+ memset(resp, 0, sizeof(*resp));
+
+ rc = create_srq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
+ cmd->attrs.max_wr, cmd->attrs.max_sge,
+ cmd->nchunks);
+ if (rc) {
+ return rc;
+ }
+
+ rc = rdma_rm_alloc_srq(&dev->rdma_dev_res, cmd->pd_handle,
+ cmd->attrs.max_wr, cmd->attrs.max_sge,
+ cmd->attrs.srq_limit, &resp->srqn, ring);
+ if (rc) {
+ destroy_srq_ring(ring);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int query_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_query_srq *cmd = &req->query_srq;
+ struct pvrdma_cmd_query_srq_resp *resp = &rsp->query_srq_resp;
+
+ memset(resp, 0, sizeof(*resp));
+
+ return rdma_rm_query_srq(&dev->rdma_dev_res, cmd->srq_handle,
+ (struct ibv_srq_attr *)&resp->attrs);
+}
+
+static int modify_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_modify_srq *cmd = &req->modify_srq;
+
+ /* Only support SRQ limit */
+    if (!(cmd->attr_mask & IBV_SRQ_LIMIT) ||
+        (cmd->attr_mask & IBV_SRQ_MAX_WR)) {
+        return -EINVAL;
+    }
+
+ return rdma_rm_modify_srq(&dev->rdma_dev_res, cmd->srq_handle,
+ (struct ibv_srq_attr *)&cmd->attrs,
+ cmd->attr_mask);
+}
+
+static int destroy_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+ struct pvrdma_cmd_destroy_srq *cmd = &req->destroy_srq;
+ RdmaRmSRQ *srq;
+ PvrdmaRing *ring;
+
+ srq = rdma_rm_get_srq(&dev->rdma_dev_res, cmd->srq_handle);
+ if (!srq) {
+ return -EINVAL;
+ }
+
+ ring = (PvrdmaRing *)srq->opaque;
+ destroy_srq_ring(ring);
+ rdma_rm_dealloc_srq(&dev->rdma_dev_res, cmd->srq_handle);
+
+ return 0;
+}
+
+struct cmd_handler {
+ uint32_t cmd;
+ uint32_t ack;
+ int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp);
+};
+
+static struct cmd_handler cmd_handlers[] = {
+ {PVRDMA_CMD_QUERY_PORT, PVRDMA_CMD_QUERY_PORT_RESP, query_port},
+ {PVRDMA_CMD_QUERY_PKEY, PVRDMA_CMD_QUERY_PKEY_RESP, query_pkey},
+ {PVRDMA_CMD_CREATE_PD, PVRDMA_CMD_CREATE_PD_RESP, create_pd},
+ {PVRDMA_CMD_DESTROY_PD, PVRDMA_CMD_DESTROY_PD_RESP_NOOP, destroy_pd},
+ {PVRDMA_CMD_CREATE_MR, PVRDMA_CMD_CREATE_MR_RESP, create_mr},
+ {PVRDMA_CMD_DESTROY_MR, PVRDMA_CMD_DESTROY_MR_RESP_NOOP, destroy_mr},
+ {PVRDMA_CMD_CREATE_CQ, PVRDMA_CMD_CREATE_CQ_RESP, create_cq},
+ {PVRDMA_CMD_RESIZE_CQ, PVRDMA_CMD_RESIZE_CQ_RESP, NULL},
+ {PVRDMA_CMD_DESTROY_CQ, PVRDMA_CMD_DESTROY_CQ_RESP_NOOP, destroy_cq},
+ {PVRDMA_CMD_CREATE_QP, PVRDMA_CMD_CREATE_QP_RESP, create_qp},
+ {PVRDMA_CMD_MODIFY_QP, PVRDMA_CMD_MODIFY_QP_RESP, modify_qp},
+ {PVRDMA_CMD_QUERY_QP, PVRDMA_CMD_QUERY_QP_RESP, query_qp},
+ {PVRDMA_CMD_DESTROY_QP, PVRDMA_CMD_DESTROY_QP_RESP, destroy_qp},
+ {PVRDMA_CMD_CREATE_UC, PVRDMA_CMD_CREATE_UC_RESP, create_uc},
+ {PVRDMA_CMD_DESTROY_UC, PVRDMA_CMD_DESTROY_UC_RESP_NOOP, destroy_uc},
+ {PVRDMA_CMD_CREATE_BIND, PVRDMA_CMD_CREATE_BIND_RESP_NOOP, create_bind},
+ {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind},
+ {PVRDMA_CMD_CREATE_SRQ, PVRDMA_CMD_CREATE_SRQ_RESP, create_srq},
+ {PVRDMA_CMD_QUERY_SRQ, PVRDMA_CMD_QUERY_SRQ_RESP, query_srq},
+ {PVRDMA_CMD_MODIFY_SRQ, PVRDMA_CMD_MODIFY_SRQ_RESP, modify_srq},
+ {PVRDMA_CMD_DESTROY_SRQ, PVRDMA_CMD_DESTROY_SRQ_RESP, destroy_srq},
+};
+
+int pvrdma_exec_cmd(PVRDMADev *dev)
+{
+ int err = 0xFFFF;
+ DSRInfo *dsr_info;
+
+ dsr_info = &dev->dsr_info;
+
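+    /* err stays at 0xFFFF (reported to the guest through PVRDMA_REG_ERR)
+     * when the command is out of range or has no handler. */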
+ if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) /
+ sizeof(struct cmd_handler)) {
+ rdma_error_report("Unsupported command");
+ goto out;
+ }
+
+ if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
+ rdma_error_report("Unsupported command (not implemented yet)");
+ goto out;
+ }
+
+ err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
+ dsr_info->rsp);
+ dsr_info->rsp->hdr.response = dsr_info->req->hdr.response;
+ dsr_info->rsp->hdr.ack = cmd_handlers[dsr_info->req->hdr.cmd].ack;
+ dsr_info->rsp->hdr.err = err < 0 ? -err : 0;
+
+ trace_pvrdma_exec_cmd(dsr_info->req->hdr.cmd, dsr_info->rsp->hdr.err);
+
+ dev->stats.commands++;
+
+out:
+ set_reg_val(dev, PVRDMA_REG_ERR, err);
+ post_interrupt(dev, INTR_VEC_CMD_RING);
+
+ return (err == 0) ? 0 : -EINVAL;
+}
diff --git a/hw/rdma/vmw/pvrdma_dev_ring.c b/hw/rdma/vmw/pvrdma_dev_ring.c
new file mode 100644
index 000000000..42130667a
--- /dev/null
+++ b/hw/rdma/vmw/pvrdma_dev_ring.c
@@ -0,0 +1,142 @@
+/*
+ * QEMU paravirtual RDMA - Device rings
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "hw/pci/pci.h"
+#include "cpu.h"
+#include "qemu/cutils.h"
+
+#include "trace.h"
+
+#include "../rdma_utils.h"
+#include "pvrdma_dev_ring.h"
+
+int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
+ PvrdmaRingState *ring_state, uint32_t max_elems,
+ size_t elem_sz, dma_addr_t *tbl, uint32_t npages)
+{
+ int i;
+ int rc = 0;
+
+ pstrcpy(ring->name, MAX_RING_NAME_SZ, name);
+ ring->dev = dev;
+ ring->ring_state = ring_state;
+ ring->max_elems = max_elems;
+ ring->elem_sz = elem_sz;
+    /* TODO: Decide whether the driver-initialized ring state should be reset:
+    qatomic_set(&ring->ring_state->prod_tail, 0);
+    qatomic_set(&ring->ring_state->cons_head, 0);
+    */
+ ring->npages = npages;
+ ring->pages = g_malloc0(npages * sizeof(void *));
+
+ for (i = 0; i < npages; i++) {
+ if (!tbl[i]) {
+ rdma_error_report("npages=%d but tbl[%d] is NULL", npages, i);
+ continue;
+ }
+
+ ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE);
+ if (!ring->pages[i]) {
+ rc = -ENOMEM;
+ rdma_error_report("Failed to map to page %d in ring %s", i, name);
+ goto out_free;
+ }
+ memset(ring->pages[i], 0, TARGET_PAGE_SIZE);
+ }
+
+ goto out;
+
+out_free:
+ while (i--) {
+ rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE);
+ }
+ g_free(ring->pages);
+
+out:
+ return rc;
+}
+
+void *pvrdma_ring_next_elem_read(PvrdmaRing *ring)
+{
+ unsigned int idx, offset;
+ const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail);
+ const uint32_t head = qatomic_read(&ring->ring_state->cons_head);
+
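+    /*
+     * Ring indices run modulo twice the ring size so that an empty ring
+     * (head == tail) can be distinguished from a full one
+     * (tail == head ^ max_elems); values outside that range are invalid.
+     */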
+ if (tail & ~((ring->max_elems << 1) - 1) ||
+ head & ~((ring->max_elems << 1) - 1) ||
+ tail == head) {
+ trace_pvrdma_ring_next_elem_read_no_data(ring->name);
+ return NULL;
+ }
+
+ idx = head & (ring->max_elems - 1);
+ offset = idx * ring->elem_sz;
+ return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
+}
+
+void pvrdma_ring_read_inc(PvrdmaRing *ring)
+{
+ uint32_t idx = qatomic_read(&ring->ring_state->cons_head);
+
+ idx = (idx + 1) & ((ring->max_elems << 1) - 1);
+ qatomic_set(&ring->ring_state->cons_head, idx);
+}
+
+void *pvrdma_ring_next_elem_write(PvrdmaRing *ring)
+{
+ unsigned int idx, offset;
+ const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail);
+ const uint32_t head = qatomic_read(&ring->ring_state->cons_head);
+
+ if (tail & ~((ring->max_elems << 1) - 1) ||
+ head & ~((ring->max_elems << 1) - 1) ||
+ tail == (head ^ ring->max_elems)) {
+ rdma_error_report("CQ is full");
+ return NULL;
+ }
+
+ idx = tail & (ring->max_elems - 1);
+ offset = idx * ring->elem_sz;
+ return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
+}
+
+void pvrdma_ring_write_inc(PvrdmaRing *ring)
+{
+ uint32_t idx = qatomic_read(&ring->ring_state->prod_tail);
+
+ idx = (idx + 1) & ((ring->max_elems << 1) - 1);
+ qatomic_set(&ring->ring_state->prod_tail, idx);
+}
+
+void pvrdma_ring_free(PvrdmaRing *ring)
+{
+ if (!ring) {
+ return;
+ }
+
+ if (!ring->pages) {
+ return;
+ }
+
+ while (ring->npages--) {
+ rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages],
+ TARGET_PAGE_SIZE);
+ }
+
+ g_free(ring->pages);
+ ring->pages = NULL;
+}
diff --git a/hw/rdma/vmw/pvrdma_dev_ring.h b/hw/rdma/vmw/pvrdma_dev_ring.h
new file mode 100644
index 000000000..d231588ce
--- /dev/null
+++ b/hw/rdma/vmw/pvrdma_dev_ring.h
@@ -0,0 +1,46 @@
+/*
+ * QEMU VMWARE paravirtual RDMA ring utilities
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PVRDMA_DEV_RING_H
+#define PVRDMA_DEV_RING_H
+
+
+#define MAX_RING_NAME_SZ 32
+
+typedef struct PvrdmaRingState {
+ int prod_tail; /* producer tail */
+ int cons_head; /* consumer head */
+} PvrdmaRingState;
+
+typedef struct PvrdmaRing {
+ char name[MAX_RING_NAME_SZ];
+ PCIDevice *dev;
+ uint32_t max_elems;
+ size_t elem_sz;
+ PvrdmaRingState *ring_state; /* used only for unmap */
+ int npages;
+ void **pages;
+} PvrdmaRing;
+
+int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
+ PvrdmaRingState *ring_state, uint32_t max_elems,
+ size_t elem_sz, dma_addr_t *tbl, uint32_t npages);
+void *pvrdma_ring_next_elem_read(PvrdmaRing *ring);
+void pvrdma_ring_read_inc(PvrdmaRing *ring);
+void *pvrdma_ring_next_elem_write(PvrdmaRing *ring);
+void pvrdma_ring_write_inc(PvrdmaRing *ring);
+void pvrdma_ring_free(PvrdmaRing *ring);
+
+#endif
diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c
new file mode 100644
index 000000000..91206dbb8
--- /dev/null
+++ b/hw/rdma/vmw/pvrdma_main.c
@@ -0,0 +1,723 @@
+/*
+ * QEMU paravirtual RDMA
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_ids.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "cpu.h"
+#include "trace.h"
+#include "monitor/monitor.h"
+#include "hw/rdma/rdma.h"
+
+#include "../rdma_rm.h"
+#include "../rdma_backend.h"
+#include "../rdma_utils.h"
+
+#include <infiniband/verbs.h>
+#include "pvrdma.h"
+#include "standard-headers/rdma/vmw_pvrdma-abi.h"
+#include "sysemu/runstate.h"
+#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h"
+#include "pvrdma_qp_ops.h"
+
+static Property pvrdma_dev_properties[] = {
+ DEFINE_PROP_STRING("netdev", PVRDMADev, backend_eth_device_name),
+ DEFINE_PROP_STRING("ibdev", PVRDMADev, backend_device_name),
+ DEFINE_PROP_UINT8("ibport", PVRDMADev, backend_port_num, 1),
+ DEFINE_PROP_UINT64("dev-caps-max-mr-size", PVRDMADev, dev_attr.max_mr_size,
+ MAX_MR_SIZE),
+ DEFINE_PROP_INT32("dev-caps-max-qp", PVRDMADev, dev_attr.max_qp, MAX_QP),
+ DEFINE_PROP_INT32("dev-caps-max-cq", PVRDMADev, dev_attr.max_cq, MAX_CQ),
+ DEFINE_PROP_INT32("dev-caps-max-mr", PVRDMADev, dev_attr.max_mr, MAX_MR),
+ DEFINE_PROP_INT32("dev-caps-max-pd", PVRDMADev, dev_attr.max_pd, MAX_PD),
+ DEFINE_PROP_INT32("dev-caps-qp-rd-atom", PVRDMADev, dev_attr.max_qp_rd_atom,
+ MAX_QP_RD_ATOM),
+ DEFINE_PROP_INT32("dev-caps-max-qp-init-rd-atom", PVRDMADev,
+ dev_attr.max_qp_init_rd_atom, MAX_QP_INIT_RD_ATOM),
+ DEFINE_PROP_INT32("dev-caps-max-ah", PVRDMADev, dev_attr.max_ah, MAX_AH),
+ DEFINE_PROP_INT32("dev-caps-max-srq", PVRDMADev, dev_attr.max_srq, MAX_SRQ),
+ DEFINE_PROP_CHR("mad-chardev", PVRDMADev, mad_chr),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pvrdma_format_statistics(RdmaProvider *obj, GString *buf)
+{
+ PVRDMADev *dev = PVRDMA_DEV(obj);
+ PCIDevice *pdev = PCI_DEVICE(dev);
+
+ g_string_append_printf(buf, "%s, %x.%x\n",
+ pdev->name, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ g_string_append_printf(buf, "\tcommands : %" PRId64 "\n",
+ dev->stats.commands);
+ g_string_append_printf(buf, "\tregs_reads : %" PRId64 "\n",
+ dev->stats.regs_reads);
+ g_string_append_printf(buf, "\tregs_writes : %" PRId64 "\n",
+ dev->stats.regs_writes);
+ g_string_append_printf(buf, "\tuar_writes : %" PRId64 "\n",
+ dev->stats.uar_writes);
+ g_string_append_printf(buf, "\tinterrupts : %" PRId64 "\n",
+ dev->stats.interrupts);
+ rdma_format_device_counters(&dev->rdma_dev_res, buf);
+}
+
+static void free_dev_ring(PCIDevice *pci_dev, PvrdmaRing *ring,
+ void *ring_state)
+{
+ pvrdma_ring_free(ring);
+ rdma_pci_dma_unmap(pci_dev, ring_state, TARGET_PAGE_SIZE);
+}
+
+static int init_dev_ring(PvrdmaRing *ring, PvrdmaRingState **ring_state,
+ const char *name, PCIDevice *pci_dev,
+ dma_addr_t dir_addr, uint32_t num_pages)
+{
+ uint64_t *dir, *tbl;
+ int rc = 0;
+
+ if (!num_pages) {
+ rdma_error_report("Ring pages count must be strictly positive");
+ return -EINVAL;
+ }
+
+ dir = rdma_pci_dma_map(pci_dev, dir_addr, TARGET_PAGE_SIZE);
+ if (!dir) {
+ rdma_error_report("Failed to map to page directory (ring %s)", name);
+ rc = -ENOMEM;
+ goto out;
+ }
+ tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
+ if (!tbl) {
+ rdma_error_report("Failed to map to page table (ring %s)", name);
+ rc = -ENOMEM;
+ goto out_free_dir;
+ }
+
+ *ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+ if (!*ring_state) {
+ rdma_error_report("Failed to map to ring state (ring %s)", name);
+ rc = -ENOMEM;
+ goto out_free_tbl;
+ }
+    /*
+     * The ring-state page holds two PvrdmaRingState structs; the device
+     * (host-to-guest) rings use the second (RX) one, so skip the first.
+     */
+ (*ring_state)++;
+ rc = pvrdma_ring_init(ring, name, pci_dev,
+ (PvrdmaRingState *)*ring_state,
+ (num_pages - 1) * TARGET_PAGE_SIZE /
+ sizeof(struct pvrdma_cqne),
+ sizeof(struct pvrdma_cqne),
+ (dma_addr_t *)&tbl[1], (dma_addr_t)num_pages - 1);
+ if (rc) {
+ rc = -ENOMEM;
+ goto out_free_ring_state;
+ }
+
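+    /*
+     * Success: the page directory and page table mappings were only needed
+     * while pvrdma_ring_init copied the page addresses, so unmap them too.
+     */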
+ goto out_free_tbl;
+
+out_free_ring_state:
+ rdma_pci_dma_unmap(pci_dev, *ring_state, TARGET_PAGE_SIZE);
+
+out_free_tbl:
+ rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
+
+out_free_dir:
+ rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
+
+out:
+ return rc;
+}
+
+static void free_dsr(PVRDMADev *dev)
+{
+ PCIDevice *pci_dev = PCI_DEVICE(dev);
+
+ if (!dev->dsr_info.dsr) {
+ return;
+ }
+
+ free_dev_ring(pci_dev, &dev->dsr_info.async,
+ dev->dsr_info.async_ring_state);
+
+ free_dev_ring(pci_dev, &dev->dsr_info.cq, dev->dsr_info.cq_ring_state);
+
+ rdma_pci_dma_unmap(pci_dev, dev->dsr_info.req,
+ sizeof(union pvrdma_cmd_req));
+
+ rdma_pci_dma_unmap(pci_dev, dev->dsr_info.rsp,
+ sizeof(union pvrdma_cmd_resp));
+
+ rdma_pci_dma_unmap(pci_dev, dev->dsr_info.dsr,
+ sizeof(struct pvrdma_device_shared_region));
+
+ dev->dsr_info.dsr = NULL;
+}
+
+static int load_dsr(PVRDMADev *dev)
+{
+ int rc = 0;
+ PCIDevice *pci_dev = PCI_DEVICE(dev);
+ DSRInfo *dsr_info;
+ struct pvrdma_device_shared_region *dsr;
+
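+    /* Tear down any previously mapped DSR before mapping a new one */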
+ free_dsr(dev);
+
+ /* Map to DSR */
+ dev->dsr_info.dsr = rdma_pci_dma_map(pci_dev, dev->dsr_info.dma,
+ sizeof(struct pvrdma_device_shared_region));
+ if (!dev->dsr_info.dsr) {
+ rdma_error_report("Failed to map to DSR");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Shortcuts */
+ dsr_info = &dev->dsr_info;
+ dsr = dsr_info->dsr;
+
+ /* Map to command slot */
+ dsr_info->req = rdma_pci_dma_map(pci_dev, dsr->cmd_slot_dma,
+ sizeof(union pvrdma_cmd_req));
+ if (!dsr_info->req) {
+ rdma_error_report("Failed to map to command slot address");
+ rc = -ENOMEM;
+ goto out_free_dsr;
+ }
+
+ /* Map to response slot */
+ dsr_info->rsp = rdma_pci_dma_map(pci_dev, dsr->resp_slot_dma,
+ sizeof(union pvrdma_cmd_resp));
+ if (!dsr_info->rsp) {
+ rdma_error_report("Failed to map to response slot address");
+ rc = -ENOMEM;
+ goto out_free_req;
+ }
+
+ /* Map to CQ notification ring */
+ rc = init_dev_ring(&dsr_info->cq, &dsr_info->cq_ring_state, "dev_cq",
+ pci_dev, dsr->cq_ring_pages.pdir_dma,
+ dsr->cq_ring_pages.num_pages);
+ if (rc) {
+ rc = -ENOMEM;
+ goto out_free_rsp;
+ }
+
+ /* Map to event notification ring */
+ rc = init_dev_ring(&dsr_info->async, &dsr_info->async_ring_state,
+ "dev_async", pci_dev, dsr->async_ring_pages.pdir_dma,
+ dsr->async_ring_pages.num_pages);
+ if (rc) {
+ rc = -ENOMEM;
+ goto out_free_rsp;
+ }
+
+ goto out;
+
+out_free_rsp:
+ rdma_pci_dma_unmap(pci_dev, dsr_info->rsp, sizeof(union pvrdma_cmd_resp));
+
+out_free_req:
+ rdma_pci_dma_unmap(pci_dev, dsr_info->req, sizeof(union pvrdma_cmd_req));
+
+out_free_dsr:
+ rdma_pci_dma_unmap(pci_dev, dsr_info->dsr,
+ sizeof(struct pvrdma_device_shared_region));
+ dsr_info->dsr = NULL;
+
+out:
+ return rc;
+}
+
+static void init_dsr_dev_caps(PVRDMADev *dev)
+{
+ struct pvrdma_device_shared_region *dsr;
+
+ if (dev->dsr_info.dsr == NULL) {
+ rdma_error_report("Can't initialized DSR");
+ return;
+ }
+
+ dsr = dev->dsr_info.dsr;
+ dsr->caps.fw_ver = PVRDMA_FW_VERSION;
+ dsr->caps.mode = PVRDMA_DEVICE_MODE_ROCE;
+ dsr->caps.gid_types |= PVRDMA_GID_TYPE_FLAG_ROCE_V1;
+ dsr->caps.max_uar = RDMA_BAR2_UAR_SIZE;
+ dsr->caps.max_mr_size = dev->dev_attr.max_mr_size;
+ dsr->caps.max_qp = dev->dev_attr.max_qp;
+ dsr->caps.max_qp_wr = dev->dev_attr.max_qp_wr;
+ dsr->caps.max_sge = dev->dev_attr.max_sge;
+ dsr->caps.max_cq = dev->dev_attr.max_cq;
+ dsr->caps.max_cqe = dev->dev_attr.max_cqe;
+ dsr->caps.max_mr = dev->dev_attr.max_mr;
+ dsr->caps.max_pd = dev->dev_attr.max_pd;
+ dsr->caps.max_ah = dev->dev_attr.max_ah;
+ dsr->caps.max_srq = dev->dev_attr.max_srq;
+ dsr->caps.max_srq_wr = dev->dev_attr.max_srq_wr;
+ dsr->caps.max_srq_sge = dev->dev_attr.max_srq_sge;
+ dsr->caps.gid_tbl_len = MAX_GIDS;
+ dsr->caps.sys_image_guid = 0;
+ dsr->caps.node_guid = dev->node_guid;
+ dsr->caps.phys_port_cnt = MAX_PORTS;
+ dsr->caps.max_pkeys = MAX_PKEYS;
+}
+
+static void uninit_msix(PCIDevice *pdev, int used_vectors)
+{
+ PVRDMADev *dev = PVRDMA_DEV(pdev);
+ int i;
+
+ for (i = 0; i < used_vectors; i++) {
+ msix_vector_unuse(pdev, i);
+ }
+
+ msix_uninit(pdev, &dev->msix, &dev->msix);
+}
+
+static int init_msix(PCIDevice *pdev)
+{
+ PVRDMADev *dev = PVRDMA_DEV(pdev);
+ int i;
+ int rc;
+
+ rc = msix_init(pdev, RDMA_MAX_INTRS, &dev->msix, RDMA_MSIX_BAR_IDX,
+ RDMA_MSIX_TABLE, &dev->msix, RDMA_MSIX_BAR_IDX,
+ RDMA_MSIX_PBA, 0, NULL);
+
+ if (rc < 0) {
+ rdma_error_report("Failed to initialize MSI-X");
+ return rc;
+ }
+
+ for (i = 0; i < RDMA_MAX_INTRS; i++) {
+ rc = msix_vector_use(PCI_DEVICE(dev), i);
+ if (rc < 0) {
+ rdma_error_report("Fail mark MSI-X vector %d", i);
+ uninit_msix(pdev, i);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void pvrdma_fini(PCIDevice *pdev)
+{
+ PVRDMADev *dev = PVRDMA_DEV(pdev);
+
+ notifier_remove(&dev->shutdown_notifier);
+
+ pvrdma_qp_ops_fini();
+
+ rdma_backend_stop(&dev->backend_dev);
+
+ rdma_rm_fini(&dev->rdma_dev_res, &dev->backend_dev,
+ dev->backend_eth_device_name);
+
+ rdma_backend_fini(&dev->backend_dev);
+
+ free_dsr(dev);
+
+ if (msix_enabled(pdev)) {
+ uninit_msix(pdev, RDMA_MAX_INTRS);
+ }
+
+ rdma_info_report("Device %s %x.%x is down", pdev->name,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+}
+
+static void pvrdma_stop(PVRDMADev *dev)
+{
+ rdma_backend_stop(&dev->backend_dev);
+}
+
+static void pvrdma_start(PVRDMADev *dev)
+{
+ rdma_backend_start(&dev->backend_dev);
+}
+
+static void activate_device(PVRDMADev *dev)
+{
+ pvrdma_start(dev);
+ set_reg_val(dev, PVRDMA_REG_ERR, 0);
+}
+
+static int unquiesce_device(PVRDMADev *dev)
+{
+ return 0;
+}
+
+static void reset_device(PVRDMADev *dev)
+{
+ pvrdma_stop(dev);
+}
+
+static uint64_t pvrdma_regs_read(void *opaque, hwaddr addr, unsigned size)
+{
+ PVRDMADev *dev = opaque;
+ uint32_t val;
+
+ dev->stats.regs_reads++;
+
+ if (get_reg_val(dev, addr, &val)) {
+ rdma_error_report("Failed to read REG value from address 0x%x",
+ (uint32_t)addr);
+ return -EINVAL;
+ }
+
+ trace_pvrdma_regs_read(addr, val);
+
+ return val;
+}
+
+static void pvrdma_regs_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ PVRDMADev *dev = opaque;
+
+ dev->stats.regs_writes++;
+
+ if (set_reg_val(dev, addr, val)) {
+ rdma_error_report("Failed to set REG value, addr=0x%"PRIx64 ", val=0x%"PRIx64,
+ addr, val);
+ return;
+ }
+
+ switch (addr) {
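+    /*
+     * The guest programs the DSR guest-physical address in two 32-bit
+     * writes: DSRLOW carries bits 0-31, DSRHIGH carries bits 32-63 and
+     * triggers mapping of the shared region.
+     */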
+ case PVRDMA_REG_DSRLOW:
+ trace_pvrdma_regs_write(addr, val, "DSRLOW", "");
+ dev->dsr_info.dma = val;
+ break;
+ case PVRDMA_REG_DSRHIGH:
+ trace_pvrdma_regs_write(addr, val, "DSRHIGH", "");
+ dev->dsr_info.dma |= val << 32;
+ load_dsr(dev);
+ init_dsr_dev_caps(dev);
+ break;
+ case PVRDMA_REG_CTL:
+ switch (val) {
+ case PVRDMA_DEVICE_CTL_ACTIVATE:
+ trace_pvrdma_regs_write(addr, val, "CTL", "ACTIVATE");
+ activate_device(dev);
+ break;
+ case PVRDMA_DEVICE_CTL_UNQUIESCE:
+ trace_pvrdma_regs_write(addr, val, "CTL", "UNQUIESCE");
+ unquiesce_device(dev);
+ break;
+ case PVRDMA_DEVICE_CTL_RESET:
+ trace_pvrdma_regs_write(addr, val, "CTL", "URESET");
+ reset_device(dev);
+ break;
+ }
+ break;
+ case PVRDMA_REG_IMR:
+ trace_pvrdma_regs_write(addr, val, "INTR_MASK", "");
+ dev->interrupt_mask = val;
+ break;
+ case PVRDMA_REG_REQUEST:
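+        /* Writing 0 kicks execution of the command in the command slot */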
+ if (val == 0) {
+ trace_pvrdma_regs_write(addr, val, "REQUEST", "");
+ pvrdma_exec_cmd(dev);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static const MemoryRegionOps regs_ops = {
+ .read = pvrdma_regs_read,
+ .write = pvrdma_regs_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = sizeof(uint32_t),
+ .max_access_size = sizeof(uint32_t),
+ },
+};
+
+static uint64_t pvrdma_uar_read(void *opaque, hwaddr addr, unsigned size)
+{
+ return 0xffffffff;
+}
+
+static void pvrdma_uar_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ PVRDMADev *dev = opaque;
+
+ dev->stats.uar_writes++;
+
+ switch (addr & 0xFFF) { /* Mask with 0xFFF as each UC gets page */
+ case PVRDMA_UAR_QP_OFFSET:
+ if (val & PVRDMA_UAR_QP_SEND) {
+ trace_pvrdma_uar_write(addr, val, "QP", "SEND",
+ val & PVRDMA_UAR_HANDLE_MASK, 0);
+ pvrdma_qp_send(dev, val & PVRDMA_UAR_HANDLE_MASK);
+ }
+ if (val & PVRDMA_UAR_QP_RECV) {
+ trace_pvrdma_uar_write(addr, val, "QP", "RECV",
+ val & PVRDMA_UAR_HANDLE_MASK, 0);
+ pvrdma_qp_recv(dev, val & PVRDMA_UAR_HANDLE_MASK);
+ }
+ break;
+ case PVRDMA_UAR_CQ_OFFSET:
+ if (val & PVRDMA_UAR_CQ_ARM) {
+ trace_pvrdma_uar_write(addr, val, "CQ", "ARM",
+ val & PVRDMA_UAR_HANDLE_MASK,
+ !!(val & PVRDMA_UAR_CQ_ARM_SOL));
+ rdma_rm_req_notify_cq(&dev->rdma_dev_res,
+ val & PVRDMA_UAR_HANDLE_MASK,
+ !!(val & PVRDMA_UAR_CQ_ARM_SOL));
+ }
+ if (val & PVRDMA_UAR_CQ_ARM_SOL) {
+ trace_pvrdma_uar_write(addr, val, "CQ", "ARMSOL - not supported", 0,
+ 0);
+ }
+ if (val & PVRDMA_UAR_CQ_POLL) {
+ trace_pvrdma_uar_write(addr, val, "CQ", "POLL",
+ val & PVRDMA_UAR_HANDLE_MASK, 0);
+ pvrdma_cq_poll(&dev->rdma_dev_res, val & PVRDMA_UAR_HANDLE_MASK);
+ }
+ break;
+ case PVRDMA_UAR_SRQ_OFFSET:
+ if (val & PVRDMA_UAR_SRQ_RECV) {
+ trace_pvrdma_uar_write(addr, val, "QP", "SRQ",
+ val & PVRDMA_UAR_HANDLE_MASK, 0);
+ pvrdma_srq_recv(dev, val & PVRDMA_UAR_HANDLE_MASK);
+ }
+ break;
+ default:
+ rdma_error_report("Unsupported command, addr=0x%"PRIx64", val=0x%"PRIx64,
+ addr, val);
+ break;
+ }
+}
+
+static const MemoryRegionOps uar_ops = {
+ .read = pvrdma_uar_read,
+ .write = pvrdma_uar_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = sizeof(uint32_t),
+ .max_access_size = sizeof(uint32_t),
+ },
+};
+
+static void init_pci_config(PCIDevice *pdev)
+{
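+    /* Advertise a legacy interrupt pin (INTA#) in config space */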
+ pdev->config[PCI_INTERRUPT_PIN] = 1;
+}
+
+static void init_bars(PCIDevice *pdev)
+{
+ PVRDMADev *dev = PVRDMA_DEV(pdev);
+
+ /* BAR 0 - MSI-X */
+ memory_region_init(&dev->msix, OBJECT(dev), "pvrdma-msix",
+ RDMA_BAR0_MSIX_SIZE);
+ pci_register_bar(pdev, RDMA_MSIX_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY,
+ &dev->msix);
+
+ /* BAR 1 - Registers */
+ memset(&dev->regs_data, 0, sizeof(dev->regs_data));
+ memory_region_init_io(&dev->regs, OBJECT(dev), &regs_ops, dev,
+ "pvrdma-regs", sizeof(dev->regs_data));
+ pci_register_bar(pdev, RDMA_REG_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY,
+ &dev->regs);
+
+ /* BAR 2 - UAR */
+ memset(&dev->uar_data, 0, sizeof(dev->uar_data));
+ memory_region_init_io(&dev->uar, OBJECT(dev), &uar_ops, dev, "rdma-uar",
+ sizeof(dev->uar_data));
+ pci_register_bar(pdev, RDMA_UAR_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY,
+ &dev->uar);
+}
+
+static void init_regs(PCIDevice *pdev)
+{
+ PVRDMADev *dev = PVRDMA_DEV(pdev);
+
+ set_reg_val(dev, PVRDMA_REG_VERSION, PVRDMA_HW_VERSION);
+ set_reg_val(dev, PVRDMA_REG_ERR, 0xFFFF);
+}
+
+static void init_dev_caps(PVRDMADev *dev)
+{
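+    /*
+     * A single-page page table holds TARGET_PAGE_SIZE / sizeof(uint64_t)
+     * page addresses, so pg_tbl_bytes bounds the ring sizes advertised below.
+     */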
+ size_t pg_tbl_bytes = TARGET_PAGE_SIZE *
+ (TARGET_PAGE_SIZE / sizeof(uint64_t));
+ size_t wr_sz = MAX(sizeof(struct pvrdma_sq_wqe_hdr),
+ sizeof(struct pvrdma_rq_wqe_hdr));
+
+ dev->dev_attr.max_qp_wr = pg_tbl_bytes /
+ (wr_sz + sizeof(struct pvrdma_sge) *
+ dev->dev_attr.max_sge) - TARGET_PAGE_SIZE;
+ /* First page is ring state ^^^^ */
+
+ dev->dev_attr.max_cqe = pg_tbl_bytes / sizeof(struct pvrdma_cqe) -
+ TARGET_PAGE_SIZE; /* First page is ring state */
+
+ dev->dev_attr.max_srq_wr = pg_tbl_bytes /
+ ((sizeof(struct pvrdma_rq_wqe_hdr) +
+ sizeof(struct pvrdma_sge)) *
+ dev->dev_attr.max_sge) - TARGET_PAGE_SIZE;
+}
+
+static int pvrdma_check_ram_shared(Object *obj, void *opaque)
+{
+ bool *shared = opaque;
+
+ if (object_dynamic_cast(obj, "memory-backend-ram")) {
+ *shared = object_property_get_bool(obj, "share", NULL);
+ }
+
+ return 0;
+}
+
+static void pvrdma_shutdown_notifier(Notifier *n, void *opaque)
+{
+ PVRDMADev *dev = container_of(n, PVRDMADev, shutdown_notifier);
+ PCIDevice *pci_dev = PCI_DEVICE(dev);
+
+ pvrdma_fini(pci_dev);
+}
+
+static void pvrdma_realize(PCIDevice *pdev, Error **errp)
+{
+ int rc = 0;
+ PVRDMADev *dev = PVRDMA_DEV(pdev);
+ Object *memdev_root;
+ bool ram_shared = false;
+ PCIDevice *func0;
+
+ rdma_info_report("Initializing device %s %x.%x", pdev->name,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ if (TARGET_PAGE_SIZE != qemu_real_host_page_size) {
+ error_setg(errp, "Target page size must be the same as host page size");
+ return;
+ }
+
+ func0 = pci_get_function_0(pdev);
+    /* Bail out if function 0 of this slot is not a vmxnet3 device */
+ if (strcmp(object_get_typename(OBJECT(func0)), TYPE_VMXNET3)) {
+ error_setg(errp, "Device on %x.0 must be %s", PCI_SLOT(pdev->devfn),
+ TYPE_VMXNET3);
+ return;
+ }
+ dev->func0 = VMXNET3(func0);
+
+ addrconf_addr_eui48((unsigned char *)&dev->node_guid,
+ (const char *)&dev->func0->conf.macaddr.a);
+
+ memdev_root = object_resolve_path("/objects", NULL);
+ if (memdev_root) {
+ object_child_foreach(memdev_root, pvrdma_check_ram_shared, &ram_shared);
+ }
+ if (!ram_shared) {
+ error_setg(errp, "Only shared memory backed ram is supported");
+ return;
+ }
+
+ dev->dsr_info.dsr = NULL;
+
+ init_pci_config(pdev);
+
+ init_bars(pdev);
+
+ init_regs(pdev);
+
+ rc = init_msix(pdev);
+ if (rc) {
+ goto out;
+ }
+
+ rc = rdma_backend_init(&dev->backend_dev, pdev, &dev->rdma_dev_res,
+ dev->backend_device_name, dev->backend_port_num,
+ &dev->dev_attr, &dev->mad_chr);
+ if (rc) {
+ goto out;
+ }
+
+ init_dev_caps(dev);
+
+ rc = rdma_rm_init(&dev->rdma_dev_res, &dev->dev_attr);
+ if (rc) {
+ goto out;
+ }
+
+ rc = pvrdma_qp_ops_init();
+ if (rc) {
+ goto out;
+ }
+
+ memset(&dev->stats, 0, sizeof(dev->stats));
+
+ dev->shutdown_notifier.notify = pvrdma_shutdown_notifier;
+ qemu_register_shutdown_notifier(&dev->shutdown_notifier);
+
+#ifdef LEGACY_RDMA_REG_MR
+ rdma_info_report("Using legacy reg_mr");
+#else
+ rdma_info_report("Using iova reg_mr");
+#endif
+
+out:
+ if (rc) {
+ pvrdma_fini(pdev);
+ error_append_hint(errp, "Device failed to load\n");
+ }
+}
+
+static void pvrdma_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ RdmaProviderClass *ir = RDMA_PROVIDER_CLASS(klass);
+
+ k->realize = pvrdma_realize;
+ k->vendor_id = PCI_VENDOR_ID_VMWARE;
+ k->device_id = PCI_DEVICE_ID_VMWARE_PVRDMA;
+ k->revision = 0x00;
+ k->class_id = PCI_CLASS_NETWORK_OTHER;
+
+ dc->desc = "RDMA Device";
+ device_class_set_props(dc, pvrdma_dev_properties);
+ set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+
+ ir->format_statistics = pvrdma_format_statistics;
+}
+
+static const TypeInfo pvrdma_info = {
+ .name = PVRDMA_HW_NAME,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PVRDMADev),
+ .class_init = pvrdma_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { INTERFACE_RDMA_PROVIDER },
+ { }
+ }
+};
+
+static void register_types(void)
+{
+ type_register_static(&pvrdma_info);
+}
+
+type_init(register_types)
diff --git a/hw/rdma/vmw/pvrdma_qp_ops.c b/hw/rdma/vmw/pvrdma_qp_ops.c
new file mode 100644
index 000000000..8050287a6
--- /dev/null
+++ b/hw/rdma/vmw/pvrdma_qp_ops.c
@@ -0,0 +1,298 @@
+/*
+ * QEMU paravirtual RDMA - QP implementation
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+
+#include "../rdma_utils.h"
+#include "../rdma_rm.h"
+#include "../rdma_backend.h"
+
+#include "trace.h"
+
+#include "pvrdma.h"
+#include "standard-headers/rdma/vmw_pvrdma-abi.h"
+#include "pvrdma_qp_ops.h"
+
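+/*
+ * Completion context handed to the backend with each work request; it names
+ * the CQ to complete into and carries a partially filled CQE.
+ */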
+typedef struct CompHandlerCtx {
+ PVRDMADev *dev;
+ uint32_t cq_handle;
+ struct pvrdma_cqe cqe;
+} CompHandlerCtx;
+
+/* Send Queue WQE */
+typedef struct PvrdmaSqWqe {
+ struct pvrdma_sq_wqe_hdr hdr;
+ struct pvrdma_sge sge[];
+} PvrdmaSqWqe;
+
+/* Recv Queue WQE */
+typedef struct PvrdmaRqWqe {
+ struct pvrdma_rq_wqe_hdr hdr;
+ struct pvrdma_sge sge[];
+} PvrdmaRqWqe;
+
+/*
+ * 1. Put CQE on send CQ ring
+ * 2. Put CQ number on dsr completion ring
+ * 3. Interrupt host
+ */
+static int pvrdma_post_cqe(PVRDMADev *dev, uint32_t cq_handle,
+ struct pvrdma_cqe *cqe, struct ibv_wc *wc)
+{
+ struct pvrdma_cqe *cqe1;
+ struct pvrdma_cqne *cqne;
+ PvrdmaRing *ring;
+ RdmaRmCQ *cq = rdma_rm_get_cq(&dev->rdma_dev_res, cq_handle);
+
+ if (unlikely(!cq)) {
+ return -EINVAL;
+ }
+
+ ring = (PvrdmaRing *)cq->opaque;
+
+ /* Step #1: Put CQE on CQ ring */
+ cqe1 = pvrdma_ring_next_elem_write(ring);
+ if (unlikely(!cqe1)) {
+ return -EINVAL;
+ }
+
+ memset(cqe1, 0, sizeof(*cqe1));
+ cqe1->wr_id = cqe->wr_id;
+ cqe1->qp = cqe->qp ? cqe->qp : wc->qp_num;
+ cqe1->opcode = cqe->opcode;
+ cqe1->status = wc->status;
+ cqe1->byte_len = wc->byte_len;
+ cqe1->src_qp = wc->src_qp;
+ cqe1->wc_flags = wc->wc_flags;
+ cqe1->vendor_err = wc->vendor_err;
+
+ trace_pvrdma_post_cqe(cq_handle, cq->notify, cqe1->wr_id, cqe1->qp,
+ cqe1->opcode, cqe1->status, cqe1->byte_len,
+ cqe1->src_qp, cqe1->wc_flags, cqe1->vendor_err);
+
+ pvrdma_ring_write_inc(ring);
+
+ /* Step #2: Put CQ number on dsr completion ring */
+ cqne = pvrdma_ring_next_elem_write(&dev->dsr_info.cq);
+ if (unlikely(!cqne)) {
+ return -EINVAL;
+ }
+
+ cqne->info = cq_handle;
+ pvrdma_ring_write_inc(&dev->dsr_info.cq);
+
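+    /*
+     * Interrupt the guest unless notifications are disabled; a one-shot arm
+     * (CNT_ARM) is cleared once it fires.
+     */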
+ if (cq->notify != CNT_CLEAR) {
+ if (cq->notify == CNT_ARM) {
+ cq->notify = CNT_CLEAR;
+ }
+ post_interrupt(dev, INTR_VEC_CMD_COMPLETION_Q);
+ }
+
+ return 0;
+}
+
+static void pvrdma_qp_ops_comp_handler(void *ctx, struct ibv_wc *wc)
+{
+ CompHandlerCtx *comp_ctx = (CompHandlerCtx *)ctx;
+
+ pvrdma_post_cqe(comp_ctx->dev, comp_ctx->cq_handle, &comp_ctx->cqe, wc);
+
+ g_free(ctx);
+}
+
+static void complete_with_error(uint32_t vendor_err, void *ctx)
+{
+ struct ibv_wc wc = {};
+
+ wc.status = IBV_WC_GENERAL_ERR;
+ wc.vendor_err = vendor_err;
+
+ pvrdma_qp_ops_comp_handler(ctx, &wc);
+}
+
+void pvrdma_qp_ops_fini(void)
+{
+ rdma_backend_unregister_comp_handler();
+}
+
+int pvrdma_qp_ops_init(void)
+{
+ rdma_backend_register_comp_handler(pvrdma_qp_ops_comp_handler);
+
+ return 0;
+}
+
+void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle)
+{
+ RdmaRmQP *qp;
+ PvrdmaSqWqe *wqe;
+ PvrdmaRing *ring;
+ int sgid_idx;
+ union ibv_gid *sgid;
+
+ qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle);
+ if (unlikely(!qp)) {
+ return;
+ }
+
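+    /* qp->opaque points at a pair of rings: send ring first, recv second */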
+ ring = (PvrdmaRing *)qp->opaque;
+
+ wqe = (struct PvrdmaSqWqe *)pvrdma_ring_next_elem_read(ring);
+ while (wqe) {
+ CompHandlerCtx *comp_ctx;
+
+ /* Prepare CQE */
+ comp_ctx = g_malloc(sizeof(CompHandlerCtx));
+ comp_ctx->dev = dev;
+ comp_ctx->cq_handle = qp->send_cq_handle;
+ comp_ctx->cqe.wr_id = wqe->hdr.wr_id;
+ comp_ctx->cqe.qp = qp_handle;
+ comp_ctx->cqe.opcode = IBV_WC_SEND;
+
+ sgid = rdma_rm_get_gid(&dev->rdma_dev_res, wqe->hdr.wr.ud.av.gid_index);
+ if (!sgid) {
+ rdma_error_report("Failed to get gid for idx %d",
+ wqe->hdr.wr.ud.av.gid_index);
+            complete_with_error(VENDOR_ERR_INV_GID_IDX, comp_ctx);
+            /* Consume the bad WQE so the loop does not spin on it */
+            pvrdma_ring_read_inc(ring);
+            wqe = pvrdma_ring_next_elem_read(ring);
+            continue;
+ }
+
+ sgid_idx = rdma_rm_get_backend_gid_index(&dev->rdma_dev_res,
+ &dev->backend_dev,
+ wqe->hdr.wr.ud.av.gid_index);
+ if (sgid_idx <= 0) {
+ rdma_error_report("Failed to get bk sgid_idx for sgid_idx %d",
+ wqe->hdr.wr.ud.av.gid_index);
+ complete_with_error(VENDOR_ERR_INV_GID_IDX, comp_ctx);
+ continue;
+ }
+
+ if (wqe->hdr.num_sge > dev->dev_attr.max_sge) {
+ rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge,
+ dev->dev_attr.max_sge);
+            complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx);
+            pvrdma_ring_read_inc(ring);
+            wqe = pvrdma_ring_next_elem_read(ring);
+            continue;
+ }
+
+ rdma_backend_post_send(&dev->backend_dev, &qp->backend_qp, qp->qp_type,
+ (struct ibv_sge *)&wqe->sge[0], wqe->hdr.num_sge,
+ sgid_idx, sgid,
+ (union ibv_gid *)wqe->hdr.wr.ud.av.dgid,
+ wqe->hdr.wr.ud.remote_qpn,
+ wqe->hdr.wr.ud.remote_qkey, comp_ctx);
+
+ pvrdma_ring_read_inc(ring);
+
+ wqe = pvrdma_ring_next_elem_read(ring);
+ }
+}
+
+void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle)
+{
+ RdmaRmQP *qp;
+ PvrdmaRqWqe *wqe;
+ PvrdmaRing *ring;
+
+ qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle);
+ if (unlikely(!qp)) {
+ return;
+ }
+
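+    /* The recv ring is the second of the pair allocated at QP creation */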
+ ring = &((PvrdmaRing *)qp->opaque)[1];
+
+ wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring);
+ while (wqe) {
+ CompHandlerCtx *comp_ctx;
+
+ /* Prepare CQE */
+ comp_ctx = g_malloc(sizeof(CompHandlerCtx));
+ comp_ctx->dev = dev;
+ comp_ctx->cq_handle = qp->recv_cq_handle;
+ comp_ctx->cqe.wr_id = wqe->hdr.wr_id;
+ comp_ctx->cqe.qp = qp_handle;
+ comp_ctx->cqe.opcode = IBV_WC_RECV;
+
+ if (wqe->hdr.num_sge > dev->dev_attr.max_sge) {
+ rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge,
+ dev->dev_attr.max_sge);
+            complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx);
+            /* Consume the bad WQE so the loop does not spin on it */
+            pvrdma_ring_read_inc(ring);
+            wqe = pvrdma_ring_next_elem_read(ring);
+            continue;
+ }
+
+ rdma_backend_post_recv(&dev->backend_dev, &qp->backend_qp, qp->qp_type,
+ (struct ibv_sge *)&wqe->sge[0], wqe->hdr.num_sge,
+ comp_ctx);
+
+ pvrdma_ring_read_inc(ring);
+
+ wqe = pvrdma_ring_next_elem_read(ring);
+ }
+}
+
+void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle)
+{
+ RdmaRmSRQ *srq;
+ PvrdmaRqWqe *wqe;
+ PvrdmaRing *ring;
+
+ srq = rdma_rm_get_srq(&dev->rdma_dev_res, srq_handle);
+ if (unlikely(!srq)) {
+ return;
+ }
+
+ ring = (PvrdmaRing *)srq->opaque;
+
+ wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring);
+ while (wqe) {
+ CompHandlerCtx *comp_ctx;
+
+ /* Prepare CQE */
+ comp_ctx = g_malloc(sizeof(CompHandlerCtx));
+ comp_ctx->dev = dev;
+ comp_ctx->cq_handle = srq->recv_cq_handle;
+ comp_ctx->cqe.wr_id = wqe->hdr.wr_id;
+ comp_ctx->cqe.qp = 0;
+ comp_ctx->cqe.opcode = IBV_WC_RECV;
+
+ if (wqe->hdr.num_sge > dev->dev_attr.max_sge) {
+ rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge,
+ dev->dev_attr.max_sge);
+            complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx);
+            /* Consume the bad WQE so the loop does not spin on it */
+            pvrdma_ring_read_inc(ring);
+            wqe = pvrdma_ring_next_elem_read(ring);
+            continue;
+ }
+
+ rdma_backend_post_srq_recv(&dev->backend_dev, &srq->backend_srq,
+ (struct ibv_sge *)&wqe->sge[0],
+ wqe->hdr.num_sge,
+ comp_ctx);
+
+ pvrdma_ring_read_inc(ring);
+
+ wqe = pvrdma_ring_next_elem_read(ring);
+ }
+}
+
+void pvrdma_cq_poll(RdmaDeviceResources *dev_res, uint32_t cq_handle)
+{
+ RdmaRmCQ *cq;
+
+ cq = rdma_rm_get_cq(dev_res, cq_handle);
+ if (!cq) {
+ return;
+ }
+
+ rdma_backend_poll_cq(dev_res, &cq->backend_cq);
+}
diff --git a/hw/rdma/vmw/pvrdma_qp_ops.h b/hw/rdma/vmw/pvrdma_qp_ops.h
new file mode 100644
index 000000000..bf2b15c5c
--- /dev/null
+++ b/hw/rdma/vmw/pvrdma_qp_ops.h
@@ -0,0 +1,28 @@
+/*
+ * QEMU VMWARE paravirtual RDMA QP Operations
+ *
+ * Copyright (C) 2018 Oracle
+ * Copyright (C) 2018 Red Hat Inc
+ *
+ * Authors:
+ * Yuval Shaia <yuval.shaia@oracle.com>
+ * Marcel Apfelbaum <marcel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PVRDMA_QP_OPS_H
+#define PVRDMA_QP_OPS_H
+
+#include "pvrdma.h"
+
+int pvrdma_qp_ops_init(void);
+void pvrdma_qp_ops_fini(void);
+void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle);
+void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle);
+void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle);
+void pvrdma_cq_poll(RdmaDeviceResources *dev_res, uint32_t cq_handle);
+
+#endif
diff --git a/hw/rdma/vmw/trace-events b/hw/rdma/vmw/trace-events
new file mode 100644
index 000000000..a6c77e1e1
--- /dev/null
+++ b/hw/rdma/vmw/trace-events
@@ -0,0 +1,17 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# pvrdma_main.c
+pvrdma_regs_read(uint64_t addr, uint64_t val) "pvrdma.regs[0x%"PRIx64"]=0x%"PRIx64
+pvrdma_regs_write(uint64_t addr, uint64_t val, const char *reg_name, const char *val_name) "pvrdma.regs[0x%"PRIx64"]=0x%"PRIx64" (%s %s)"
+pvrdma_uar_write(uint64_t addr, uint64_t val, const char *reg_name, const char *val_name, int val1, int val2) "uar[0x%"PRIx64"]=0x%"PRIx64" (cls=%s, op=%s, obj=%d, val=%d)"
+
+# pvrdma_cmd.c
+pvrdma_map_to_pdir_host_virt(void *vfirst, void *vremaped) "mremap %p -> %p"
+pvrdma_map_to_pdir_next_page(int page_idx, void *vnext, void *vremaped) "mremap [%d] %p -> %p"
+pvrdma_exec_cmd(int cmd, int err) "cmd=%d, err=%d"
+
+# pvrdma_dev_ring.c
+pvrdma_ring_next_elem_read_no_data(char *ring_name) "pvrdma_ring %s is empty"
+
+# pvrdma_qp_ops.c
+pvrdma_post_cqe(uint32_t cq_handle, int notify, uint64_t wr_id, uint64_t qpn, uint32_t op_code, uint32_t status, uint32_t byte_len, uint32_t src_qp, uint32_t wc_flags, uint32_t vendor_err) "cq_handle=%d, notify=%d, wr_id=0x%"PRIx64", qpn=0x%"PRIx64", opcode=%d, status=%d, byte_len=%d, src_qp=%d, wc_flags=%d, vendor_err=%d"
diff --git a/hw/rdma/vmw/trace.h b/hw/rdma/vmw/trace.h
new file mode 100644
index 000000000..3ebc9fb7a
--- /dev/null
+++ b/hw/rdma/vmw/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_rdma_vmw.h"