Diffstat (limited to 'hw/rdma/vmw')
-rw-r--r-- | hw/rdma/vmw/pvrdma.h          | 144
-rw-r--r-- | hw/rdma/vmw/pvrdma_cmd.c      | 825
-rw-r--r-- | hw/rdma/vmw/pvrdma_dev_ring.c | 142
-rw-r--r-- | hw/rdma/vmw/pvrdma_dev_ring.h |  46
-rw-r--r-- | hw/rdma/vmw/pvrdma_main.c     | 723
-rw-r--r-- | hw/rdma/vmw/pvrdma_qp_ops.c   | 298
-rw-r--r-- | hw/rdma/vmw/pvrdma_qp_ops.h   |  28
-rw-r--r-- | hw/rdma/vmw/trace-events      |  17
-rw-r--r-- | hw/rdma/vmw/trace.h           |   1
9 files changed, 2224 insertions, 0 deletions
diff --git a/hw/rdma/vmw/pvrdma.h b/hw/rdma/vmw/pvrdma.h new file mode 100644 index 000000000..d08965d3e --- /dev/null +++ b/hw/rdma/vmw/pvrdma.h @@ -0,0 +1,144 @@ +/* + * QEMU VMWARE paravirtual RDMA device definitions + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia <yuval.shaia@oracle.com> + * Marcel Apfelbaum <marcel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef PVRDMA_PVRDMA_H +#define PVRDMA_PVRDMA_H + +#include "qemu/units.h" +#include "qemu/notify.h" +#include "hw/pci/pci.h" +#include "hw/pci/msix.h" +#include "chardev/char-fe.h" +#include "hw/net/vmxnet3_defs.h" + +#include "../rdma_backend_defs.h" +#include "../rdma_rm_defs.h" + +#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h" +#include "pvrdma_dev_ring.h" +#include "qom/object.h" + +/* BARs */ +#define RDMA_MSIX_BAR_IDX 0 +#define RDMA_REG_BAR_IDX 1 +#define RDMA_UAR_BAR_IDX 2 +#define RDMA_BAR0_MSIX_SIZE (16 * KiB) +#define RDMA_BAR1_REGS_SIZE 64 +#define RDMA_BAR2_UAR_SIZE (0x1000 * MAX_UCS) /* each uc gets page */ + +/* MSIX */ +#define RDMA_MAX_INTRS 3 +#define RDMA_MSIX_TABLE 0x0000 +#define RDMA_MSIX_PBA 0x2000 + +/* Interrupts Vectors */ +#define INTR_VEC_CMD_RING 0 +#define INTR_VEC_CMD_ASYNC_EVENTS 1 +#define INTR_VEC_CMD_COMPLETION_Q 2 + +/* HW attributes */ +#define PVRDMA_HW_NAME "pvrdma" +#define PVRDMA_HW_VERSION 17 +#define PVRDMA_FW_VERSION 14 + +/* Some defaults */ +#define PVRDMA_PKEY 0xFFFF + +typedef struct DSRInfo { + dma_addr_t dma; + struct pvrdma_device_shared_region *dsr; + + union pvrdma_cmd_req *req; + union pvrdma_cmd_resp *rsp; + + PvrdmaRingState *async_ring_state; + PvrdmaRing async; + + PvrdmaRingState *cq_ring_state; + PvrdmaRing cq; +} DSRInfo; + +typedef struct PVRDMADevStats { + uint64_t commands; + uint64_t regs_reads; + uint64_t regs_writes; + uint64_t uar_writes; + uint64_t interrupts; +} PVRDMADevStats; + +struct PVRDMADev { + PCIDevice parent_obj; + MemoryRegion msix; + MemoryRegion regs; + uint32_t regs_data[RDMA_BAR1_REGS_SIZE]; + MemoryRegion uar; + uint32_t uar_data[RDMA_BAR2_UAR_SIZE]; + DSRInfo dsr_info; + int interrupt_mask; + struct ibv_device_attr dev_attr; + uint64_t node_guid; + char *backend_eth_device_name; + char *backend_device_name; + uint8_t backend_port_num; + RdmaBackendDev backend_dev; + RdmaDeviceResources rdma_dev_res; + CharBackend mad_chr; + VMXNET3State *func0; + Notifier shutdown_notifier; + PVRDMADevStats stats; +}; +typedef struct PVRDMADev PVRDMADev; +DECLARE_INSTANCE_CHECKER(PVRDMADev, PVRDMA_DEV, + PVRDMA_HW_NAME) + +static inline int get_reg_val(PVRDMADev *dev, hwaddr addr, uint32_t *val) +{ + int idx = addr >> 2; + + if (idx >= RDMA_BAR1_REGS_SIZE) { + return -EINVAL; + } + + *val = dev->regs_data[idx]; + + return 0; +} + +static inline int set_reg_val(PVRDMADev *dev, hwaddr addr, uint32_t val) +{ + int idx = addr >> 2; + + if (idx >= RDMA_BAR1_REGS_SIZE) { + return -EINVAL; + } + + dev->regs_data[idx] = val; + + return 0; +} + +static inline void post_interrupt(PVRDMADev *dev, unsigned vector) +{ + PCIDevice *pci_dev = PCI_DEVICE(dev); + + if (likely(!dev->interrupt_mask)) { + dev->stats.interrupts++; + msix_notify(pci_dev, vector); + } +} + +int pvrdma_exec_cmd(PVRDMADev *dev); + +#endif diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c new file mode 100644 index 000000000..da7ddfa54 --- /dev/null +++ b/hw/rdma/vmw/pvrdma_cmd.c @@ -0,0 
+1,825 @@ +/* + * QEMU paravirtual RDMA - Command channel + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia <yuval.shaia@oracle.com> + * Marcel Apfelbaum <marcel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_ids.h" + +#include "../rdma_backend.h" +#include "../rdma_rm.h" +#include "../rdma_utils.h" + +#include "trace.h" +#include "pvrdma.h" +#include "standard-headers/rdma/vmw_pvrdma-abi.h" + +static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma, + uint32_t nchunks, size_t length) +{ + uint64_t *dir, *tbl; + int tbl_idx, dir_idx, addr_idx; + void *host_virt = NULL, *curr_page; + + if (!nchunks) { + rdma_error_report("Got nchunks=0"); + return NULL; + } + + length = ROUND_UP(length, TARGET_PAGE_SIZE); + if (nchunks * TARGET_PAGE_SIZE != length) { + rdma_error_report("Invalid nchunks/length (%u, %lu)", nchunks, + (unsigned long)length); + return NULL; + } + + dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE); + if (!dir) { + rdma_error_report("Failed to map to page directory"); + return NULL; + } + + tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to page table 0"); + goto out_unmap_dir; + } + + curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE); + if (!curr_page) { + rdma_error_report("Failed to map the page 0"); + goto out_unmap_tbl; + } + + host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE); + if (host_virt == MAP_FAILED) { + host_virt = NULL; + rdma_error_report("Failed to remap memory for host_virt"); + goto out_unmap_tbl; + } + trace_pvrdma_map_to_pdir_host_virt(curr_page, host_virt); + + rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE); + + dir_idx = 0; + tbl_idx = 1; + addr_idx = 1; + while (addr_idx < nchunks) { + if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) { + tbl_idx = 0; + dir_idx++; + rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE); + tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to page table %d", dir_idx); + goto out_unmap_host_virt; + } + } + + curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx], + TARGET_PAGE_SIZE); + if (!curr_page) { + rdma_error_report("Failed to map to page %d, dir %d", tbl_idx, + dir_idx); + goto out_unmap_host_virt; + } + + mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED, + host_virt + TARGET_PAGE_SIZE * addr_idx); + + trace_pvrdma_map_to_pdir_next_page(addr_idx, curr_page, host_virt + + TARGET_PAGE_SIZE * addr_idx); + + rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE); + + addr_idx++; + + tbl_idx++; + } + + goto out_unmap_tbl; + +out_unmap_host_virt: + munmap(host_virt, length); + host_virt = NULL; + +out_unmap_tbl: + rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE); + +out_unmap_dir: + rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE); + + return host_virt; +} + +static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_query_port *cmd = &req->query_port; + struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp; + struct pvrdma_port_attr attrs = {}; + + if (cmd->port_num > MAX_PORTS) { + return -EINVAL; + } + + if (rdma_backend_query_port(&dev->backend_dev, + (struct ibv_port_attr *)&attrs)) { + return -ENOMEM; + } + + 
memset(resp, 0, sizeof(*resp)); + + resp->attrs.state = dev->func0->device_active ? attrs.state : + PVRDMA_PORT_DOWN; + resp->attrs.max_mtu = attrs.max_mtu; + resp->attrs.active_mtu = attrs.active_mtu; + resp->attrs.phys_state = attrs.phys_state; + resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len); + resp->attrs.max_msg_sz = 1024; + resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len); + resp->attrs.active_width = 1; + resp->attrs.active_speed = 1; + + return 0; +} + +static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey; + struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp; + + if (cmd->port_num > MAX_PORTS) { + return -EINVAL; + } + + if (cmd->index > MAX_PKEYS) { + return -EINVAL; + } + + memset(resp, 0, sizeof(*resp)); + + resp->pkey = PVRDMA_PKEY; + + return 0; +} + +static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_pd *cmd = &req->create_pd; + struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp; + int rc; + + memset(resp, 0, sizeof(*resp)); + rc = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev, + &resp->pd_handle, cmd->ctx_handle); + + return rc; +} + +static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd; + + rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle); + + return 0; +} + +static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_mr *cmd = &req->create_mr; + struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp; + PCIDevice *pci_dev = PCI_DEVICE(dev); + void *host_virt = NULL; + int rc = 0; + + memset(resp, 0, sizeof(*resp)); + + if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) { + host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks, + cmd->length); + if (!host_virt) { + rdma_error_report("Failed to map to pdir"); + return -EINVAL; + } + } + + rc = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle, cmd->start, + cmd->length, host_virt, cmd->access_flags, + &resp->mr_handle, &resp->lkey, &resp->rkey); + if (rc && host_virt) { + munmap(host_virt, cmd->length); + } + + return rc; +} + +static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr; + + rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle); + + return 0; +} + +static int create_cq_ring(PCIDevice *pci_dev , PvrdmaRing **ring, + uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe) +{ + uint64_t *dir = NULL, *tbl = NULL; + PvrdmaRing *r; + int rc = -EINVAL; + char ring_name[MAX_RING_NAME_SZ]; + + if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) { + rdma_error_report("Got invalid nchunks: %d", nchunks); + return rc; + } + + dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE); + if (!dir) { + rdma_error_report("Failed to map to CQ page directory"); + goto out; + } + + tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to CQ page table"); + goto out; + } + + r = g_malloc(sizeof(*r)); + *ring = r; + + r->ring_state = (PvrdmaRingState *) + rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); + + if (!r->ring_state) { + rdma_error_report("Failed to map to CQ ring state"); + goto out_free_ring; + } + + sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma); + rc = 
pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], + cqe, sizeof(struct pvrdma_cqe), + /* first page is ring state */ + (dma_addr_t *)&tbl[1], nchunks - 1); + if (rc) { + goto out_unmap_ring_state; + } + + goto out; + +out_unmap_ring_state: + /* ring_state was in slot 1, not 0 so need to jump back */ + rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE); + +out_free_ring: + g_free(r); + +out: + rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE); + rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE); + + return rc; +} + +static void destroy_cq_ring(PvrdmaRing *ring) +{ + pvrdma_ring_free(ring); + /* ring_state was in slot 1, not 0 so need to jump back */ + rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE); + g_free(ring); +} + +static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_cq *cmd = &req->create_cq; + struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp; + PvrdmaRing *ring = NULL; + int rc; + + memset(resp, 0, sizeof(*resp)); + + resp->cqe = cmd->cqe; + + rc = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, cmd->nchunks, + cmd->cqe); + if (rc) { + return rc; + } + + rc = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev, cmd->cqe, + &resp->cq_handle, ring); + if (rc) { + destroy_cq_ring(ring); + } + + resp->cqe = cmd->cqe; + + return rc; +} + +static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq; + RdmaRmCQ *cq; + PvrdmaRing *ring; + + cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle); + if (!cq) { + rdma_error_report("Got invalid CQ handle"); + return -EINVAL; + } + + ring = (PvrdmaRing *)cq->opaque; + destroy_cq_ring(ring); + + rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle); + + return 0; +} + +static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma, + PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge, + uint32_t spages, uint32_t rcqe, uint32_t rmax_sge, + uint32_t rpages, uint8_t is_srq) +{ + uint64_t *dir = NULL, *tbl = NULL; + PvrdmaRing *sr, *rr; + int rc = -EINVAL; + char ring_name[MAX_RING_NAME_SZ]; + uint32_t wqe_sz; + + if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES) { + rdma_error_report("Got invalid send page count for QP ring: %d", + spages); + return rc; + } + + if (!is_srq && (!rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES)) { + rdma_error_report("Got invalid recv page count for QP ring: %d", + rpages); + return rc; + } + + dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE); + if (!dir) { + rdma_error_report("Failed to map to QP page directory"); + goto out; + } + + tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to QP page table"); + goto out; + } + + if (!is_srq) { + sr = g_malloc(2 * sizeof(*rr)); + rr = &sr[1]; + } else { + sr = g_malloc(sizeof(*sr)); + } + + *rings = sr; + + /* Create send ring */ + sr->ring_state = (PvrdmaRingState *) + rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); + if (!sr->ring_state) { + rdma_error_report("Failed to map to QP ring state"); + goto out_free_sr_mem; + } + + wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) + + sizeof(struct pvrdma_sge) * smax_sge - 1); + + sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma); + rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state, + scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages); + if (rc) { + goto out_unmap_ring_state; + } + + if (!is_srq) { + /* Create recv ring */ 
+ rr->ring_state = &sr->ring_state[1]; + wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) + + sizeof(struct pvrdma_sge) * rmax_sge - 1); + sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma); + rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state, + rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages], + rpages); + if (rc) { + goto out_free_sr; + } + } + + goto out; + +out_free_sr: + pvrdma_ring_free(sr); + +out_unmap_ring_state: + rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE); + +out_free_sr_mem: + g_free(sr); + +out: + rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE); + rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE); + + return rc; +} + +static void destroy_qp_rings(PvrdmaRing *ring, uint8_t is_srq) +{ + pvrdma_ring_free(&ring[0]); + if (!is_srq) { + pvrdma_ring_free(&ring[1]); + } + + rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE); + g_free(ring); +} + +static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_qp *cmd = &req->create_qp; + struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp; + PvrdmaRing *rings = NULL; + int rc; + + memset(resp, 0, sizeof(*resp)); + + rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings, + cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks, + cmd->max_recv_wr, cmd->max_recv_sge, + cmd->total_chunks - cmd->send_chunks - 1, cmd->is_srq); + if (rc) { + return rc; + } + + rc = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle, cmd->qp_type, + cmd->max_send_wr, cmd->max_send_sge, + cmd->send_cq_handle, cmd->max_recv_wr, + cmd->max_recv_sge, cmd->recv_cq_handle, rings, + &resp->qpn, cmd->is_srq, cmd->srq_handle); + if (rc) { + destroy_qp_rings(rings, cmd->is_srq); + return rc; + } + + resp->max_send_wr = cmd->max_send_wr; + resp->max_recv_wr = cmd->max_recv_wr; + resp->max_send_sge = cmd->max_send_sge; + resp->max_recv_sge = cmd->max_recv_sge; + resp->max_inline_data = cmd->max_inline_data; + + return 0; +} + +static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp; + int rc; + + /* No need to verify sgid_index since it is u8 */ + + rc = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev, + cmd->qp_handle, cmd->attr_mask, + cmd->attrs.ah_attr.grh.sgid_index, + (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid, + cmd->attrs.dest_qp_num, + (enum ibv_qp_state)cmd->attrs.qp_state, + cmd->attrs.qkey, cmd->attrs.rq_psn, + cmd->attrs.sq_psn); + + return rc; +} + +static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_query_qp *cmd = &req->query_qp; + struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp; + struct ibv_qp_init_attr init_attr; + int rc; + + memset(resp, 0, sizeof(*resp)); + + rc = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle, + (struct ibv_qp_attr *)&resp->attrs, cmd->attr_mask, + &init_attr); + + return rc; +} + +static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp; + RdmaRmQP *qp; + PvrdmaRing *ring; + + qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle); + if (!qp) { + return -EINVAL; + } + + ring = (PvrdmaRing *)qp->opaque; + destroy_qp_rings(ring, qp->is_srq); + rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle); + + return 0; +} + +static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ 
+ struct pvrdma_cmd_create_bind *cmd = &req->create_bind; + int rc; + union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid; + + if (cmd->index >= MAX_PORT_GIDS) { + return -EINVAL; + } + + rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev, + dev->backend_eth_device_name, gid, cmd->index); + + return rc; +} + +static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + int rc; + + struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind; + + if (cmd->index >= MAX_PORT_GIDS) { + return -EINVAL; + } + + rc = rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev, + dev->backend_eth_device_name, cmd->index); + + return rc; +} + +static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_uc *cmd = &req->create_uc; + struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp; + int rc; + + memset(resp, 0, sizeof(*resp)); + rc = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle); + + return rc; +} + +static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc; + + rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle); + + return 0; +} + +static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring, + uint64_t pdir_dma, uint32_t max_wr, + uint32_t max_sge, uint32_t nchunks) +{ + uint64_t *dir = NULL, *tbl = NULL; + PvrdmaRing *r; + int rc = -EINVAL; + char ring_name[MAX_RING_NAME_SZ]; + uint32_t wqe_sz; + + if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) { + rdma_error_report("Got invalid page count for SRQ ring: %d", + nchunks); + return rc; + } + + dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE); + if (!dir) { + rdma_error_report("Failed to map to SRQ page directory"); + goto out; + } + + tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to SRQ page table"); + goto out; + } + + r = g_malloc(sizeof(*r)); + *ring = r; + + r->ring_state = (PvrdmaRingState *) + rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); + if (!r->ring_state) { + rdma_error_report("Failed to map to SRQ ring state"); + goto out_free_ring_mem; + } + + wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) + + sizeof(struct pvrdma_sge) * max_sge - 1); + sprintf(ring_name, "srq_ring_%" PRIx64, pdir_dma); + rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], max_wr, + wqe_sz, (dma_addr_t *)&tbl[1], nchunks - 1); + if (rc) { + goto out_unmap_ring_state; + } + + goto out; + +out_unmap_ring_state: + rdma_pci_dma_unmap(pci_dev, r->ring_state, TARGET_PAGE_SIZE); + +out_free_ring_mem: + g_free(r); + +out: + rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE); + rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE); + + return rc; +} + +static void destroy_srq_ring(PvrdmaRing *ring) +{ + pvrdma_ring_free(ring); + rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE); + g_free(ring); +} + +static int create_srq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_srq *cmd = &req->create_srq; + struct pvrdma_cmd_create_srq_resp *resp = &rsp->create_srq_resp; + PvrdmaRing *ring = NULL; + int rc; + + memset(resp, 0, sizeof(*resp)); + + rc = create_srq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, + cmd->attrs.max_wr, cmd->attrs.max_sge, + cmd->nchunks); + if (rc) { + return rc; + } + + rc = rdma_rm_alloc_srq(&dev->rdma_dev_res, cmd->pd_handle, + cmd->attrs.max_wr,
cmd->attrs.max_sge, + cmd->attrs.srq_limit, &resp->srqn, ring); + if (rc) { + destroy_srq_ring(ring); + return rc; + } + + return 0; +} + +static int query_srq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_query_srq *cmd = &req->query_srq; + struct pvrdma_cmd_query_srq_resp *resp = &rsp->query_srq_resp; + + memset(resp, 0, sizeof(*resp)); + + return rdma_rm_query_srq(&dev->rdma_dev_res, cmd->srq_handle, + (struct ibv_srq_attr *)&resp->attrs); +} + +static int modify_srq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_modify_srq *cmd = &req->modify_srq; + + /* Only support SRQ limit */ + if (!(cmd->attr_mask & IBV_SRQ_LIMIT) || + (cmd->attr_mask & IBV_SRQ_MAX_WR)) + return -EINVAL; + + return rdma_rm_modify_srq(&dev->rdma_dev_res, cmd->srq_handle, + (struct ibv_srq_attr *)&cmd->attrs, + cmd->attr_mask); +} + +static int destroy_srq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_srq *cmd = &req->destroy_srq; + RdmaRmSRQ *srq; + PvrdmaRing *ring; + + srq = rdma_rm_get_srq(&dev->rdma_dev_res, cmd->srq_handle); + if (!srq) { + return -EINVAL; + } + + ring = (PvrdmaRing *)srq->opaque; + destroy_srq_ring(ring); + rdma_rm_dealloc_srq(&dev->rdma_dev_res, cmd->srq_handle); + + return 0; +} + +struct cmd_handler { + uint32_t cmd; + uint32_t ack; + int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp); +}; + +static struct cmd_handler cmd_handlers[] = { + {PVRDMA_CMD_QUERY_PORT, PVRDMA_CMD_QUERY_PORT_RESP, query_port}, + {PVRDMA_CMD_QUERY_PKEY, PVRDMA_CMD_QUERY_PKEY_RESP, query_pkey}, + {PVRDMA_CMD_CREATE_PD, PVRDMA_CMD_CREATE_PD_RESP, create_pd}, + {PVRDMA_CMD_DESTROY_PD, PVRDMA_CMD_DESTROY_PD_RESP_NOOP, destroy_pd}, + {PVRDMA_CMD_CREATE_MR, PVRDMA_CMD_CREATE_MR_RESP, create_mr}, + {PVRDMA_CMD_DESTROY_MR, PVRDMA_CMD_DESTROY_MR_RESP_NOOP, destroy_mr}, + {PVRDMA_CMD_CREATE_CQ, PVRDMA_CMD_CREATE_CQ_RESP, create_cq}, + {PVRDMA_CMD_RESIZE_CQ, PVRDMA_CMD_RESIZE_CQ_RESP, NULL}, + {PVRDMA_CMD_DESTROY_CQ, PVRDMA_CMD_DESTROY_CQ_RESP_NOOP, destroy_cq}, + {PVRDMA_CMD_CREATE_QP, PVRDMA_CMD_CREATE_QP_RESP, create_qp}, + {PVRDMA_CMD_MODIFY_QP, PVRDMA_CMD_MODIFY_QP_RESP, modify_qp}, + {PVRDMA_CMD_QUERY_QP, PVRDMA_CMD_QUERY_QP_RESP, query_qp}, + {PVRDMA_CMD_DESTROY_QP, PVRDMA_CMD_DESTROY_QP_RESP, destroy_qp}, + {PVRDMA_CMD_CREATE_UC, PVRDMA_CMD_CREATE_UC_RESP, create_uc}, + {PVRDMA_CMD_DESTROY_UC, PVRDMA_CMD_DESTROY_UC_RESP_NOOP, destroy_uc}, + {PVRDMA_CMD_CREATE_BIND, PVRDMA_CMD_CREATE_BIND_RESP_NOOP, create_bind}, + {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind}, + {PVRDMA_CMD_CREATE_SRQ, PVRDMA_CMD_CREATE_SRQ_RESP, create_srq}, + {PVRDMA_CMD_QUERY_SRQ, PVRDMA_CMD_QUERY_SRQ_RESP, query_srq}, + {PVRDMA_CMD_MODIFY_SRQ, PVRDMA_CMD_MODIFY_SRQ_RESP, modify_srq}, + {PVRDMA_CMD_DESTROY_SRQ, PVRDMA_CMD_DESTROY_SRQ_RESP, destroy_srq}, +}; + +int pvrdma_exec_cmd(PVRDMADev *dev) +{ + int err = 0xFFFF; + DSRInfo *dsr_info; + + dsr_info = &dev->dsr_info; + + if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) / + sizeof(struct cmd_handler)) { + rdma_error_report("Unsupported command"); + goto out; + } + + if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) { + rdma_error_report("Unsupported command (not implemented yet)"); + goto out; + } + + err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req, + dsr_info->rsp); + dsr_info->rsp->hdr.response = dsr_info->req->hdr.response; + dsr_info->rsp->hdr.ack = 
cmd_handlers[dsr_info->req->hdr.cmd].ack; + dsr_info->rsp->hdr.err = err < 0 ? -err : 0; + + trace_pvrdma_exec_cmd(dsr_info->req->hdr.cmd, dsr_info->rsp->hdr.err); + + dev->stats.commands++; + +out: + set_reg_val(dev, PVRDMA_REG_ERR, err); + post_interrupt(dev, INTR_VEC_CMD_RING); + + return (err == 0) ? 0 : -EINVAL; +} diff --git a/hw/rdma/vmw/pvrdma_dev_ring.c b/hw/rdma/vmw/pvrdma_dev_ring.c new file mode 100644 index 000000000..42130667a --- /dev/null +++ b/hw/rdma/vmw/pvrdma_dev_ring.c @@ -0,0 +1,142 @@ +/* + * QEMU paravirtual RDMA - Device rings + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia <yuval.shaia@oracle.com> + * Marcel Apfelbaum <marcel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "hw/pci/pci.h" +#include "cpu.h" +#include "qemu/cutils.h" + +#include "trace.h" + +#include "../rdma_utils.h" +#include "pvrdma_dev_ring.h" + +int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev, + PvrdmaRingState *ring_state, uint32_t max_elems, + size_t elem_sz, dma_addr_t *tbl, uint32_t npages) +{ + int i; + int rc = 0; + + pstrcpy(ring->name, MAX_RING_NAME_SZ, name); + ring->dev = dev; + ring->ring_state = ring_state; + ring->max_elems = max_elems; + ring->elem_sz = elem_sz; + /* TODO: Give a moment to think if we want to redo driver settings + qatomic_set(&ring->ring_state->prod_tail, 0); + qatomic_set(&ring->ring_state->cons_head, 0); + */ + ring->npages = npages; + ring->pages = g_malloc0(npages * sizeof(void *)); + + for (i = 0; i < npages; i++) { + if (!tbl[i]) { + rdma_error_report("npages=%d but tbl[%d] is NULL", npages, i); + continue; + } + + ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE); + if (!ring->pages[i]) { + rc = -ENOMEM; + rdma_error_report("Failed to map to page %d in ring %s", i, name); + goto out_free; + } + memset(ring->pages[i], 0, TARGET_PAGE_SIZE); + } + + goto out; + +out_free: + while (i--) { + rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE); + } + g_free(ring->pages); + +out: + return rc; +} + +void *pvrdma_ring_next_elem_read(PvrdmaRing *ring) +{ + unsigned int idx, offset; + const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail); + const uint32_t head = qatomic_read(&ring->ring_state->cons_head); + + if (tail & ~((ring->max_elems << 1) - 1) || + head & ~((ring->max_elems << 1) - 1) || + tail == head) { + trace_pvrdma_ring_next_elem_read_no_data(ring->name); + return NULL; + } + + idx = head & (ring->max_elems - 1); + offset = idx * ring->elem_sz; + return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE); +} + +void pvrdma_ring_read_inc(PvrdmaRing *ring) +{ + uint32_t idx = qatomic_read(&ring->ring_state->cons_head); + + idx = (idx + 1) & ((ring->max_elems << 1) - 1); + qatomic_set(&ring->ring_state->cons_head, idx); +} + +void *pvrdma_ring_next_elem_write(PvrdmaRing *ring) +{ + unsigned int idx, offset; + const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail); + const uint32_t head = qatomic_read(&ring->ring_state->cons_head); + + if (tail & ~((ring->max_elems << 1) - 1) || + head & ~((ring->max_elems << 1) - 1) || + tail == (head ^ ring->max_elems)) { + rdma_error_report("CQ is full"); + return NULL; + } + + idx = tail & (ring->max_elems - 1); + offset = idx * ring->elem_sz; + return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE); 
+} + +void pvrdma_ring_write_inc(PvrdmaRing *ring) +{ + uint32_t idx = qatomic_read(&ring->ring_state->prod_tail); + + idx = (idx + 1) & ((ring->max_elems << 1) - 1); + qatomic_set(&ring->ring_state->prod_tail, idx); +} + +void pvrdma_ring_free(PvrdmaRing *ring) +{ + if (!ring) { + return; + } + + if (!ring->pages) { + return; + } + + while (ring->npages--) { + rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages], + TARGET_PAGE_SIZE); + } + + g_free(ring->pages); + ring->pages = NULL; +} diff --git a/hw/rdma/vmw/pvrdma_dev_ring.h b/hw/rdma/vmw/pvrdma_dev_ring.h new file mode 100644 index 000000000..d231588ce --- /dev/null +++ b/hw/rdma/vmw/pvrdma_dev_ring.h @@ -0,0 +1,46 @@ +/* + * QEMU VMWARE paravirtual RDMA ring utilities + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia <yuval.shaia@oracle.com> + * Marcel Apfelbaum <marcel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef PVRDMA_DEV_RING_H +#define PVRDMA_DEV_RING_H + + +#define MAX_RING_NAME_SZ 32 + +typedef struct PvrdmaRingState { + int prod_tail; /* producer tail */ + int cons_head; /* consumer head */ +} PvrdmaRingState; + +typedef struct PvrdmaRing { + char name[MAX_RING_NAME_SZ]; + PCIDevice *dev; + uint32_t max_elems; + size_t elem_sz; + PvrdmaRingState *ring_state; /* used only for unmap */ + int npages; + void **pages; +} PvrdmaRing; + +int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev, + PvrdmaRingState *ring_state, uint32_t max_elems, + size_t elem_sz, dma_addr_t *tbl, uint32_t npages); +void *pvrdma_ring_next_elem_read(PvrdmaRing *ring); +void pvrdma_ring_read_inc(PvrdmaRing *ring); +void *pvrdma_ring_next_elem_write(PvrdmaRing *ring); +void pvrdma_ring_write_inc(PvrdmaRing *ring); +void pvrdma_ring_free(PvrdmaRing *ring); + +#endif diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c new file mode 100644 index 000000000..91206dbb8 --- /dev/null +++ b/hw/rdma/vmw/pvrdma_main.c @@ -0,0 +1,723 @@ +/* + * QEMU paravirtual RDMA + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia <yuval.shaia@oracle.com> + * Marcel Apfelbaum <marcel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_ids.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "cpu.h" +#include "trace.h" +#include "monitor/monitor.h" +#include "hw/rdma/rdma.h" + +#include "../rdma_rm.h" +#include "../rdma_backend.h" +#include "../rdma_utils.h" + +#include <infiniband/verbs.h> +#include "pvrdma.h" +#include "standard-headers/rdma/vmw_pvrdma-abi.h" +#include "sysemu/runstate.h" +#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h" +#include "pvrdma_qp_ops.h" + +static Property pvrdma_dev_properties[] = { + DEFINE_PROP_STRING("netdev", PVRDMADev, backend_eth_device_name), + DEFINE_PROP_STRING("ibdev", PVRDMADev, backend_device_name), + DEFINE_PROP_UINT8("ibport", PVRDMADev, backend_port_num, 1), + DEFINE_PROP_UINT64("dev-caps-max-mr-size", PVRDMADev, dev_attr.max_mr_size, + MAX_MR_SIZE), + DEFINE_PROP_INT32("dev-caps-max-qp", PVRDMADev, dev_attr.max_qp, MAX_QP), + DEFINE_PROP_INT32("dev-caps-max-cq", PVRDMADev, dev_attr.max_cq, MAX_CQ), + DEFINE_PROP_INT32("dev-caps-max-mr", PVRDMADev, dev_attr.max_mr, MAX_MR), + DEFINE_PROP_INT32("dev-caps-max-pd", PVRDMADev, dev_attr.max_pd, MAX_PD), + DEFINE_PROP_INT32("dev-caps-qp-rd-atom", PVRDMADev, dev_attr.max_qp_rd_atom, + MAX_QP_RD_ATOM), + DEFINE_PROP_INT32("dev-caps-max-qp-init-rd-atom", PVRDMADev, + dev_attr.max_qp_init_rd_atom, MAX_QP_INIT_RD_ATOM), + DEFINE_PROP_INT32("dev-caps-max-ah", PVRDMADev, dev_attr.max_ah, MAX_AH), + DEFINE_PROP_INT32("dev-caps-max-srq", PVRDMADev, dev_attr.max_srq, MAX_SRQ), + DEFINE_PROP_CHR("mad-chardev", PVRDMADev, mad_chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pvrdma_format_statistics(RdmaProvider *obj, GString *buf) +{ + PVRDMADev *dev = PVRDMA_DEV(obj); + PCIDevice *pdev = PCI_DEVICE(dev); + + g_string_append_printf(buf, "%s, %x.%x\n", + pdev->name, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + g_string_append_printf(buf, "\tcommands : %" PRId64 "\n", + dev->stats.commands); + g_string_append_printf(buf, "\tregs_reads : %" PRId64 "\n", + dev->stats.regs_reads); + g_string_append_printf(buf, "\tregs_writes : %" PRId64 "\n", + dev->stats.regs_writes); + g_string_append_printf(buf, "\tuar_writes : %" PRId64 "\n", + dev->stats.uar_writes); + g_string_append_printf(buf, "\tinterrupts : %" PRId64 "\n", + dev->stats.interrupts); + rdma_format_device_counters(&dev->rdma_dev_res, buf); +} + +static void free_dev_ring(PCIDevice *pci_dev, PvrdmaRing *ring, + void *ring_state) +{ + pvrdma_ring_free(ring); + rdma_pci_dma_unmap(pci_dev, ring_state, TARGET_PAGE_SIZE); +} + +static int init_dev_ring(PvrdmaRing *ring, PvrdmaRingState **ring_state, + const char *name, PCIDevice *pci_dev, + dma_addr_t dir_addr, uint32_t num_pages) +{ + uint64_t *dir, *tbl; + int rc = 0; + + if (!num_pages) { + rdma_error_report("Ring pages count must be strictly positive"); + return -EINVAL; + } + + dir = rdma_pci_dma_map(pci_dev, dir_addr, TARGET_PAGE_SIZE); + if (!dir) { + rdma_error_report("Failed to map to page directory (ring %s)", name); + rc = -ENOMEM; + goto out; + } + tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to page table (ring %s)", name); + rc = -ENOMEM; + goto out_free_dir; + } + + *ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); + if (!*ring_state) { + rdma_error_report("Failed to map to ring state (ring %s)", 
name); + rc = -ENOMEM; + goto out_free_tbl; + } + /* RX ring is the second */ + (*ring_state)++; + rc = pvrdma_ring_init(ring, name, pci_dev, + (PvrdmaRingState *)*ring_state, + (num_pages - 1) * TARGET_PAGE_SIZE / + sizeof(struct pvrdma_cqne), + sizeof(struct pvrdma_cqne), + (dma_addr_t *)&tbl[1], (dma_addr_t)num_pages - 1); + if (rc) { + rc = -ENOMEM; + goto out_free_ring_state; + } + + goto out_free_tbl; + +out_free_ring_state: + rdma_pci_dma_unmap(pci_dev, *ring_state, TARGET_PAGE_SIZE); + +out_free_tbl: + rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE); + +out_free_dir: + rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE); + +out: + return rc; +} + +static void free_dsr(PVRDMADev *dev) +{ + PCIDevice *pci_dev = PCI_DEVICE(dev); + + if (!dev->dsr_info.dsr) { + return; + } + + free_dev_ring(pci_dev, &dev->dsr_info.async, + dev->dsr_info.async_ring_state); + + free_dev_ring(pci_dev, &dev->dsr_info.cq, dev->dsr_info.cq_ring_state); + + rdma_pci_dma_unmap(pci_dev, dev->dsr_info.req, + sizeof(union pvrdma_cmd_req)); + + rdma_pci_dma_unmap(pci_dev, dev->dsr_info.rsp, + sizeof(union pvrdma_cmd_resp)); + + rdma_pci_dma_unmap(pci_dev, dev->dsr_info.dsr, + sizeof(struct pvrdma_device_shared_region)); + + dev->dsr_info.dsr = NULL; +} + +static int load_dsr(PVRDMADev *dev) +{ + int rc = 0; + PCIDevice *pci_dev = PCI_DEVICE(dev); + DSRInfo *dsr_info; + struct pvrdma_device_shared_region *dsr; + + free_dsr(dev); + + /* Map to DSR */ + dev->dsr_info.dsr = rdma_pci_dma_map(pci_dev, dev->dsr_info.dma, + sizeof(struct pvrdma_device_shared_region)); + if (!dev->dsr_info.dsr) { + rdma_error_report("Failed to map to DSR"); + rc = -ENOMEM; + goto out; + } + + /* Shortcuts */ + dsr_info = &dev->dsr_info; + dsr = dsr_info->dsr; + + /* Map to command slot */ + dsr_info->req = rdma_pci_dma_map(pci_dev, dsr->cmd_slot_dma, + sizeof(union pvrdma_cmd_req)); + if (!dsr_info->req) { + rdma_error_report("Failed to map to command slot address"); + rc = -ENOMEM; + goto out_free_dsr; + } + + /* Map to response slot */ + dsr_info->rsp = rdma_pci_dma_map(pci_dev, dsr->resp_slot_dma, + sizeof(union pvrdma_cmd_resp)); + if (!dsr_info->rsp) { + rdma_error_report("Failed to map to response slot address"); + rc = -ENOMEM; + goto out_free_req; + } + + /* Map to CQ notification ring */ + rc = init_dev_ring(&dsr_info->cq, &dsr_info->cq_ring_state, "dev_cq", + pci_dev, dsr->cq_ring_pages.pdir_dma, + dsr->cq_ring_pages.num_pages); + if (rc) { + rc = -ENOMEM; + goto out_free_rsp; + } + + /* Map to event notification ring */ + rc = init_dev_ring(&dsr_info->async, &dsr_info->async_ring_state, + "dev_async", pci_dev, dsr->async_ring_pages.pdir_dma, + dsr->async_ring_pages.num_pages); + if (rc) { + rc = -ENOMEM; + goto out_free_rsp; + } + + goto out; + +out_free_rsp: + rdma_pci_dma_unmap(pci_dev, dsr_info->rsp, sizeof(union pvrdma_cmd_resp)); + +out_free_req: + rdma_pci_dma_unmap(pci_dev, dsr_info->req, sizeof(union pvrdma_cmd_req)); + +out_free_dsr: + rdma_pci_dma_unmap(pci_dev, dsr_info->dsr, + sizeof(struct pvrdma_device_shared_region)); + dsr_info->dsr = NULL; + +out: + return rc; +} + +static void init_dsr_dev_caps(PVRDMADev *dev) +{ + struct pvrdma_device_shared_region *dsr; + + if (dev->dsr_info.dsr == NULL) { + rdma_error_report("Can't initialize DSR"); + return; + } + + dsr = dev->dsr_info.dsr; + dsr->caps.fw_ver = PVRDMA_FW_VERSION; + dsr->caps.mode = PVRDMA_DEVICE_MODE_ROCE; + dsr->caps.gid_types |= PVRDMA_GID_TYPE_FLAG_ROCE_V1; + dsr->caps.max_uar = RDMA_BAR2_UAR_SIZE; + dsr->caps.max_mr_size = dev->dev_attr.max_mr_size;
+ dsr->caps.max_qp = dev->dev_attr.max_qp; + dsr->caps.max_qp_wr = dev->dev_attr.max_qp_wr; + dsr->caps.max_sge = dev->dev_attr.max_sge; + dsr->caps.max_cq = dev->dev_attr.max_cq; + dsr->caps.max_cqe = dev->dev_attr.max_cqe; + dsr->caps.max_mr = dev->dev_attr.max_mr; + dsr->caps.max_pd = dev->dev_attr.max_pd; + dsr->caps.max_ah = dev->dev_attr.max_ah; + dsr->caps.max_srq = dev->dev_attr.max_srq; + dsr->caps.max_srq_wr = dev->dev_attr.max_srq_wr; + dsr->caps.max_srq_sge = dev->dev_attr.max_srq_sge; + dsr->caps.gid_tbl_len = MAX_GIDS; + dsr->caps.sys_image_guid = 0; + dsr->caps.node_guid = dev->node_guid; + dsr->caps.phys_port_cnt = MAX_PORTS; + dsr->caps.max_pkeys = MAX_PKEYS; +} + +static void uninit_msix(PCIDevice *pdev, int used_vectors) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + int i; + + for (i = 0; i < used_vectors; i++) { + msix_vector_unuse(pdev, i); + } + + msix_uninit(pdev, &dev->msix, &dev->msix); +} + +static int init_msix(PCIDevice *pdev) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + int i; + int rc; + + rc = msix_init(pdev, RDMA_MAX_INTRS, &dev->msix, RDMA_MSIX_BAR_IDX, + RDMA_MSIX_TABLE, &dev->msix, RDMA_MSIX_BAR_IDX, + RDMA_MSIX_PBA, 0, NULL); + + if (rc < 0) { + rdma_error_report("Failed to initialize MSI-X"); + return rc; + } + + for (i = 0; i < RDMA_MAX_INTRS; i++) { + rc = msix_vector_use(PCI_DEVICE(dev), i); + if (rc < 0) { + rdma_error_report("Failed to use MSI-X vector %d", i); + uninit_msix(pdev, i); + return rc; + } + } + + return 0; +} + +static void pvrdma_fini(PCIDevice *pdev) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + + notifier_remove(&dev->shutdown_notifier); + + pvrdma_qp_ops_fini(); + + rdma_backend_stop(&dev->backend_dev); + + rdma_rm_fini(&dev->rdma_dev_res, &dev->backend_dev, + dev->backend_eth_device_name); + + rdma_backend_fini(&dev->backend_dev); + + free_dsr(dev); + + if (msix_enabled(pdev)) { + uninit_msix(pdev, RDMA_MAX_INTRS); + } + + rdma_info_report("Device %s %x.%x is down", pdev->name, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); +} + +static void pvrdma_stop(PVRDMADev *dev) +{ + rdma_backend_stop(&dev->backend_dev); +} + +static void pvrdma_start(PVRDMADev *dev) +{ + rdma_backend_start(&dev->backend_dev); +} + +static void activate_device(PVRDMADev *dev) +{ + pvrdma_start(dev); + set_reg_val(dev, PVRDMA_REG_ERR, 0); +} + +static int unquiesce_device(PVRDMADev *dev) +{ + return 0; +} + +static void reset_device(PVRDMADev *dev) +{ + pvrdma_stop(dev); +} + +static uint64_t pvrdma_regs_read(void *opaque, hwaddr addr, unsigned size) +{ + PVRDMADev *dev = opaque; + uint32_t val; + + dev->stats.regs_reads++; + + if (get_reg_val(dev, addr, &val)) { + rdma_error_report("Failed to read REG value from address 0x%x", + (uint32_t)addr); + return -EINVAL; + } + + trace_pvrdma_regs_read(addr, val); + + return val; +} + +static void pvrdma_regs_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + PVRDMADev *dev = opaque; + + dev->stats.regs_writes++; + + if (set_reg_val(dev, addr, val)) { + rdma_error_report("Failed to set REG value, addr=0x%"PRIx64 ", val=0x%"PRIx64, + addr, val); + return; + } + + switch (addr) { + case PVRDMA_REG_DSRLOW: + trace_pvrdma_regs_write(addr, val, "DSRLOW", ""); + dev->dsr_info.dma = val; + break; + case PVRDMA_REG_DSRHIGH: + trace_pvrdma_regs_write(addr, val, "DSRHIGH", ""); + dev->dsr_info.dma |= val << 32; + load_dsr(dev); + init_dsr_dev_caps(dev); + break; + case PVRDMA_REG_CTL: + switch (val) { + case PVRDMA_DEVICE_CTL_ACTIVATE: + trace_pvrdma_regs_write(addr, val, "CTL", "ACTIVATE"); + activate_device(dev); +
break; + case PVRDMA_DEVICE_CTL_UNQUIESCE: + trace_pvrdma_regs_write(addr, val, "CTL", "UNQUIESCE"); + unquiesce_device(dev); + break; + case PVRDMA_DEVICE_CTL_RESET: + trace_pvrdma_regs_write(addr, val, "CTL", "RESET"); + reset_device(dev); + break; + } + break; + case PVRDMA_REG_IMR: + trace_pvrdma_regs_write(addr, val, "INTR_MASK", ""); + dev->interrupt_mask = val; + break; + case PVRDMA_REG_REQUEST: + if (val == 0) { + trace_pvrdma_regs_write(addr, val, "REQUEST", ""); + pvrdma_exec_cmd(dev); + } + break; + default: + break; + } +} + +static const MemoryRegionOps regs_ops = { + .read = pvrdma_regs_read, + .write = pvrdma_regs_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = sizeof(uint32_t), + .max_access_size = sizeof(uint32_t), + }, +}; + +static uint64_t pvrdma_uar_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0xffffffff; +} + +static void pvrdma_uar_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + PVRDMADev *dev = opaque; + + dev->stats.uar_writes++; + + switch (addr & 0xFFF) { /* Mask with 0xFFF as each UC gets page */ + case PVRDMA_UAR_QP_OFFSET: + if (val & PVRDMA_UAR_QP_SEND) { + trace_pvrdma_uar_write(addr, val, "QP", "SEND", + val & PVRDMA_UAR_HANDLE_MASK, 0); + pvrdma_qp_send(dev, val & PVRDMA_UAR_HANDLE_MASK); + } + if (val & PVRDMA_UAR_QP_RECV) { + trace_pvrdma_uar_write(addr, val, "QP", "RECV", + val & PVRDMA_UAR_HANDLE_MASK, 0); + pvrdma_qp_recv(dev, val & PVRDMA_UAR_HANDLE_MASK); + } + break; + case PVRDMA_UAR_CQ_OFFSET: + if (val & PVRDMA_UAR_CQ_ARM) { + trace_pvrdma_uar_write(addr, val, "CQ", "ARM", + val & PVRDMA_UAR_HANDLE_MASK, + !!(val & PVRDMA_UAR_CQ_ARM_SOL)); + rdma_rm_req_notify_cq(&dev->rdma_dev_res, + val & PVRDMA_UAR_HANDLE_MASK, + !!(val & PVRDMA_UAR_CQ_ARM_SOL)); + } + if (val & PVRDMA_UAR_CQ_ARM_SOL) { + trace_pvrdma_uar_write(addr, val, "CQ", "ARMSOL - not supported", 0, + 0); + } + if (val & PVRDMA_UAR_CQ_POLL) { + trace_pvrdma_uar_write(addr, val, "CQ", "POLL", + val & PVRDMA_UAR_HANDLE_MASK, 0); + pvrdma_cq_poll(&dev->rdma_dev_res, val & PVRDMA_UAR_HANDLE_MASK); + } + break; + case PVRDMA_UAR_SRQ_OFFSET: + if (val & PVRDMA_UAR_SRQ_RECV) { + trace_pvrdma_uar_write(addr, val, "QP", "SRQ", + val & PVRDMA_UAR_HANDLE_MASK, 0); + pvrdma_srq_recv(dev, val & PVRDMA_UAR_HANDLE_MASK); + } + break; + default: + rdma_error_report("Unsupported command, addr=0x%"PRIx64", val=0x%"PRIx64, + addr, val); + break; + } +} + +static const MemoryRegionOps uar_ops = { + .read = pvrdma_uar_read, + .write = pvrdma_uar_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = sizeof(uint32_t), + .max_access_size = sizeof(uint32_t), + }, +}; + +static void init_pci_config(PCIDevice *pdev) +{ + pdev->config[PCI_INTERRUPT_PIN] = 1; +} + +static void init_bars(PCIDevice *pdev) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + + /* BAR 0 - MSI-X */ + memory_region_init(&dev->msix, OBJECT(dev), "pvrdma-msix", + RDMA_BAR0_MSIX_SIZE); + pci_register_bar(pdev, RDMA_MSIX_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY, + &dev->msix); + + /* BAR 1 - Registers */ + memset(&dev->regs_data, 0, sizeof(dev->regs_data)); + memory_region_init_io(&dev->regs, OBJECT(dev), &regs_ops, dev, + "pvrdma-regs", sizeof(dev->regs_data)); + pci_register_bar(pdev, RDMA_REG_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY, + &dev->regs); + + /* BAR 2 - UAR */ + memset(&dev->uar_data, 0, sizeof(dev->uar_data)); + memory_region_init_io(&dev->uar, OBJECT(dev), &uar_ops, dev, "rdma-uar", + sizeof(dev->uar_data)); + pci_register_bar(pdev, RDMA_UAR_BAR_IDX,
PCI_BASE_ADDRESS_SPACE_MEMORY, + &dev->uar); +} + +static void init_regs(PCIDevice *pdev) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + + set_reg_val(dev, PVRDMA_REG_VERSION, PVRDMA_HW_VERSION); + set_reg_val(dev, PVRDMA_REG_ERR, 0xFFFF); +} + +static void init_dev_caps(PVRDMADev *dev) +{ + size_t pg_tbl_bytes = TARGET_PAGE_SIZE * + (TARGET_PAGE_SIZE / sizeof(uint64_t)); + size_t wr_sz = MAX(sizeof(struct pvrdma_sq_wqe_hdr), + sizeof(struct pvrdma_rq_wqe_hdr)); + + dev->dev_attr.max_qp_wr = pg_tbl_bytes / + (wr_sz + sizeof(struct pvrdma_sge) * + dev->dev_attr.max_sge) - TARGET_PAGE_SIZE; + /* First page is ring state ^^^^ */ + + dev->dev_attr.max_cqe = pg_tbl_bytes / sizeof(struct pvrdma_cqe) - + TARGET_PAGE_SIZE; /* First page is ring state */ + + dev->dev_attr.max_srq_wr = pg_tbl_bytes / + ((sizeof(struct pvrdma_rq_wqe_hdr) + + sizeof(struct pvrdma_sge)) * + dev->dev_attr.max_sge) - TARGET_PAGE_SIZE; +} + +static int pvrdma_check_ram_shared(Object *obj, void *opaque) +{ + bool *shared = opaque; + + if (object_dynamic_cast(obj, "memory-backend-ram")) { + *shared = object_property_get_bool(obj, "share", NULL); + } + + return 0; +} + +static void pvrdma_shutdown_notifier(Notifier *n, void *opaque) +{ + PVRDMADev *dev = container_of(n, PVRDMADev, shutdown_notifier); + PCIDevice *pci_dev = PCI_DEVICE(dev); + + pvrdma_fini(pci_dev); +} + +static void pvrdma_realize(PCIDevice *pdev, Error **errp) +{ + int rc = 0; + PVRDMADev *dev = PVRDMA_DEV(pdev); + Object *memdev_root; + bool ram_shared = false; + PCIDevice *func0; + + rdma_info_report("Initializing device %s %x.%x", pdev->name, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + + if (TARGET_PAGE_SIZE != qemu_real_host_page_size) { + error_setg(errp, "Target page size must be the same as host page size"); + return; + } + + func0 = pci_get_function_0(pdev); + /* Break if not vmxnet3 device in slot 0 */ + if (strcmp(object_get_typename(OBJECT(func0)), TYPE_VMXNET3)) { + error_setg(errp, "Device on %x.0 must be %s", PCI_SLOT(pdev->devfn), + TYPE_VMXNET3); + return; + } + dev->func0 = VMXNET3(func0); + + addrconf_addr_eui48((unsigned char *)&dev->node_guid, + (const char *)&dev->func0->conf.macaddr.a); + + memdev_root = object_resolve_path("/objects", NULL); + if (memdev_root) { + object_child_foreach(memdev_root, pvrdma_check_ram_shared, &ram_shared); + } + if (!ram_shared) { + error_setg(errp, "Only shared memory backed ram is supported"); + return; + } + + dev->dsr_info.dsr = NULL; + + init_pci_config(pdev); + + init_bars(pdev); + + init_regs(pdev); + + rc = init_msix(pdev); + if (rc) { + goto out; + } + + rc = rdma_backend_init(&dev->backend_dev, pdev, &dev->rdma_dev_res, + dev->backend_device_name, dev->backend_port_num, + &dev->dev_attr, &dev->mad_chr); + if (rc) { + goto out; + } + + init_dev_caps(dev); + + rc = rdma_rm_init(&dev->rdma_dev_res, &dev->dev_attr); + if (rc) { + goto out; + } + + rc = pvrdma_qp_ops_init(); + if (rc) { + goto out; + } + + memset(&dev->stats, 0, sizeof(dev->stats)); + + dev->shutdown_notifier.notify = pvrdma_shutdown_notifier; + qemu_register_shutdown_notifier(&dev->shutdown_notifier); + +#ifdef LEGACY_RDMA_REG_MR + rdma_info_report("Using legacy reg_mr"); +#else + rdma_info_report("Using iova reg_mr"); +#endif + +out: + if (rc) { + pvrdma_fini(pdev); + error_append_hint(errp, "Device failed to load\n"); + } +} + +static void pvrdma_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + RdmaProviderClass *ir = 
RDMA_PROVIDER_CLASS(klass); + + k->realize = pvrdma_realize; + k->vendor_id = PCI_VENDOR_ID_VMWARE; + k->device_id = PCI_DEVICE_ID_VMWARE_PVRDMA; + k->revision = 0x00; + k->class_id = PCI_CLASS_NETWORK_OTHER; + + dc->desc = "RDMA Device"; + device_class_set_props(dc, pvrdma_dev_properties); + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + + ir->format_statistics = pvrdma_format_statistics; +} + +static const TypeInfo pvrdma_info = { + .name = PVRDMA_HW_NAME, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PVRDMADev), + .class_init = pvrdma_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { INTERFACE_RDMA_PROVIDER }, + { } + } +}; + +static void register_types(void) +{ + type_register_static(&pvrdma_info); +} + +type_init(register_types) diff --git a/hw/rdma/vmw/pvrdma_qp_ops.c b/hw/rdma/vmw/pvrdma_qp_ops.c new file mode 100644 index 000000000..8050287a6 --- /dev/null +++ b/hw/rdma/vmw/pvrdma_qp_ops.c @@ -0,0 +1,298 @@ +/* + * QEMU paravirtual RDMA - QP implementation + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia <yuval.shaia@oracle.com> + * Marcel Apfelbaum <marcel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" + +#include "../rdma_utils.h" +#include "../rdma_rm.h" +#include "../rdma_backend.h" + +#include "trace.h" + +#include "pvrdma.h" +#include "standard-headers/rdma/vmw_pvrdma-abi.h" +#include "pvrdma_qp_ops.h" + +typedef struct CompHandlerCtx { + PVRDMADev *dev; + uint32_t cq_handle; + struct pvrdma_cqe cqe; +} CompHandlerCtx; + +/* Send Queue WQE */ +typedef struct PvrdmaSqWqe { + struct pvrdma_sq_wqe_hdr hdr; + struct pvrdma_sge sge[]; +} PvrdmaSqWqe; + +/* Recv Queue WQE */ +typedef struct PvrdmaRqWqe { + struct pvrdma_rq_wqe_hdr hdr; + struct pvrdma_sge sge[]; +} PvrdmaRqWqe; + +/* + * 1. Put CQE on send CQ ring + * 2. Put CQ number on dsr completion ring + * 3. Interrupt host + */ +static int pvrdma_post_cqe(PVRDMADev *dev, uint32_t cq_handle, + struct pvrdma_cqe *cqe, struct ibv_wc *wc) +{ + struct pvrdma_cqe *cqe1; + struct pvrdma_cqne *cqne; + PvrdmaRing *ring; + RdmaRmCQ *cq = rdma_rm_get_cq(&dev->rdma_dev_res, cq_handle); + + if (unlikely(!cq)) { + return -EINVAL; + } + + ring = (PvrdmaRing *)cq->opaque; + + /* Step #1: Put CQE on CQ ring */ + cqe1 = pvrdma_ring_next_elem_write(ring); + if (unlikely(!cqe1)) { + return -EINVAL; + } + + memset(cqe1, 0, sizeof(*cqe1)); + cqe1->wr_id = cqe->wr_id; + cqe1->qp = cqe->qp ? 
cqe->qp : wc->qp_num; + cqe1->opcode = cqe->opcode; + cqe1->status = wc->status; + cqe1->byte_len = wc->byte_len; + cqe1->src_qp = wc->src_qp; + cqe1->wc_flags = wc->wc_flags; + cqe1->vendor_err = wc->vendor_err; + + trace_pvrdma_post_cqe(cq_handle, cq->notify, cqe1->wr_id, cqe1->qp, + cqe1->opcode, cqe1->status, cqe1->byte_len, + cqe1->src_qp, cqe1->wc_flags, cqe1->vendor_err); + + pvrdma_ring_write_inc(ring); + + /* Step #2: Put CQ number on dsr completion ring */ + cqne = pvrdma_ring_next_elem_write(&dev->dsr_info.cq); + if (unlikely(!cqne)) { + return -EINVAL; + } + + cqne->info = cq_handle; + pvrdma_ring_write_inc(&dev->dsr_info.cq); + + if (cq->notify != CNT_CLEAR) { + if (cq->notify == CNT_ARM) { + cq->notify = CNT_CLEAR; + } + post_interrupt(dev, INTR_VEC_CMD_COMPLETION_Q); + } + + return 0; +} + +static void pvrdma_qp_ops_comp_handler(void *ctx, struct ibv_wc *wc) +{ + CompHandlerCtx *comp_ctx = (CompHandlerCtx *)ctx; + + pvrdma_post_cqe(comp_ctx->dev, comp_ctx->cq_handle, &comp_ctx->cqe, wc); + + g_free(ctx); +} + +static void complete_with_error(uint32_t vendor_err, void *ctx) +{ + struct ibv_wc wc = {}; + + wc.status = IBV_WC_GENERAL_ERR; + wc.vendor_err = vendor_err; + + pvrdma_qp_ops_comp_handler(ctx, &wc); +} + +void pvrdma_qp_ops_fini(void) +{ + rdma_backend_unregister_comp_handler(); +} + +int pvrdma_qp_ops_init(void) +{ + rdma_backend_register_comp_handler(pvrdma_qp_ops_comp_handler); + + return 0; +} + +void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle) +{ + RdmaRmQP *qp; + PvrdmaSqWqe *wqe; + PvrdmaRing *ring; + int sgid_idx; + union ibv_gid *sgid; + + qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle); + if (unlikely(!qp)) { + return; + } + + ring = (PvrdmaRing *)qp->opaque; + + wqe = (struct PvrdmaSqWqe *)pvrdma_ring_next_elem_read(ring); + while (wqe) { + CompHandlerCtx *comp_ctx; + + /* Prepare CQE */ + comp_ctx = g_malloc(sizeof(CompHandlerCtx)); + comp_ctx->dev = dev; + comp_ctx->cq_handle = qp->send_cq_handle; + comp_ctx->cqe.wr_id = wqe->hdr.wr_id; + comp_ctx->cqe.qp = qp_handle; + comp_ctx->cqe.opcode = IBV_WC_SEND; + + sgid = rdma_rm_get_gid(&dev->rdma_dev_res, wqe->hdr.wr.ud.av.gid_index); + if (!sgid) { + rdma_error_report("Failed to get gid for idx %d", + wqe->hdr.wr.ud.av.gid_index); + complete_with_error(VENDOR_ERR_INV_GID_IDX, comp_ctx); + continue; + } + + sgid_idx = rdma_rm_get_backend_gid_index(&dev->rdma_dev_res, + &dev->backend_dev, + wqe->hdr.wr.ud.av.gid_index); + if (sgid_idx <= 0) { + rdma_error_report("Failed to get bk sgid_idx for sgid_idx %d", + wqe->hdr.wr.ud.av.gid_index); + complete_with_error(VENDOR_ERR_INV_GID_IDX, comp_ctx); + continue; + } + + if (wqe->hdr.num_sge > dev->dev_attr.max_sge) { + rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge, + dev->dev_attr.max_sge); + complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx); + continue; + } + + rdma_backend_post_send(&dev->backend_dev, &qp->backend_qp, qp->qp_type, + (struct ibv_sge *)&wqe->sge[0], wqe->hdr.num_sge, + sgid_idx, sgid, + (union ibv_gid *)wqe->hdr.wr.ud.av.dgid, + wqe->hdr.wr.ud.remote_qpn, + wqe->hdr.wr.ud.remote_qkey, comp_ctx); + + pvrdma_ring_read_inc(ring); + + wqe = pvrdma_ring_next_elem_read(ring); + } +} + +void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle) +{ + RdmaRmQP *qp; + PvrdmaRqWqe *wqe; + PvrdmaRing *ring; + + qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle); + if (unlikely(!qp)) { + return; + } + + ring = &((PvrdmaRing *)qp->opaque)[1]; + + wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring); + while (wqe) { + 
CompHandlerCtx *comp_ctx; + + /* Prepare CQE */ + comp_ctx = g_malloc(sizeof(CompHandlerCtx)); + comp_ctx->dev = dev; + comp_ctx->cq_handle = qp->recv_cq_handle; + comp_ctx->cqe.wr_id = wqe->hdr.wr_id; + comp_ctx->cqe.qp = qp_handle; + comp_ctx->cqe.opcode = IBV_WC_RECV; + + if (wqe->hdr.num_sge > dev->dev_attr.max_sge) { + rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge, + dev->dev_attr.max_sge); + complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx); + continue; + } + + rdma_backend_post_recv(&dev->backend_dev, &qp->backend_qp, qp->qp_type, + (struct ibv_sge *)&wqe->sge[0], wqe->hdr.num_sge, + comp_ctx); + + pvrdma_ring_read_inc(ring); + + wqe = pvrdma_ring_next_elem_read(ring); + } +} + +void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle) +{ + RdmaRmSRQ *srq; + PvrdmaRqWqe *wqe; + PvrdmaRing *ring; + + srq = rdma_rm_get_srq(&dev->rdma_dev_res, srq_handle); + if (unlikely(!srq)) { + return; + } + + ring = (PvrdmaRing *)srq->opaque; + + wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring); + while (wqe) { + CompHandlerCtx *comp_ctx; + + /* Prepare CQE */ + comp_ctx = g_malloc(sizeof(CompHandlerCtx)); + comp_ctx->dev = dev; + comp_ctx->cq_handle = srq->recv_cq_handle; + comp_ctx->cqe.wr_id = wqe->hdr.wr_id; + comp_ctx->cqe.qp = 0; + comp_ctx->cqe.opcode = IBV_WC_RECV; + + if (wqe->hdr.num_sge > dev->dev_attr.max_sge) { + rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge, + dev->dev_attr.max_sge); + complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx); + continue; + } + + rdma_backend_post_srq_recv(&dev->backend_dev, &srq->backend_srq, + (struct ibv_sge *)&wqe->sge[0], + wqe->hdr.num_sge, + comp_ctx); + + pvrdma_ring_read_inc(ring); + + wqe = pvrdma_ring_next_elem_read(ring); + } + +} + +void pvrdma_cq_poll(RdmaDeviceResources *dev_res, uint32_t cq_handle) +{ + RdmaRmCQ *cq; + + cq = rdma_rm_get_cq(dev_res, cq_handle); + if (!cq) { + return; + } + + rdma_backend_poll_cq(dev_res, &cq->backend_cq); +} diff --git a/hw/rdma/vmw/pvrdma_qp_ops.h b/hw/rdma/vmw/pvrdma_qp_ops.h new file mode 100644 index 000000000..bf2b15c5c --- /dev/null +++ b/hw/rdma/vmw/pvrdma_qp_ops.h @@ -0,0 +1,28 @@ +/* + * QEMU VMWARE paravirtual RDMA QP Operations + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia <yuval.shaia@oracle.com> + * Marcel Apfelbaum <marcel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef PVRDMA_QP_OPS_H +#define PVRDMA_QP_OPS_H + +#include "pvrdma.h" + +int pvrdma_qp_ops_init(void); +void pvrdma_qp_ops_fini(void); +void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle); +void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle); +void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle); +void pvrdma_cq_poll(RdmaDeviceResources *dev_res, uint32_t cq_handle); + +#endif diff --git a/hw/rdma/vmw/trace-events b/hw/rdma/vmw/trace-events new file mode 100644 index 000000000..a6c77e1e1 --- /dev/null +++ b/hw/rdma/vmw/trace-events @@ -0,0 +1,17 @@ +# See docs/devel/tracing.rst for syntax documentation. 
+ +# pvrdma_main.c +pvrdma_regs_read(uint64_t addr, uint64_t val) "pvrdma.regs[0x%"PRIx64"]=0x%"PRIx64 +pvrdma_regs_write(uint64_t addr, uint64_t val, const char *reg_name, const char *val_name) "pvrdma.regs[0x%"PRIx64"]=0x%"PRIx64" (%s %s)" +pvrdma_uar_write(uint64_t addr, uint64_t val, const char *reg_name, const char *val_name, int val1, int val2) "uar[0x%"PRIx64"]=0x%"PRIx64" (cls=%s, op=%s, obj=%d, val=%d)" + +# pvrdma_cmd.c +pvrdma_map_to_pdir_host_virt(void *vfirst, void *vremaped) "mremap %p -> %p" +pvrdma_map_to_pdir_next_page(int page_idx, void *vnext, void *vremaped) "mremap [%d] %p -> %p" +pvrdma_exec_cmd(int cmd, int err) "cmd=%d, err=%d" + +# pvrdma_dev_ring.c +pvrdma_ring_next_elem_read_no_data(char *ring_name) "pvrdma_ring %s is empty" + +# pvrdma_qp_ops.c +pvrdma_post_cqe(uint32_t cq_handle, int notify, uint64_t wr_id, uint64_t qpn, uint32_t op_code, uint32_t status, uint32_t byte_len, uint32_t src_qp, uint32_t wc_flags, uint32_t vendor_err) "cq_handle=%d, notify=%d, wr_id=0x%"PRIx64", qpn=0x%"PRIx64", opcode=%d, status=%d, byte_len=%d, src_qp=%d, wc_flags=%d, vendor_err=%d" diff --git a/hw/rdma/vmw/trace.h b/hw/rdma/vmw/trace.h new file mode 100644 index 000000000..3ebc9fb7a --- /dev/null +++ b/hw/rdma/vmw/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_rdma_vmw.h"
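
The ring helpers in pvrdma_dev_ring.c above keep prod_tail and cons_head running modulo twice max_elems, so an empty ring (head == tail) can be told apart from a full one (tail == head ^ max_elems) without sacrificing a slot; the low bits (idx & (max_elems - 1)) select the actual slot. Below is a minimal standalone sketch of that index scheme, assuming max_elems is a power of two; the demo size and names are illustrative and not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical demo size; the scheme only requires a power of two. */
#define MAX_ELEMS 4u

/* Map a free-running index to a slot, as in pvrdma_ring_next_elem_read/write. */
static uint32_t ring_slot(uint32_t idx) { return idx & (MAX_ELEMS - 1); }
static int ring_empty(uint32_t head, uint32_t tail) { return head == tail; }
static int ring_full(uint32_t head, uint32_t tail)
{
    return tail == (head ^ MAX_ELEMS); /* same slot, opposite wrap parity */
}

int main(void)
{
    uint32_t head = 0, tail = 0;                /* cons_head / prod_tail */
    const uint32_t wrap = (MAX_ELEMS << 1) - 1; /* indices run mod 2*MAX_ELEMS */

    assert(ring_empty(head, tail));
    for (unsigned i = 0; i < MAX_ELEMS; i++) {
        tail = (tail + 1) & wrap;               /* produce until full */
    }
    assert(ring_full(head, tail));
    assert(ring_slot(tail) == ring_slot(head)); /* full, yet same slot bits */
    head = (head + 1) & wrap;                   /* consume one element */
    assert(!ring_full(head, tail) && !ring_empty(head, tail));
    printf("slot(head)=%u slot(tail)=%u\n", ring_slot(head), ring_slot(tail));
    return 0;
}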