#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
+#include <rdma/rw.h>
#include "core_priv.h"
struct ib_qp *ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr)
{
struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
struct ib_qp *qp;
+ int ret;
+
+ /*
+ * If the caller is using the RDMA READ/WRITE API, calculate the
+ * resources needed for the RDMA READ/WRITE operations.
+ *
+ * Note that these callers need to pass in a port number.
+ */
+ if (qp_init_attr->cap.max_rdma_ctxs)
+ rdma_rw_init_qp(device, qp_init_attr);
qp = device->create_qp(pd, qp_init_attr, NULL);
if (IS_ERR(qp))
return qp;

atomic_set(&qp->usecnt, 0);
qp->mrs_used = 0;
spin_lock_init(&qp->mr_lock);
+ INIT_LIST_HEAD(&qp->rdma_mrs);
+ INIT_LIST_HEAD(&qp->sig_mrs);
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
return ib_create_xrc_qp(qp, qp_init_attr);
atomic_inc(&pd->usecnt);
atomic_inc(&qp_init_attr->send_cq->usecnt);
+
+ if (qp_init_attr->cap.max_rdma_ctxs) {
+ ret = rdma_rw_init_mrs(qp, qp_init_attr);
+ if (ret) {
+ pr_err("failed to init MR pool, ret = %d\n", ret);
+ ib_destroy_qp(qp);
+ qp = ERR_PTR(ret);
+ }
+ }
+
return qp;
}
EXPORT_SYMBOL(ib_create_qp);
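For context, here is a minimal sketch of how a consumer would ask for these resources: it sets cap.max_rdma_ctxs so that ib_create_qp() sizes the send queue and MR pool for RDMA READ/WRITE contexts, and it fills in port_num as the comment above requires. The cq and pd variables are assumed to exist; none of this is part of the patch itself.

	struct ib_qp_init_attr attr = {
		.send_cq	= cq,
		.recv_cq	= cq,
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.qp_type	= IB_QPT_RC,
		.port_num	= 1,		/* consumed by rdma_rw_init_qp() */
		.cap = {
			.max_send_wr	= 16,
			.max_recv_wr	= 16,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
			.max_rdma_ctxs	= 16,	/* reserve R/W contexts */
		},
	};
	struct ib_qp *qp;

	qp = ib_create_qp(pd, &attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);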
rcq = qp->recv_cq;
srq = qp->srq;
+ if (!qp->uobject)
+ rdma_rw_cleanup_mrs(qp);
+
ret = qp->device->destroy_qp(qp);
if (!ret) {
if (pd)
atomic_dec(&pd->usecnt);

if (!IS_ERR(mr)) {
mr->device = pd->device;
mr->pd = pd;
mr->uobject = NULL;
atomic_inc(&pd->usecnt);
+ mr->need_inval = false;
}
return mr;

if (!IS_ERR(mr)) {
mr->device = pd->device;
mr->pd = pd;
mr->uobject = NULL;
atomic_inc(&pd->usecnt);
+ mr->need_inval = false;
}
return mr;
* After this completes successfully, the memory region
* is ready for registration.
*/
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
- unsigned int sg_offset, unsigned int page_size)
+ unsigned int *sg_offset, unsigned int page_size)
{
if (unlikely(!mr->device->map_mr_sg))
return -ENOSYS;
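With the pointer form, the caller passes the starting byte offset in and, when the driver implements map_mr_sg via ib_sg_to_pages() below, gets back the offset of the first unmapped byte. A hedged sketch of a caller follows; mr, sgl and sg_nents are assumed to be set up and DMA-mapped already:

	unsigned int offset = 0;	/* IN: start at byte 0 of sgl[0] */
	int n;

	n = ib_map_mr_sg(mr, sgl, sg_nents, &offset, PAGE_SIZE);
	if (n < 0)
		return n;
	/*
	 * n sg elements were consumed.  If n < sg_nents, 'offset' now
	 * points at the first unprocessed byte inside element n, so a
	 * second MR can continue mapping exactly where this one stopped.
	 */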
/**
* ib_sg_to_pages() - Convert the largest prefix of an sg list
* to a page vector
* @mr: memory region
* @sgl: dma mapped scatterlist
* @sg_nents: number of entries in sg
- * @sg_offset: offset in bytes into sg
+ * @sg_offset_p: IN: start offset in bytes into sg
+ * OUT: offset in bytes into element n of the sg (where n is the
+ * return value of this function) of the first byte that has not
+ * been processed.
* @set_page: driver page assignment function pointer
*
* Core service helper for drivers to convert the largest
* prefix of the given sg list to a page vector. The sg list
* prefix converted is the prefix that meets the requirements
* of ib_map_mr_sg.
*
* Returns the number of sg elements that were assigned to
* a page vector.
*/
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
- unsigned int sg_offset, int (*set_page)(struct ib_mr *, u64))
+ unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
struct scatterlist *sg;
u64 last_end_dma_addr = 0;
+ unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
unsigned int last_page_off = 0;
u64 page_mask = ~((u64)mr->page_size - 1);
int i, ret;
+ if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
+ return -EINVAL;
+
mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
mr->length = 0;
for_each_sg(sgl, sg, sg_nents, i) {
u64 dma_addr = sg_dma_address(sg) + sg_offset;
+ u64 prev_addr = dma_addr;
unsigned int dma_len = sg_dma_len(sg) - sg_offset;
u64 end_dma_addr = dma_addr + dma_len;
u64 page_addr = dma_addr & page_mask;
do {
ret = set_page(mr, page_addr);
- if (unlikely(ret < 0))
- return i ? : ret;
+ if (unlikely(ret < 0)) {
+ sg_offset = prev_addr - sg_dma_address(sg);
+ mr->length += prev_addr - dma_addr;
+ if (sg_offset_p)
+ *sg_offset_p = sg_offset;
+ return i || sg_offset ? i : ret;
+ }
+ prev_addr = page_addr;
next_page:
page_addr += mr->page_size;
} while (page_addr < end_dma_addr);
sg_offset = 0;
}
+ if (sg_offset_p)
+ *sg_offset_p = 0;
return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
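To make the set_page() contract concrete, here is a sketch of a driver-side implementation; struct my_mr, its pages array, and my_map_mr_sg() are hypothetical names, not from this patch. The callback stores one page address per call and returns a negative error once its page list is full, which is what takes the early-return path above and reports the partial mapping through *sg_offset_p.

struct my_mr {
	struct ib_mr	ibmr;
	u64		*pages;		/* page list handed to the HCA */
	int		npages;
	int		max_pages;
};

#define to_my_mr(x) container_of(x, struct my_mr, ibmr)

static int my_mr_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct my_mr *mr = to_my_mr(ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;	/* makes ib_sg_to_pages() stop early */

	mr->pages[mr->npages++] = addr;
	return 0;
}

static int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			int sg_nents, unsigned int *sg_offset)
{
	struct my_mr *mr = to_my_mr(ibmr);

	mr->npages = 0;
	/* on a short map, *sg_offset reports where mapping stopped */
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, my_mr_set_page);
}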