IB/core: Enhance ib_map_mr_sg()
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 8549345c616918c8eb9d03dca6c341f73c292a0c..1d7d4cf442e3c9646b6824e943e7e649c873d104 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -48,6 +48,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
 #include <rdma/ib_addr.h>
+#include <rdma/rw.h>
 
 #include "core_priv.h"
 
@@ -751,6 +752,16 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 {
        struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
        struct ib_qp *qp;
+       int ret;
+
+       /*
+        * If the caller is using the RDMA API, calculate the resources
+        * needed for the RDMA READ/WRITE operations.
+        *
+        * Note that such callers need to pass in a port number.
+        */
+       if (qp_init_attr->cap.max_rdma_ctxs)
+               rdma_rw_init_qp(device, qp_init_attr);
 
        qp = device->create_qp(pd, qp_init_attr, NULL);
        if (IS_ERR(qp))
@@ -764,6 +775,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
        atomic_set(&qp->usecnt, 0);
        qp->mrs_used = 0;
        spin_lock_init(&qp->mr_lock);
+       INIT_LIST_HEAD(&qp->rdma_mrs);
+       INIT_LIST_HEAD(&qp->sig_mrs);
 
        if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
                return ib_create_xrc_qp(qp, qp_init_attr);
@@ -787,6 +800,16 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 
        atomic_inc(&pd->usecnt);
        atomic_inc(&qp_init_attr->send_cq->usecnt);
+
+       if (qp_init_attr->cap.max_rdma_ctxs) {
+               ret = rdma_rw_init_mrs(qp, qp_init_attr);
+               if (ret) {
+                       pr_err("failed to init MR pool, ret = %d\n", ret);
+                       ib_destroy_qp(qp);
+                       qp = ERR_PTR(ret);
+               }
+       }
+
        return qp;
 }
 EXPORT_SYMBOL(ib_create_qp);
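
For context, a kernel ULP opts in to the new RDMA R/W resource accounting by
sizing cap.max_rdma_ctxs and passing a port number in the init attributes. A
minimal caller sketch follows; the cq/pd/port_num variables and the attribute
values are illustrative, not part of this patch:

	struct ib_qp_init_attr init_attr = {
		.send_cq	= cq,
		.recv_cq	= cq,
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.qp_type	= IB_QPT_RC,
		.port_num	= port_num,	/* required when max_rdma_ctxs != 0 */
		.cap = {
			.max_send_wr	= 16,
			.max_recv_wr	= 16,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
			/* non-zero triggers rdma_rw_init_qp()/rdma_rw_init_mrs() */
			.max_rdma_ctxs	= 16,
		},
	};
	struct ib_qp *qp;

	qp = ib_create_qp(pd, &init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);
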
@@ -1271,6 +1294,9 @@ int ib_destroy_qp(struct ib_qp *qp)
        rcq  = qp->recv_cq;
        srq  = qp->srq;
 
+       if (!qp->uobject)
+               rdma_rw_cleanup_mrs(qp);
+
        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                if (pd)
@@ -1353,6 +1379,7 @@ struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
+               mr->need_inval = false;
        }
 
        return mr;
@@ -1399,6 +1426,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
+               mr->need_inval = false;
        }
 
        return mr;
@@ -1627,7 +1655,7 @@ EXPORT_SYMBOL(ib_set_vf_guid);
  * is ready for registration.
  */
 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
-               unsigned int sg_offset, unsigned int page_size)
+                unsigned int *sg_offset, unsigned int page_size)
 {
        if (unlikely(!mr->device->map_mr_sg))
                return -ENOSYS;
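
With the signature change, callers pass sg_offset by pointer (or NULL when
mapping from the start of the list). A hedged sketch of the resulting call
pattern, assuming sg/sg_nents describe an already DMA-mapped scatterlist:

	unsigned int sg_offset = 0;
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, &sg_offset, PAGE_SIZE);
	if (n < 0)
		return n;	/* nothing could be mapped */

	/*
	 * n < sg_nents (or a non-zero sg_offset on return) means the MR ran
	 * out of pages: sg_offset now points at the first unprocessed byte
	 * of element n, so a second MR can resume mapping from there.
	 */
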
@@ -1644,7 +1672,10 @@ EXPORT_SYMBOL(ib_map_mr_sg);
  * @mr:            memory region
  * @sgl:           dma mapped scatterlist
  * @sg_nents:      number of entries in sg
- * @sg_offset:     offset in bytes into sg
+ * @sg_offset_p:   IN:  start offset in bytes into sg
+ *                 OUT: offset in bytes into sg element n (where n is the
+ *                      return value of this function) of the first byte
+ *                      that has not been processed.
  * @set_page:      driver page assignment function pointer
  *
  * Core service helper for drivers to convert the largest
@@ -1656,19 +1687,24 @@ EXPORT_SYMBOL(ib_map_mr_sg);
  * a page vector.
  */
 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
-               unsigned int sg_offset, int (*set_page)(struct ib_mr *, u64))
+               unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
 {
        struct scatterlist *sg;
        u64 last_end_dma_addr = 0;
+       unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
        unsigned int last_page_off = 0;
        u64 page_mask = ~((u64)mr->page_size - 1);
        int i, ret;
 
+       if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
+               return -EINVAL;
+
        mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
        mr->length = 0;
 
        for_each_sg(sgl, sg, sg_nents, i) {
                u64 dma_addr = sg_dma_address(sg) + sg_offset;
+               u64 prev_addr = dma_addr;
                unsigned int dma_len = sg_dma_len(sg) - sg_offset;
                u64 end_dma_addr = dma_addr + dma_len;
                u64 page_addr = dma_addr & page_mask;
@@ -1693,8 +1729,14 @@ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
 
                do {
                        ret = set_page(mr, page_addr);
-                       if (unlikely(ret < 0))
-                               return i ? : ret;
+                       if (unlikely(ret < 0)) {
+                               sg_offset = prev_addr - sg_dma_address(sg);
+                               mr->length += prev_addr - dma_addr;
+                               if (sg_offset_p)
+                                       *sg_offset_p = sg_offset;
+                               return i || sg_offset ? i : ret;
+                       }
+                       prev_addr = page_addr;
 next_page:
                        page_addr += mr->page_size;
                } while (page_addr < end_dma_addr);
@@ -1706,6 +1748,8 @@ next_page:
                sg_offset = 0;
        }
 
+       if (sg_offset_p)
+               *sg_offset_p = 0;
        return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
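
Provider drivers consume ib_sg_to_pages() through the set_page callback,
which appends each computed page address to a driver-private page array. A
minimal sketch of how a driver's map_mr_sg hook would wire this up; the
my_* names and the to_my_mr() helper are hypothetical:

	static int my_mr_set_page(struct ib_mr *ibmr, u64 addr)
	{
		struct my_mr *mr = to_my_mr(ibmr);	/* hypothetical container_of() wrapper */

		if (unlikely(mr->npages >= mr->max_pages))
			return -ENOMEM;	/* MR full: ib_sg_to_pages() stops and reports progress */

		mr->pages[mr->npages++] = addr;
		return 0;
	}

	static int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
				int sg_nents, unsigned int *sg_offset)
	{
		struct my_mr *mr = to_my_mr(ibmr);

		mr->npages = 0;
		return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, my_mr_set_page);
	}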