/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ib_mr.h"

struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev,
				   int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int err = 0, iter = 0;

	/* Pick the pool whose page budget fits this mapping. */
	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	/* Schedule a background flush once a tenth of the pool is dirty. */
	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	/* Switch pools if one of the pools is reaching its upper limit */
	if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			pool = rds_ibdev->mr_1m_pool;
		else
			pool = rds_ibdev->mr_8k_pool;
	}

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the
		 * driver tells us we can't use more than N fmrs, we
		 * shouldn't start arguing with it.
		 */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);
		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	fmr = &ibmr->u.fmr;
	fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(fmr->fmr)) {
		err = PTR_ERR(fmr->fmr);
		fmr->fmr = NULL;
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (fmr->fmr)
			ib_dealloc_fmr(fmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
		   struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_fmr *fmr = &ibmr->u.fmr;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	/* First pass: validate alignment and count the pages needed. */
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > ibmr->pool->fmr_attr.max_pages)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;

	/* Second pass: flatten every segment into page-aligned addresses. */
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - the MR is remapped, so we can safely tear down the
	 * old mapping.
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}