/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");

/*
 * Check if the device might use memory registration.  This is currently only
 * true for iWarp devices.  In the future we can hopefully fine tune this based
 * on HCA driver input.
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
	if (rdma_protocol_iwarp(dev, port_num))
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

/*
 * Check if the device will use memory registration for this RW operation.
 * We currently always use memory registrations for iWarp RDMA READs, and
 * have a debug option to force usage of MRs.
 *
 * XXX: In the future we can hopefully fine tune this based on HCA driver
 * input.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
		enum dma_data_direction dir, int dma_nents)
{
	if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

static inline u32 rdma_rw_max_sge(struct ib_device *dev,
		enum dma_data_direction dir)
{
	return dir == DMA_TO_DEVICE ?
		dev->attrs.max_sge : dev->attrs.max_sge_rd;
}

static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
{
	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
}

static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
		u32 sg_cnt, u32 offset)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	u32 nents = min(sg_cnt, pages_per_mr);
	int count = 0, ret;

	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!reg->mr)
		return -EAGAIN;

	/* queue a local invalidate if the MR still carries a valid key */
	if (reg->mr->need_inval) {
		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
		reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
		reg->inv_wr.next = &reg->reg_wr.wr;
		count++;
	} else {
		reg->inv_wr.next = NULL;
	}

	ret = ib_map_mr_sg(reg->mr, sg, nents, offset, PAGE_SIZE);
	if (ret < nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
		return -EINVAL;
	}

	reg->reg_wr.wr.opcode = IB_WR_REG_MR;
	reg->reg_wr.mr = reg->mr;
	reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	count++;

	reg->sge.addr = reg->mr->iova;
	reg->sge.length = reg->mr->length;
	return count;
}

static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	int i, j, ret = 0, count = 0;

	ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ctx->nr_ops; i++) {
		struct rdma_rw_reg_ctx *prev = i ? &ctx->reg[i - 1] : NULL;
		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
		u32 nents = min(sg_cnt, pages_per_mr);

		ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
				offset);
		if (ret < 0)
			goto out_free;
		count += ret;

		/* chain this registration behind the previous RDMA WR */
		if (prev) {
			if (reg->mr->need_inval)
				prev->wr.wr.next = &reg->inv_wr;
			else
				prev->wr.wr.next = &reg->reg_wr.wr;
		}

		reg->reg_wr.wr.next = &reg->wr.wr;

		reg->wr.wr.sg_list = &reg->sge;
		reg->wr.wr.num_sge = 1;
		reg->wr.remote_addr = remote_addr;
		reg->wr.rkey = rkey;
		if (dir == DMA_TO_DEVICE) {
			reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
		} else if (!rdma_cap_read_inv(qp->device, port_num)) {
			reg->wr.wr.opcode = IB_WR_RDMA_READ;
		} else {
			reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
			reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
		}
		count++;

		remote_addr += reg->sge.length;
		sg_cnt -= nents;
		for (j = 0; j < nents; j++)
			sg = sg_next(sg);
		offset = 0;
	}

	ctx->type = RDMA_RW_MR;
	return count;

out_free:
	/* return any MRs taken from the pool so far */
	while (--i >= 0)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
	kfree(ctx->reg);
out:
	return ret;
}

static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 max_sge = rdma_rw_max_sge(dev, dir);
	struct ib_sge *sge;
	u32 total_len = 0, i, j;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

	ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
	if (!ctx->map.sges)
		goto out;

	ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
	if (!ctx->map.wrs)
		goto out_free_sges;

	for (i = 0; i < ctx->nr_ops; i++) {
		struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
		u32 nr_sge = min(sg_cnt, max_sge);

		if (dir == DMA_TO_DEVICE)
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		else
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = remote_addr + total_len;
		rdma_wr->rkey = rkey;
		rdma_wr->wr.sg_list = sge;

		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
			rdma_wr->wr.num_sge++;

			sge->addr = ib_sg_dma_address(dev, sg) + offset;
			sge->length = ib_sg_dma_len(dev, sg) - offset;
			sge->lkey = qp->pd->local_dma_lkey;

			total_len += sge->length;
			sge++;
			sg_cnt--;
			offset = 0;
		}

		if (i + 1 < ctx->nr_ops)
			rdma_wr->wr.next = &ctx->map.wrs[i + 1].wr;
	}

	ctx->type = RDMA_RW_MULTI_WR;
	return ctx->nr_ops;

out_free_sges:
	kfree(ctx->map.sges);
out:
	return -ENOMEM;
}

static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

	ctx->nr_ops = 1;

	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
	ctx->single.sge.addr = ib_sg_dma_address(dev, sg) + offset;
	ctx->single.sge.length = ib_sg_dma_len(dev, sg) - offset;

	memset(rdma_wr, 0, sizeof(*rdma_wr));
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	rdma_wr->wr.sg_list = &ctx->single.sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;

	ctx->type = RDMA_RW_SINGLE_WR;
	return 1;
}

/**
 * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @sg_offset:	current byte offset into @sg
 * @remote_addr:remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	int ret;

	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = ib_sg_dma_len(dev, sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);

/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs.  If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
	reg->mr->need_inval = need_inval;
	ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
	reg->reg_wr.key = reg->mr->lkey;
	reg->sge.lkey = reg->mr->lkey;
}

/**
 * rdma_rw_ctx_wrs - return chain of WRs for a RDMA READ or WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed.  If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *last_wr;
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
				ctx->reg[i].wr.wr.opcode !=
					IB_WR_RDMA_READ_WITH_INV);
		}

		if (ctx->reg[0].inv_wr.next)
			first_wr = &ctx->reg[0].inv_wr;
		else
			first_wr = &ctx->reg[0].reg_wr.wr;
		last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
		break;
	case RDMA_RW_MULTI_WR:
		first_wr = &ctx->map.wrs[0].wr;
		last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
		break;
	case RDMA_RW_SINGLE_WR:
		first_wr = &ctx->single.wr.wr;
		last_wr = &ctx->single.wr.wr;
		break;
	default:
		BUG();
	}

	if (chain_wr) {
		last_wr->next = chain_wr;
	} else {
		last_wr->wr_cqe = cqe;
		last_wr->send_flags |= IB_SEND_SIGNALED;
	}

	return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);

/**
 * rdma_rw_ctx_post - post a RDMA READ or RDMA WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed.  If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted.  If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *bad_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
	return ib_post_send(qp, first_wr, &bad_wr);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);

/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
{
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++)
			ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
		kfree(ctx->reg);
		break;
	case RDMA_RW_MULTI_WR:
		kfree(ctx->map.wrs);
		kfree(ctx->map.sges);
		break;
	case RDMA_RW_SINGLE_WR:
		break;
	default:
		BUG();
		break;
	}

	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;

	WARN_ON_ONCE(attr->port_num == 0);

	/*
	 * Each context needs at least one RDMA READ or WRITE WR.
	 *
	 * For some hardware we might need more, eventually we should ask the
	 * HCA driver for a multiplier here.
	 */
	factor = 1;

	/*
	 * If the device needs MRs to perform RDMA READ or WRITE operations,
	 * we'll need two additional WRs per context for the registration and
	 * the invalidation.
	 */
	if (rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

	/*
	 * But maybe we were just too high in the sky and the device doesn't
	 * even support all we need, and we'll have to live with what we get..
	 */
	attr->cap.max_send_wr =
		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}

int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
	int ret = 0;

	if (rdma_rw_can_use_mr(dev, attr->port_num)) {
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs,
				attr->cap.max_rdma_ctxs, IB_MR_TYPE_MEM_REG,
				rdma_rw_fr_page_list_len(dev));
	}

	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}