/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
	RDMA_RW_SIG_MR,
};
static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
/*
 * Check if the device might use memory registration.  This is currently only
 * true for iWarp devices.  In the future we can hopefully fine tune this based
 * on HCA driver input.
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
	if (rdma_protocol_iwarp(dev, port_num))
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}
/*
 * Check if the device will use memory registration for this RW operation.
 * We currently always use memory registrations for iWarp RDMA READs, and
 * have a debug option to force usage of MRs.
 *
 * XXX: In the future we can hopefully fine tune this based on HCA driver
 * input.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
		enum dma_data_direction dir, int dma_nents)
{
	if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}
static inline u32 rdma_rw_max_sge(struct ib_device *dev,
		enum dma_data_direction dir)
{
	return dir == DMA_TO_DEVICE ?
		dev->attrs.max_sge : dev->attrs.max_sge_rd;
}
static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
{
	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
}
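
/*
 * Worked example (illustrative): with the 256-page cap above and 4 KiB
 * pages, one MR covers at most 1 MiB.  An 8 MiB payload mapped to 2048
 * page-sized S/G entries therefore needs 2048 / 256 = 8 registrations
 * in rdma_rw_init_mr_wrs() below.
 */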
static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
		u32 sg_cnt, u32 offset)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	u32 nents = min(sg_cnt, pages_per_mr);
	int count = 0, ret;

	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!reg->mr)
		return -EAGAIN;

	if (reg->mr->need_inval) {
		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
		reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
		reg->inv_wr.next = &reg->reg_wr.wr;
		count++;
	} else {
		reg->inv_wr.next = NULL;
	}

	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
	if (ret < nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
		return -EINVAL;
	}

	reg->reg_wr.wr.opcode = IB_WR_REG_MR;
	reg->reg_wr.mr = reg->mr;
	reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	count++;

	reg->sge.addr = reg->mr->iova;
	reg->sge.length = reg->mr->length;
	return count;
}
static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	int i, j, ret = 0, count = 0;

	ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ctx->nr_ops; i++) {
		struct rdma_rw_reg_ctx *prev = i ? &ctx->reg[i - 1] : NULL;
		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
		u32 nents = min(sg_cnt, pages_per_mr);

		ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
				offset);
		if (ret < 0)
			goto out_free;
		count += ret;

		if (prev) {
			if (reg->mr->need_inval)
				prev->wr.wr.next = &reg->inv_wr;
			else
				prev->wr.wr.next = &reg->reg_wr.wr;
		}

		reg->reg_wr.wr.next = &reg->wr.wr;

		reg->wr.wr.sg_list = &reg->sge;
		reg->wr.wr.num_sge = 1;
		reg->wr.remote_addr = remote_addr;
		reg->wr.rkey = rkey;
		if (dir == DMA_TO_DEVICE) {
			reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
		} else if (!rdma_cap_read_inv(qp->device, port_num)) {
			reg->wr.wr.opcode = IB_WR_RDMA_READ;
		} else {
			reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
			reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
		}
		count++;

		remote_addr += reg->sge.length;
		sg_cnt -= nents;
		for (j = 0; j < nents; j++)
			sg = sg_next(sg);
		offset = 0;
	}

	ctx->type = RDMA_RW_MR;
	return count;

out_free:
	while (--i >= 0)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
	kfree(ctx->reg);
out:
	return ret;
}
static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 max_sge = rdma_rw_max_sge(dev, dir);
	struct ib_sge *sge;
	u32 total_len = 0, i, j;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

	ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
	if (!ctx->map.sges)
		goto out;

	ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
	if (!ctx->map.wrs)
		goto out_free_sges;

	for (i = 0; i < ctx->nr_ops; i++) {
		struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
		u32 nr_sge = min(sg_cnt, max_sge);

		if (dir == DMA_TO_DEVICE)
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		else
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = remote_addr + total_len;
		rdma_wr->rkey = rkey;
		rdma_wr->wr.sg_list = sge;

		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
			rdma_wr->wr.num_sge++;

			sge->addr = ib_sg_dma_address(dev, sg) + offset;
			sge->length = ib_sg_dma_len(dev, sg) - offset;
			sge->lkey = qp->pd->local_dma_lkey;

			total_len += sge->length;
			sge++;
			sg_cnt--;
			offset = 0;
		}

		if (i + 1 < ctx->nr_ops)
			rdma_wr->wr.next = &ctx->map.wrs[i + 1].wr;
	}

	ctx->type = RDMA_RW_MULTI_WR;
	return ctx->nr_ops;

out_free_sges:
	kfree(ctx->map.sges);
out:
	return -ENOMEM;
}
static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

	ctx->nr_ops = 1;

	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
	ctx->single.sge.addr = ib_sg_dma_address(dev, sg) + offset;
	ctx->single.sge.length = ib_sg_dma_len(dev, sg) - offset;

	memset(rdma_wr, 0, sizeof(*rdma_wr));
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	rdma_wr->wr.sg_list = &ctx->single.sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;

	ctx->type = RDMA_RW_SINGLE_WR;
	return 1;
}
/**
 * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @sg_offset:	current byte offset into @sg
 * @remote_addr:remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	int ret;

	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = ib_sg_dma_len(dev, sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
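
/*
 * Usage sketch (illustrative, not part of this file): a typical ULP
 * maps the transfer with a context, posts it, and tears it down once
 * the completion fires.  my_rdma_done, sgl, and the addressing
 * parameters are hypothetical caller-side names.
 *
 *	struct ib_cqe cqe = { .done = my_rdma_done };
 *	int ret;
 *
 *	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, sg_cnt, 0,
 *			remote_addr, rkey, DMA_FROM_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *	ret = rdma_rw_ctx_post(&ctx, qp, port_num, &cqe, NULL);
 *	...
 *	rdma_rw_ctx_destroy(&ctx, qp, port_num, sgl, sg_cnt,
 *			DMA_FROM_DEVICE);
 */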
/**
 * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs:	signature offloading algorithms
 * @remote_addr:remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	struct ib_rdma_wr *rdma_wr;
	struct ib_send_wr *prev_wr = NULL;
	int count = 0, ret;

	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
		pr_err("SG count too large\n");
		return -EINVAL;
	}

	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
	if (!ret) {
		ret = -ENOMEM;
		goto out_unmap_sg;
	}
	prot_sg_cnt = ret;

	ctx->type = RDMA_RW_SIG_MR;
	ctx->nr_ops = 1;
	ctx->sig = kcalloc(1, sizeof(*ctx->sig), GFP_KERNEL);
	if (!ctx->sig) {
		ret = -ENOMEM;
		goto out_unmap_prot_sg;
	}

	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->data, sg, sg_cnt, 0);
	if (ret < 0)
		goto out_free_ctx;
	count += ret;
	prev_wr = &ctx->sig->data.reg_wr.wr;

	if (prot_sg_cnt) {
		ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
				prot_sg, prot_sg_cnt, 0);
		if (ret < 0)
			goto out_destroy_data_mr;
		count += ret;

		if (ctx->sig->prot.inv_wr.next)
			prev_wr->next = &ctx->sig->prot.inv_wr;
		else
			prev_wr->next = &ctx->sig->prot.reg_wr.wr;
		prev_wr = &ctx->sig->prot.reg_wr.wr;
	} else {
		ctx->sig->prot.mr = NULL;
	}

	ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
	if (!ctx->sig->sig_mr) {
		ret = -EAGAIN;
		goto out_destroy_prot_mr;
	}

	if (ctx->sig->sig_mr->need_inval) {
		memset(&ctx->sig->sig_inv_wr, 0, sizeof(ctx->sig->sig_inv_wr));

		ctx->sig->sig_inv_wr.opcode = IB_WR_LOCAL_INV;
		ctx->sig->sig_inv_wr.ex.invalidate_rkey = ctx->sig->sig_mr->rkey;

		prev_wr->next = &ctx->sig->sig_inv_wr;
		prev_wr = &ctx->sig->sig_inv_wr;
	}

	ctx->sig->sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
	ctx->sig->sig_wr.wr.wr_cqe = NULL;
	ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;
	ctx->sig->sig_wr.wr.num_sge = 1;
	ctx->sig->sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
	ctx->sig->sig_wr.sig_attrs = sig_attrs;
	ctx->sig->sig_wr.sig_mr = ctx->sig->sig_mr;
	if (prot_sg_cnt)
		ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;
	prev_wr->next = &ctx->sig->sig_wr.wr;
	prev_wr = &ctx->sig->sig_wr.wr;
	count++;

	ctx->sig->sig_sge.addr = 0;
	ctx->sig->sig_sge.length = ctx->sig->data.sge.length;
	if (sig_attrs->wire.sig_type != IB_SIG_TYPE_NONE)
		ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;

	rdma_wr = &ctx->sig->data.wr;
	rdma_wr->wr.sg_list = &ctx->sig->sig_sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	prev_wr->next = &rdma_wr->wr;
	prev_wr = &rdma_wr->wr;
	count++;

	return count;

out_destroy_prot_mr:
	if (prot_sg_cnt)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
out_destroy_data_mr:
	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
out_free_ctx:
	kfree(ctx->sig);
out_unmap_prot_sg:
	ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
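
/*
 * Usage sketch (illustrative): signature contexts follow the same
 * init/post/destroy lifecycle as plain contexts; the protection
 * scatterlist and the ULP-filled ib_sig_attrs (e.g. for T10-DIF) are
 * supplied at init time, and teardown goes through
 * rdma_rw_ctx_destroy_signature().  All names below are caller-side.
 *
 *	ret = rdma_rw_ctx_signature_init(&ctx, qp, port_num, sgl, sg_cnt,
 *			prot_sgl, prot_sg_cnt, &sig_attrs,
 *			remote_addr, rkey, DMA_TO_DEVICE);
 */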
/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs.  If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
	reg->mr->need_inval = need_inval;
	ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
	reg->reg_wr.key = reg->mr->lkey;
	reg->sge.lkey = reg->mr->lkey;
}
/**
 * rdma_rw_ctx_wrs - return chain of WRs for a RDMA READ or WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed.  If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *last_wr;
	int i;

	switch (ctx->type) {
	case RDMA_RW_SIG_MR:
		rdma_rw_update_lkey(&ctx->sig->data, true);
		if (ctx->sig->prot.mr)
			rdma_rw_update_lkey(&ctx->sig->prot, true);

		ctx->sig->sig_mr->need_inval = true;
		ib_update_fast_reg_key(ctx->sig->sig_mr,
					ib_inc_rkey(ctx->sig->sig_mr->lkey));
		ctx->sig->sig_sge.lkey = ctx->sig->sig_mr->lkey;

		if (ctx->sig->data.inv_wr.next)
			first_wr = &ctx->sig->data.inv_wr;
		else
			first_wr = &ctx->sig->data.reg_wr.wr;
		last_wr = &ctx->sig->data.wr.wr;
		break;
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
				ctx->reg[i].wr.wr.opcode !=
					IB_WR_RDMA_READ_WITH_INV);
		}

		if (ctx->reg[0].inv_wr.next)
			first_wr = &ctx->reg[0].inv_wr;
		else
			first_wr = &ctx->reg[0].reg_wr.wr;
		last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
		break;
	case RDMA_RW_MULTI_WR:
		first_wr = &ctx->map.wrs[0].wr;
		last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
		break;
	case RDMA_RW_SINGLE_WR:
		first_wr = &ctx->single.wr.wr;
		last_wr = &ctx->single.wr.wr;
		break;
	default:
		BUG();
	}

	if (chain_wr) {
		last_wr->next = chain_wr;
	} else {
		last_wr->wr_cqe = cqe;
		last_wr->send_flags |= IB_SEND_SIGNALED;
	}

	return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
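
/*
 * Usage sketch (illustrative): instead of requesting a completion on
 * the last RDMA WR, a ULP can chain its own WR (say, a SEND carrying
 * the response, with its own wr_cqe) behind the context; send_wr is a
 * hypothetical caller-owned WR here.
 *
 *	struct ib_send_wr *first_wr, *bad_wr;
 *
 *	first_wr = rdma_rw_ctx_wrs(&ctx, qp, port_num, NULL, &send_wr);
 *	ret = ib_post_send(qp, first_wr, &bad_wr);
 */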
/**
 * rdma_rw_ctx_post - post a RDMA READ or RDMA WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed.  If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted.  If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *bad_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
	return ib_post_send(qp, first_wr, &bad_wr);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);
/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
{
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++)
			ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
		kfree(ctx->reg);
		break;
	case RDMA_RW_MULTI_WR:
		kfree(ctx->map.wrs);
		kfree(ctx->map.sges);
		break;
	case RDMA_RW_SINGLE_WR:
		break;
	default:
		BUG();
		break;
	}

	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir)
{
	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
		return;

	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);

	if (ctx->sig->prot.mr) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
	}

	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->sig->sig_mr);
	kfree(ctx->sig);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;

	WARN_ON_ONCE(attr->port_num == 0);

	/*
	 * Each context needs at least one RDMA READ or WRITE WR.
	 *
	 * For some hardware we might need more, eventually we should ask the
	 * HCA driver for a multiplier here.
	 */
	factor = 1;

	/*
	 * If the device needs MRs to perform RDMA READ or WRITE operations,
	 * we'll need two additional MRs for the registrations and the
	 * invalidation.
	 */
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		factor += 6;	/* (inv + reg) * (data + prot + sig) */
	else if (rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

	/*
	 * But maybe we were just too high in the sky and the device doesn't
	 * even support all we need, and we'll have to live with what we get..
	 */
	attr->cap.max_send_wr =
		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}
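
/*
 * Usage sketch (illustrative, caller-side): a ULP sizes its QP by
 * setting cap.max_rdma_ctxs before creating it, and the core then
 * invokes rdma_rw_init_qp() to scale max_send_wr accordingly.
 * queue_depth is a hypothetical per-connection constant.
 *
 *	struct ib_qp_init_attr attr = { };
 *
 *	attr.cap.max_rdma_ctxs = queue_depth;
 *	attr.port_num = port_num;
 *	qp = ib_create_qp(pd, &attr);
 */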
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
	u32 nr_mrs = 0, nr_sig_mrs = 0;
	int ret = 0;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) {
		nr_sig_mrs = attr->cap.max_rdma_ctxs;
		nr_mrs = attr->cap.max_rdma_ctxs * 2;
	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
		nr_mrs = attr->cap.max_rdma_ctxs;
	}

	if (nr_mrs) {
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
				IB_MR_TYPE_MEM_REG,
				rdma_rw_fr_page_list_len(dev));
		if (ret) {
			pr_err("%s: failed to allocate %d MRs\n",
				__func__, nr_mrs);
			return ret;
		}
	}

	if (nr_sig_mrs) {
		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
				IB_MR_TYPE_SIGNATURE, 2);
		if (ret) {
			pr_err("%s: failed to allocate %d SIG MRs\n",
				__func__, nr_sig_mrs);
			goto out_free_rdma_mrs;
		}
	}

	return 0;

out_free_rdma_mrs:
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->sig_mrs);
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}