/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
                      struct iser_data_buf *mem,
                      struct iser_reg_resources *rsc,
                      struct iser_mem_reg *mem_reg);

static
int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
                     struct iser_data_buf *mem,
                     struct iser_reg_resources *rsc,
                     struct iser_mem_reg *mem_reg);

static struct iser_reg_ops fastreg_ops = {
        .alloc_reg_res  = iser_alloc_fastreg_pool,
        .free_reg_res   = iser_free_fastreg_pool,
        .reg_mem        = iser_fast_reg_mr,
        .unreg_mem      = iser_unreg_mem_fastreg,
        .reg_desc_get   = iser_reg_desc_get_fr,
        .reg_desc_put   = iser_reg_desc_put_fr,
};

static struct iser_reg_ops fmr_ops = {
        .alloc_reg_res  = iser_alloc_fmr_pool,
        .free_reg_res   = iser_free_fmr_pool,
        .reg_mem        = iser_fast_reg_fmr,
        .unreg_mem      = iser_unreg_mem_fmr,
        .reg_desc_get   = iser_reg_desc_get_fmr,
        .reg_desc_put   = iser_reg_desc_put_fmr,
};

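/*
 * iser_assign_reg_ops - plug a registration strategy into the device.
 *
 * Prefer FMR when the device implements the whole FMR verb set;
 * otherwise fall back to fast registration work requests (FastReg)
 * if IB_DEVICE_MEM_MGT_EXTENSIONS is advertised. A device supporting
 * neither cannot register memory for iSER at all.
 */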
int iser_assign_reg_ops(struct iser_device *device)
{
        struct ib_device_attr *dev_attr = &device->dev_attr;

        /* Assign function handles - based on FMR support */
        if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
            device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
                iser_info("FMR supported, using FMR for registration\n");
                device->reg_ops = &fmr_ops;
        } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                iser_info("FastReg supported, using FastReg for registration\n");
                device->reg_ops = &fastreg_ops;
        } else {
                iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
                return -1;
        }

        return 0;
}

static void
iser_free_bounce_sg(struct iser_data_buf *data)
{
        struct scatterlist *sg;
        int count;

        for_each_sg(data->sg, sg, data->size, count)
                __free_page(sg_page(sg));

        kfree(data->sg);

        data->sg = data->orig_sg;
        data->size = data->orig_size;
        data->orig_sg = NULL;
        data->orig_size = 0;
}

static int
iser_alloc_bounce_sg(struct iser_data_buf *data)
{
        struct scatterlist *sg;
        struct page *page;
        unsigned long length = data->data_len;
        int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);

        sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
        if (!sg)
                goto err;

        sg_init_table(sg, nents);
        while (length) {
                u32 page_len = min_t(u32, length, PAGE_SIZE);

                page = alloc_page(GFP_ATOMIC);
                if (!page)
                        goto err;

                sg_set_page(&sg[i], page, page_len, 0);
                length -= page_len;
                i++;
        }

        data->orig_sg = data->sg;
        data->orig_size = data->size;
        data->sg = sg;
        data->size = nents;

        return 0;

err:
        for (; i > 0; i--)
                __free_page(sg_page(&sg[i - 1]));
        kfree(sg);

        return -ENOMEM;
}

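/*
 * iser_copy_bounce - copy between the original SG list and the bounce
 * buffer. @to_buffer selects the direction: true copies the original
 * payload into the bounce pages (writes), false copies the bounce
 * pages back into the original SG list (reads).
 */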
static void
iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
{
        struct scatterlist *osg, *bsg = data->sg;
        void *oaddr, *baddr;
        unsigned int left = data->data_len;
        unsigned int bsg_off = 0;
        int i;

        for_each_sg(data->orig_sg, osg, data->orig_size, i) {
                unsigned int copy_len, osg_off = 0;

                oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
                copy_len = min(left, osg->length);
                while (copy_len) {
                        unsigned int len = min(copy_len, bsg->length - bsg_off);

                        baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
                        if (to_buffer)
                                memcpy(baddr + bsg_off, oaddr + osg_off, len);
                        else
                                memcpy(oaddr + osg_off, baddr + bsg_off, len);

                        kunmap_atomic(baddr - bsg->offset);
                        osg_off += len;
                        bsg_off += len;
                        left -= len;
                        copy_len -= len;

                        if (bsg_off >= bsg->length) {
                                bsg = sg_next(bsg);
                                bsg_off = 0;
                        }
                }
                kunmap_atomic(oaddr - osg->offset);
        }
}

static void
iser_copy_from_bounce(struct iser_data_buf *data)
{
        iser_copy_bounce(data, false);
}

static void
iser_copy_to_bounce(struct iser_data_buf *data)
{
        iser_copy_bounce(data, true);
}

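/*
 * Registration descriptor get/put helpers. The FastReg pool is shared
 * across tasks, so iser_reg_desc_get_fr()/iser_reg_desc_put_fr() take
 * the pool spinlock and actually remove/re-add the descriptor. The FMR
 * variant only peeks at the first descriptor without removing it.
 */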
struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
        struct iser_fr_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&fr_pool->lock, flags);
        desc = list_first_entry(&fr_pool->list,
                                struct iser_fr_desc, list);
        list_del(&desc->list);
        spin_unlock_irqrestore(&fr_pool->lock, flags);

        return desc;
}

void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
                     struct iser_fr_desc *desc)
{
        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
        unsigned long flags;

        spin_lock_irqsave(&fr_pool->lock, flags);
        list_add(&desc->list, &fr_pool->list);
        spin_unlock_irqrestore(&fr_pool->lock, flags);
}

struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
{
        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;

        return list_first_entry(&fr_pool->list,
                                struct iser_fr_desc, list);
}

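/*
 * iser_reg_desc_put_fmr is intentionally empty: the FMR descriptor is
 * never taken off the pool list, so there is nothing to return.
 */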
void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
                      struct iser_fr_desc *desc)
{
}

/**
 * iser_start_rdma_unaligned_sg - replace an RDMA-unaligned SG list with
 * a page-aligned bounce buffer and DMA-map it.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                                        struct iser_data_buf *data,
                                        enum iser_data_dir cmd_dir)
{
        struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
        int rc;

        rc = iser_alloc_bounce_sg(data);
        if (rc) {
                iser_err("Failed to allocate bounce for data len %lu\n",
                         data->data_len);
                return rc;
        }

        if (cmd_dir == ISER_DIR_OUT)
                iser_copy_to_bounce(data);

        data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
                                        (cmd_dir == ISER_DIR_OUT) ?
                                        DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!data->dma_nents) {
                iser_err("Got dma_nents %d, something went wrong...\n",
                         data->dma_nents);
                rc = -ENOMEM;
                goto err;
        }

        return 0;
err:
        iser_free_bounce_sg(data);
        return rc;
}

/**
 * iser_finalize_rdma_unaligned_sg - unmap the bounce buffer, copy data
 * back for reads, and release the bounce pages.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                                     struct iser_data_buf *data,
                                     enum iser_data_dir cmd_dir)
{
        struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;

        ib_dma_unmap_sg(dev, data->sg, data->size,
                        (cmd_dir == ISER_DIR_OUT) ?
                        DMA_TO_DEVICE : DMA_FROM_DEVICE);

        if (cmd_dir == ISER_DIR_IN)
                iser_copy_from_bounce(data);

        iser_free_bounce_sg(data);
}

#define IS_4K_ALIGNED(addr)     ((((unsigned long)addr) & ~MASK_4K) == 0)

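/*
 * Note: assuming MASK_4K is the usual ~(SIZE_4K - 1) from iscsi_iser.h,
 * IS_4K_ALIGNED() tests that the low 12 bits of an address are zero.
 */
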
/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be less
 * than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also handles the odd case where
 * several fragments of the same page appear in the SG as consecutive
 * elements, as well as a single-entry SG.
 */
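/*
 * Worked example (hypothetical addresses): for DMA-mapped entries
 * (0x10200, len 0xe00), (0x11000, len 0x2000), (0x13000, len 0x1000),
 * *offset is 0x200 and the page vec compacts to four 4K pages
 * [0x10000, 0x11000, 0x12000, 0x13000] with *data_size = 0x3e00.
 */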
static int iser_sg_to_page_vec(struct iser_data_buf *data,
                               struct ib_device *ibdev, u64 *pages,
                               int *offset, int *data_size)
{
        struct scatterlist *sg, *sgl = data->sg;
        u64 start_addr, end_addr, page, chunk_start = 0;
        unsigned long total_sz = 0;
        unsigned int dma_len;
        int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

        /* compute the offset of first element */
        *offset = (u64) sgl[0].offset & ~MASK_4K;

        new_chunk = 1;
        cur_page  = 0;
        for_each_sg(sgl, sg, data->dma_nents, i) {
                start_addr = ib_sg_dma_address(ibdev, sg);
                if (new_chunk)
                        chunk_start = start_addr;
                dma_len = ib_sg_dma_len(ibdev, sg);
                end_addr = start_addr + dma_len;
                total_sz += dma_len;

                /* collect page fragments until aligned or end of SG list */
                if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
                        new_chunk = 0;
                        continue;
                }
                new_chunk = 1;

                /* address of the first page in the contiguous chunk;
                   masking relevant for the very first SG entry,
                   which might be unaligned */
                page = chunk_start & MASK_4K;
                do {
                        pages[cur_page++] = page;
                        page += SIZE_4K;
                } while (page < end_addr);
        }

        *data_size = total_sz;
        iser_dbg("page_vec->data_size:%d cur_page %d\n",
                 *data_size, cur_page);
        return cur_page;
}

/**
 * iser_data_buf_aligned_len - Tries to determine the maximal correctly
 * aligned-for-RDMA sub-list of a scatter-gather list of memory buffers, and
 * returns the number of entries which are aligned correctly. Supports the
 * case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
                                     struct ib_device *ibdev,
                                     unsigned sg_tablesize)
{
        struct scatterlist *sg, *sgl, *next_sg = NULL;
        u64 start_addr, end_addr;
        int i, ret_len, start_check = 0;

        if (data->dma_nents == 1)
                return 1;

        sgl = data->sg;
        start_addr = ib_sg_dma_address(ibdev, sgl);

        if (unlikely(sgl[0].offset &&
                     data->data_len >= sg_tablesize * PAGE_SIZE)) {
                iser_dbg("can't register length %lx with offset %x "
                         "fall to bounce buffer\n", data->data_len,
                         sgl[0].offset);
                return 0;
        }

        for_each_sg(sgl, sg, data->dma_nents, i) {
                if (start_check && !IS_4K_ALIGNED(start_addr))
                        break;

                next_sg = sg_next(sg);
                if (!next_sg)
                        break;

                end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
                start_addr = ib_sg_dma_address(ibdev, next_sg);

                if (end_addr == start_addr) {
                        start_check = 0;
                        continue;
                } else
                        start_check = 1;

                if (!IS_4K_ALIGNED(end_addr))
                        break;
        }
        ret_len = (next_sg) ? i : i + 1;

        if (unlikely(ret_len != data->dma_nents))
                iser_warn("rdma alignment violation (%d/%d aligned)\n",
                          ret_len, data->dma_nents);

        return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
                               struct ib_device *ibdev)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(data->sg, sg, data->dma_nents, i)
                iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
                         "off:0x%x sz:0x%x dma_len:0x%x\n",
                         i, (unsigned long)ib_sg_dma_address(ibdev, sg),
                         sg_page(sg), sg->offset,
                         sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
        int i;

        iser_err("page vec length %d data size %d\n",
                 page_vec->length, page_vec->data_size);
        for (i = 0; i < page_vec->length; i++)
                iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
                           struct iser_data_buf *data,
                           enum iser_data_dir iser_dir,
                           enum dma_data_direction dma_dir)
{
        struct ib_device *dev;

        iser_task->dir[iser_dir] = 1;
        dev = iser_task->iser_conn->ib_conn.device->ib_device;

        data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
        if (data->dma_nents == 0) {
                iser_err("dma_map_sg failed!!!\n");
                return -EINVAL;
        }
        return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
                              struct iser_data_buf *data,
                              enum dma_data_direction dir)
{
        struct ib_device *dev;

        dev = iser_task->iser_conn->ib_conn.device->ib_device;
        ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}

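/*
 * iser_reg_dma - "register" a single-entry SG list without posting any
 * work request, by pointing the SGE at the device-wide DMA MR and the
 * PD's local DMA lkey. Used when the payload already fits one DMA
 * segment and no protection information is involved.
 */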
static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
             struct iser_mem_reg *reg)
{
        struct scatterlist *sg = mem->sg;

        reg->sge.lkey = device->pd->local_dma_lkey;
        reg->rkey = device->mr->rkey;
        reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
        reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);

        iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
                 " length=0x%x\n", reg->sge.lkey, reg->rkey,
                 reg->sge.addr, reg->sge.length);

        return 0;
}

static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
                              struct iser_data_buf *mem,
                              enum iser_data_dir cmd_dir)
{
        struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
        struct iser_device *device = iser_task->iser_conn->ib_conn.device;

        iscsi_conn->fmr_unalign_cnt++;

        if (iser_debug_level > 0)
                iser_data_buf_dump(mem, device->ib_device);

        /* unmap the command data before accessing it */
        iser_dma_unmap_task_data(iser_task, mem,
                                 (cmd_dir == ISER_DIR_OUT) ?
                                 DMA_TO_DEVICE : DMA_FROM_DEVICE);

        /* allocate copy buf, if we are writing, copy the */
        /* unaligned scatterlist, dma map the copy        */
        if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
                return -ENOMEM;

        return 0;
}

/**
 * iser_fast_reg_fmr - Registers physical memory using an FMR pool.
 *
 * returns: 0 on success, errno code on failure
 */
static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
                      struct iser_data_buf *mem,
                      struct iser_reg_resources *rsc,
                      struct iser_mem_reg *reg)
{
        struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
        struct iser_page_vec *page_vec = rsc->page_vec;
        struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
        struct ib_pool_fmr *fmr;
        int ret, plen;

        plen = iser_sg_to_page_vec(mem, device->ib_device,
                                   page_vec->pages,
                                   &page_vec->offset,
                                   &page_vec->data_size);
        page_vec->length = plen;
        if (plen * SIZE_4K < page_vec->data_size) {
                iser_err("page vec too short to hold this SG\n");
                iser_data_buf_dump(mem, device->ib_device);
                iser_dump_page_vec(page_vec);
                return -EINVAL;
        }

        fmr = ib_fmr_pool_map_phys(fmr_pool,
                                   page_vec->pages,
                                   page_vec->length,
                                   page_vec->pages[0]);
        if (IS_ERR(fmr)) {
                ret = PTR_ERR(fmr);
                iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
                return ret;
        }

        reg->sge.lkey = fmr->fmr->lkey;
        reg->rkey = fmr->fmr->rkey;
        reg->sge.addr = page_vec->pages[0] + page_vec->offset;
        reg->sge.length = page_vec->data_size;
        reg->mem_h = fmr;

        iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
                 " length=0x%x\n", reg->sge.lkey, reg->rkey,
                 reg->sge.addr, reg->sge.length);

        return 0;
}

/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
                        enum iser_data_dir cmd_dir)
{
        struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
        int ret;

        if (!reg->mem_h)
                return;

        iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

        ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
        if (ret)
                iser_err("ib_fmr_pool_unmap failed %d\n", ret);

        reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
                            enum iser_data_dir cmd_dir)
{
        struct iser_device *device = iser_task->iser_conn->ib_conn.device;
        struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

        if (!reg->mem_h)
                return;

        device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
                                      reg->mem_h);
        reg->mem_h = NULL;
}

static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
                    struct ib_sig_domain *domain)
{
        domain->sig_type = IB_SIG_TYPE_T10_DIF;
        domain->sig.dif.pi_interval = scsi_prot_interval(sc);
        domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
        /*
         * At the moment we hard code those, but in the future
         * we will take them from sc.
         */
        domain->sig.dif.apptag_check_mask = 0xffff;
        domain->sig.dif.app_escape = true;
        domain->sig.dif.ref_escape = true;
        if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
                domain->sig.dif.ref_remap = true;
}

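/*
 * iser_set_sig_attrs - derive the signature domains from the SCSI
 * protection op: INSERT/STRIP ops protect only one side (wire for
 * WRITE_INSERT/READ_STRIP, memory for READ_INSERT/WRITE_STRIP), while
 * PASS ops carry T10-DIF on both the wire and memory domains.
 */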
static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
        switch (scsi_get_prot_op(sc)) {
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_STRIP:
                sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
                sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
                break;
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
                sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
                                                 IB_T10DIF_CSUM : IB_T10DIF_CRC;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
                sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
                sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
                                                 IB_T10DIF_CSUM : IB_T10DIF_CRC;
                break;
        default:
                iser_err("Unsupported PI operation %d\n",
                         scsi_get_prot_op(sc));
                return -EINVAL;
        }

        return 0;
}

static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
        *mask = 0;
        if (sc->prot_flags & SCSI_PROT_REF_CHECK)
                *mask |= ISER_CHECK_REFTAG;
        if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
                *mask |= ISER_CHECK_GUARD;
}

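/*
 * iser_inv_rkey - build a local-invalidate work request for @mr and
 * advance its rkey so the next fast registration uses a fresh key,
 * preventing stale remote access through the old one.
 */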
static void
iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
        u32 rkey;

        inv_wr->opcode = IB_WR_LOCAL_INV;
        inv_wr->wr_id = ISER_FASTREG_LI_WRID;
        inv_wr->ex.invalidate_rkey = mr->rkey;
        inv_wr->send_flags = 0;
        inv_wr->num_sge = 0;

        rkey = ib_inc_rkey(mr->rkey);
        ib_update_fast_reg_key(mr, rkey);
}

static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
                struct iser_pi_context *pi_ctx,
                struct iser_mem_reg *data_reg,
                struct iser_mem_reg *prot_reg,
                struct iser_mem_reg *sig_reg)
{
        struct iser_tx_desc *tx_desc = &iser_task->desc;
        struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
        struct ib_send_wr *wr;
        int ret;

        memset(sig_attrs, 0, sizeof(*sig_attrs));
        ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
        if (ret)
                goto err;

        iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);

        if (!pi_ctx->sig_mr_valid) {
                wr = iser_tx_next_wr(tx_desc);
                iser_inv_rkey(wr, pi_ctx->sig_mr);
        }

        wr = iser_tx_next_wr(tx_desc);
        wr->opcode = IB_WR_REG_SIG_MR;
        wr->wr_id = ISER_FASTREG_LI_WRID;
        wr->sg_list = &data_reg->sge;
        wr->num_sge = 1;
        wr->send_flags = 0;
        wr->wr.sig_handover.sig_attrs = sig_attrs;
        wr->wr.sig_handover.sig_mr = pi_ctx->sig_mr;
        if (scsi_prot_sg_count(iser_task->sc))
                wr->wr.sig_handover.prot = &prot_reg->sge;
        else
                wr->wr.sig_handover.prot = NULL;
        wr->wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
                                           IB_ACCESS_REMOTE_READ |
                                           IB_ACCESS_REMOTE_WRITE;
        pi_ctx->sig_mr_valid = 0;

        sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
        sig_reg->rkey = pi_ctx->sig_mr->rkey;
        sig_reg->sge.addr = 0;
        sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

        iser_dbg("sig reg: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
                 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
                 sig_reg->sge.length);
err:
        return ret;
}

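/*
 * iser_fast_reg_mr - map the SG list onto the descriptor's page list
 * and post a fast registration work request, preceded by a local
 * invalidate when the MR still holds a valid mapping.
 */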
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
                            struct iser_data_buf *mem,
                            struct iser_reg_resources *rsc,
                            struct iser_mem_reg *reg)
{
        struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
        struct ib_mr *mr = rsc->mr;
        struct ib_fast_reg_page_list *frpl = rsc->frpl;
        struct iser_tx_desc *tx_desc = &iser_task->desc;
        struct ib_send_wr *wr;
        int offset, size, plen;

        plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
                                   &offset, &size);
        if (plen * SIZE_4K < size) {
                iser_err("fast reg page_list too short to hold this SG\n");
                return -EINVAL;
        }

        if (!rsc->mr_valid) {
                wr = iser_tx_next_wr(tx_desc);
                iser_inv_rkey(wr, mr);
        }

        wr = iser_tx_next_wr(tx_desc);
        wr->opcode = IB_WR_FAST_REG_MR;
        wr->wr_id = ISER_FASTREG_LI_WRID;
        wr->send_flags = 0;
        wr->wr.fast_reg.iova_start = frpl->page_list[0] + offset;
        wr->wr.fast_reg.page_list = frpl;
        wr->wr.fast_reg.page_list_len = plen;
        wr->wr.fast_reg.page_shift = SHIFT_4K;
        wr->wr.fast_reg.length = size;
        wr->wr.fast_reg.rkey = mr->rkey;
        wr->wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
                                        IB_ACCESS_REMOTE_WRITE |
                                        IB_ACCESS_REMOTE_READ);
        rsc->mr_valid = 0;

        reg->sge.lkey = mr->lkey;
        reg->rkey = mr->rkey;
        reg->sge.addr = frpl->page_list[0] + offset;
        reg->sge.length = size;

        iser_dbg("fast reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
                 " length=0x%x\n", reg->sge.lkey, reg->rkey,
                 reg->sge.addr, reg->sge.length);

        return 0;
}

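/*
 * iser_handle_unaligned_buf - detect RDMA alignment violations in the
 * task's SG list and, when found, transparently switch the task to a
 * page-aligned bounce buffer.
 */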
static int
iser_handle_unaligned_buf(struct iscsi_iser_task *task,
                          struct iser_data_buf *mem,
                          enum iser_data_dir dir)
{
        struct iser_conn *iser_conn = task->iser_conn;
        struct iser_device *device = iser_conn->ib_conn.device;
        int err, aligned_len;

        aligned_len = iser_data_buf_aligned_len(mem, device->ib_device,
                                                iser_conn->scsi_sg_tablesize);
        if (aligned_len != mem->dma_nents) {
                err = fall_to_bounce_buf(task, mem, dir);
                if (err)
                        return err;
        }

        return 0;
}

static int
iser_reg_prot_sg(struct iscsi_iser_task *task,
                 struct iser_data_buf *mem,
                 struct iser_fr_desc *desc,
                 bool use_dma_key,
                 struct iser_mem_reg *reg)
{
        struct iser_device *device = task->iser_conn->ib_conn.device;

        if (use_dma_key)
                return iser_reg_dma(device, mem, reg);

        return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
}

static int
iser_reg_data_sg(struct iscsi_iser_task *task,
                 struct iser_data_buf *mem,
                 struct iser_fr_desc *desc,
                 bool use_dma_key,
                 struct iser_mem_reg *reg)
{
        struct iser_device *device = task->iser_conn->ib_conn.device;

        if (use_dma_key)
                return iser_reg_dma(device, mem, reg);

        return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
}

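/*
 * iser_reg_rdma_mem - top-level registration entry point for a task's
 * data in the given direction. Fast path: a single DMA entry with no
 * protection information is served by the global DMA key. Otherwise a
 * descriptor is pulled from the pool, the data (and, with PI, the
 * protection buffer) is registered, and a signature MR is set up.
 */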
int iser_reg_rdma_mem(struct iscsi_iser_task *task,
                      enum iser_data_dir dir)
{
        struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
        struct iser_data_buf *mem = &task->data[dir];
        struct iser_mem_reg *reg = &task->rdma_reg[dir];
        struct iser_mem_reg *data_reg;
        struct iser_fr_desc *desc = NULL;
        bool use_dma_key;
        int err;

        err = iser_handle_unaligned_buf(task, mem, dir);
        if (unlikely(err))
                return err;

        use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
                       scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);

        if (!use_dma_key) {
                desc = device->reg_ops->reg_desc_get(ib_conn);
                reg->mem_h = desc;
        }

        if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
                data_reg = reg;
        else
                data_reg = &task->desc.data_reg;

        err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
        if (unlikely(err))
                goto err_reg;

        if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
                struct iser_mem_reg *prot_reg = &task->desc.prot_reg;

                if (scsi_prot_sg_count(task->sc)) {
                        mem = &task->prot[dir];
                        err = iser_handle_unaligned_buf(task, mem, dir);
                        if (unlikely(err))
                                goto err_reg;

                        err = iser_reg_prot_sg(task, mem, desc,
                                               use_dma_key, prot_reg);
                        if (unlikely(err))
                                goto err_reg;
                }

                err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
                                      prot_reg, reg);
                if (unlikely(err))
                        goto err_reg;

                desc->pi_ctx->sig_protected = 1;
        }

        return 0;

err_reg:
        if (desc)
                device->reg_ops->reg_desc_put(ib_conn, desc);

        return err;
}

void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
                         enum iser_data_dir dir)
{
        struct iser_device *device = task->iser_conn->ib_conn.device;

        device->reg_ops->unreg_mem(task, dir);
}