/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>

#include <scsi/scsi_transport_srp.h>
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;
module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");
static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");
static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}
static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}
static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}
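/*
 * Note on the two registration strategies in this file: srp_alloc_fmr_pool()
 * above sets up the legacy FMR path, while the srp_fr_*() functions below
 * implement fast registration through IB_WR_REG_MR work requests. Which path
 * is used is recorded per srp_device (use_fmr vs. use_fast_reg; see also the
 * prefer_fr module parameter), and the two paths are mutually exclusive.
 */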
/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}
/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}
/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}
/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}
static void srp_drain_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_rdma_ch *ch = cq->cq_context;

	complete(&ch->done);
}

static struct ib_cqe srp_drain_cqe = {
	.done = srp_drain_done,
};
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { 0 };
	struct ib_recv_wr *bad_wr;
	int ret;

	wr.wr_cqe = &srp_drain_cqe;
	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}
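/*
 * Why the drain above works: once the QP has been moved to the error state,
 * every posted receive work request completes with a flush error, so the
 * srp_drain_cqe receive posted here is guaranteed to reach srp_drain_done()
 * and complete ch->done only after all earlier receive completions have been
 * processed.
 */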
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = dev->use_fast_reg ? 3 : 1;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_qp */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
			      ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
			      ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler    = srp_qp_event;
	init_attr->cap.max_send_wr  = m * target->queue_size;
	init_attr->cap.max_recv_wr  = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_REQ_WR;
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = send_cq;
	init_attr->recv_cq          = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	srp_destroy_qp(ch);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the SCSI error handler can continue trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len	= cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}
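/*
 * Sketch of the 16-byte SRP port identifier layout discussed above (SRP
 * draft rev. 16a):
 *
 *   byte:    0 ....... 7   8 ...... 15
 *            +------------+------------+
 *            |   ID ext   |    GUID    |
 *            +------------+------------+
 *
 * Targets that report SRP_REV10_IB_IO_CLASS expect the two 8-byte halves in
 * the opposite order, which is what the first memcpy() branch builds.
 */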
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}
static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}
static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void **mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}
static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}
static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}
/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}
static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
			u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}
/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}
static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (state->fmr.next >= state->fmr.end)
		return -ENOMEM;

	WARN_ON_ONCE(!dev->use_fmr);

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && target->global_mr) {
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->global_mr->rkey);
		goto reset_state;
	}

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->fmr.next++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
		     state->dma_len, fmr->fmr->rkey);

reset_state:
	state->npages = 0;
	state->dma_len = 0;

	return 0;
}
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end)
		return -ENOMEM;

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && target->global_mr) {
		srp_map_desc(state, sg_dma_address(state->sg),
			     sg_dma_len(state->sg),
			     target->global_mr->rkey);
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
	if (unlikely(n < 0))
		return n;

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
	if (unlikely(err))
		return err;

	return n;
}
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg, int sg_index)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len = 0;
	int ret;

	WARN_ON_ONCE(!dma_len);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			ret = srp_map_finish_fmr(state, ch);
			if (ret)
				return ret;
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size)
		ret = srp_map_finish_fmr(state, ch);
	return ret;
}
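/*
 * Example of the merge rule above, assuming a 4 KiB mr_page_size: an 8 KiB
 * S/G entry that starts on a page boundary contributes two entries to
 * state->pages and may be merged with the next S/G entry, while an entry
 * that starts at a non-zero page offset forces the MR under construction to
 * be closed out first via srp_map_finish_fmr().
 */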
static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct scatterlist *sg;
	int i, ret;

	state->desc = req->indirect_desc;
	state->pages = req->map_page;
	state->fmr.next = req->fmr_list;
	state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;

	for_each_sg(scat, sg, count, i) {
		ret = srp_map_sg_entry(state, ch, sg, i);
		if (ret)
			return ret;
	}

	ret = srp_map_finish_fmr(state, ch);
	if (ret)
		return ret;

	req->nmdesc = state->nmdesc;

	return 0;
}
static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{
	state->desc = req->indirect_desc;
	state->fr.next = req->fr_list;
	state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
	state->sg = scat;

	while (count) {
		int i, n;

		n = srp_map_finish_fr(state, req, ch, count);
		if (unlikely(n < 0))
			return n;

		count -= n;
		for (i = 0; i < n; i++)
			state->sg = sg_next(state->sg);
	}

	req->nmdesc = state->nmdesc;

	return 0;
}
static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct scatterlist *sg;
	int i;

	state->desc = req->indirect_desc;
	for_each_sg(scat, sg, count, i) {
		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
			     ib_sg_dma_len(dev->dev, sg),
			     target->global_mr->rkey);
	}

	req->nmdesc = state->nmdesc;

	return 0;
}
/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	u64 idb_pages[1];
	struct scatterlist idb_sg[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1);
		if (ret < 0)
			return ret;
	} else if (dev->use_fmr) {
		state.pages = idb_pages;
		state.pages[0] = (req->indirect_dma_addr &
				  dev->mr_page_mask);
		state.npages = 1;
		ret = srp_map_finish_fmr(&state, ch);
		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	*idb_rkey = idb_desc.key;

	return 0;
}
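/*
 * About the "hack" comments above: idb_sg is never run through
 * ib_dma_map_sg(). req->indirect_desc was already mapped with
 * ib_dma_map_single() in srp_alloc_req_data(), so the DMA address (and, with
 * CONFIG_NEED_SG_DMA_LENGTH, the DMA length) is filled in by hand before the
 * scatterlist is handed to the fast registration code.
 */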
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1 && target->global_mr) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->global_mr->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	if (dev->use_fast_reg)
		srp_map_sg_fr(&state, ch, req, scat, count);
	else if (dev->use_fmr)
		srp_map_sg_fmr(&state, ch, req, scat, count);
	else
		srp_map_sg_dma(&state, ch, req, scat, count);

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!target->global_mr) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			return ret;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(target->global_mr->rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}
/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	ib_process_cq_direct(ch->send_cq, -1);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
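/*
 * Worked example of the reservation above: with ch->req_lim equal to
 * SRP_TSK_MGMT_SQ_SIZE, a SRP_IU_CMD allocation fails (rsv ==
 * SRP_TSK_MGMT_SQ_SIZE, so req_lim <= rsv) and zero_req_lim is incremented,
 * while a SRP_IU_TSK_MGMT allocation, whose rsv is 0, still succeeds.
 */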
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "SEND");
		return;
	}

	list_add(&iu->list, &ch->free_tx);
}
static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_send_done;

	wr.next       = NULL;
	wr.wr_cqe     = &iu->cqe;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}
static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &iu->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&ch->lock, flags);

		ch->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			ch->tsk_mgmt_status = rsp->data[3];
		complete(&ch->tsk_mgmt_done);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}
static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}
static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	int res;
	u8 opcode;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "RECV");
		return;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
			      const char *opname)
{
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;

	if (ch->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %s (%d) for CQE %p\n",
			     opname, ib_wc_status_msg(wc->status), wc->status,
			     wc->wr_cqe);
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Prevent the loops that iterate over the request ring from
	 * encountering a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}
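/*
 * How the tag demultiplexing above works: blk_mq_unique_tag() encodes the
 * hardware queue index in the upper 16 bits and the per-queue tag in the
 * lower 16 bits, so a single value selects both the RDMA channel and the
 * slot in that channel's request ring.
 */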
/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
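/*
 * Worked example for the formula above: with a Local ACK Timeout of 19 and
 * retry_cnt == 7, T_tr = 4096 * 2^19 ns ~= 2.15 s, the largest completion
 * time is 7 * 4 * 2.15 s ~= 60 s, and rq_tmo_jiffies therefore corresponds
 * to roughly 61 seconds.
 */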
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}
*cm_id
,
2312 struct ib_cm_event
*event
,
2313 struct srp_rdma_ch
*ch
)
2315 struct srp_target_port
*target
= ch
->target
;
2316 struct Scsi_Host
*shost
= target
->scsi_host
;
2317 struct ib_class_port_info
*cpi
;
2320 switch (event
->param
.rej_rcvd
.reason
) {
2321 case IB_CM_REJ_PORT_CM_REDIRECT
:
2322 cpi
= event
->param
.rej_rcvd
.ari
;
2323 ch
->path
.dlid
= cpi
->redirect_lid
;
2324 ch
->path
.pkey
= cpi
->redirect_pkey
;
2325 cm_id
->remote_cm_qpn
= be32_to_cpu(cpi
->redirect_qp
) & 0x00ffffff;
2326 memcpy(ch
->path
.dgid
.raw
, cpi
->redirect_gid
, 16);
2328 ch
->status
= ch
->path
.dlid
?
2329 SRP_DLID_REDIRECT
: SRP_PORT_REDIRECT
;
2332 case IB_CM_REJ_PORT_REDIRECT
:
2333 if (srp_target_is_topspin(target
)) {
2335 * Topspin/Cisco SRP gateways incorrectly send
2336 * reject reason code 25 when they mean 24
2339 memcpy(ch
->path
.dgid
.raw
,
2340 event
->param
.rej_rcvd
.ari
, 16);
2342 shost_printk(KERN_DEBUG
, shost
,
2343 PFX
"Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2344 be64_to_cpu(ch
->path
.dgid
.global
.subnet_prefix
),
2345 be64_to_cpu(ch
->path
.dgid
.global
.interface_id
));
2347 ch
->status
= SRP_PORT_REDIRECT
;
2349 shost_printk(KERN_WARNING
, shost
,
2350 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2351 ch
->status
= -ECONNRESET
;
2355 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID
:
2356 shost_printk(KERN_WARNING
, shost
,
2357 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2358 ch
->status
= -ECONNRESET
;
2361 case IB_CM_REJ_CONSUMER_DEFINED
:
2362 opcode
= *(u8
*) event
->private_data
;
2363 if (opcode
== SRP_LOGIN_REJ
) {
2364 struct srp_login_rej
*rej
= event
->private_data
;
2365 u32 reason
= be32_to_cpu(rej
->reason
);
2367 if (reason
== SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE
)
2368 shost_printk(KERN_WARNING
, shost
,
2369 PFX
"SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2371 shost_printk(KERN_WARNING
, shost
, PFX
2372 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2374 target
->orig_dgid
.raw
, reason
);
2376 shost_printk(KERN_WARNING
, shost
,
2377 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2378 " opcode 0x%02x\n", opcode
);
2379 ch
->status
= -ECONNRESET
;
2382 case IB_CM_REJ_STALE_CONN
:
2383 shost_printk(KERN_WARNING
, shost
, " REJ reason: stale connection\n");
2384 ch
->status
= SRP_STALE_CONN
;
2388 shost_printk(KERN_WARNING
, shost
, " REJ reason 0x%x\n",
2389 event
->param
.rej_rcvd
.reason
);
2390 ch
->status
= -ECONNRESET
;

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

/**
 * srp_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}

static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
			     u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!ch->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	int_to_scsilun(lun, &tsk_mgmt->lun);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func	= func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
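
/*
 * Editorial note: ORing SRP_TAG_TSK_MGMT into the tag above is assumed to
 * let the response dispatch path tell task management responses apart
 * from ordinary command completions when the RSP IU arrives.
 */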

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}
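
/*
 * Editorial note: blk_mq_unique_tag() encodes the hardware queue index in
 * the upper 16 bits of the returned value and the per-queue tag in the
 * lower 16 bits, which is why blk_mq_unique_tag_to_hwq() can recover the
 * RDMA channel index directly from the tag above.
 */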

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (ch->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,	    S_IRUGO, show_req_lim,	   NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count,	    S_IRUGO, show_ch_count,	   NULL);
static DEVICE_ATTR(comp_vector,	    S_IRUGO, show_comp_vector,	   NULL);
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,	   NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth		= srp_change_queue_depth,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.track_queue_depth		= 1,
};

static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}
/*
 * Return values:
 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
 *    removal has been scheduled.
 * 0 and target->state != SRP_TARGET_REMOVED upon success.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};
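
/*
 * Example login string (editorial; the identifiers are hypothetical):
 * with the sysfs layout created by srp_add_port() below, a target port
 * would be added with e.g.
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * Optional tokens from srp_opt_tokens, such as queue_size=128 or
 * comp_vector=0, can be appended to the same comma-separated list.
 */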

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}
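
/*
 * Editorial note on queue_size: the value written by the user becomes
 * can_queue, while target->queue_size additionally reserves ring slots
 * for RSP and task management IUs, so the IU rings allocated in
 * srp_alloc_iu_bufs() are slightly larger than can_queue.
 */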

static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = -1LL;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->pd->local_dma_lkey;
	target->global_mr	= host->srp_dev->global_mr;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
	if (ret)
		goto out;

	ret = -ENOMEM;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				   % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					goto connected;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

connected:
	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	scsi_host_put(target->scsi_host);
	if (ret < 0)
		scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}
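
/*
 * Worked example for the channel spreading above (editorial): on a
 * two-node NUMA system with 16 online CPUs and an HCA exposing 8
 * completion vectors, with the ch_count module parameter left unset,
 * target->ch_count = max(2, min(min(4 * 2, 8), 16)) = 8; node 0 is
 * assigned ch[0..3] and node 1 ch[4..7], each channel bound to one of
 * that node's share of the completion vectors.
 */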

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host;
	int mr_page_shift, p;
	u64 max_pages_per_mr;

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		return;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (device->attrs.device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));
	srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(device->attrs.page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= device->attrs.max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      device->attrs.max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, device->attrs.max_mr_size,
		 device->attrs.max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
		srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
						   IB_ACCESS_LOCAL_WRITE |
						   IB_ACCESS_REMOTE_READ |
						   IB_ACCESS_REMOTE_WRITE);
		if (IS_ERR(srp_dev->global_mr))
			goto err_pd;
	} else {
		srp_dev->global_mr = NULL;
	}

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);
	return;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);
}
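
/*
 * Worked example for the MR sizing above (editorial): for an HCA whose
 * smallest supported page size is 4 KiB, ffs(page_size_cap) - 1 == 12,
 * so mr_page_size = 4096 and mr_page_mask = ~0xfffULL; max_pages_per_mr
 * is then device->attrs.max_mr_size / 4096, clamped to
 * SRP_MAX_PAGES_PER_MR (and to max_fast_reg_page_list_len when fast
 * registration is in use).
 */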

static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	if (srp_dev->global_mr)
		ib_dereg_mr(srp_dev->global_mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}
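
/*
 * Illustrative usage (editorial, not part of the original source): the
 * module parameters validated above can be set at load time, e.g.
 *
 *   modprobe ib_srp cmd_sg_entries=64 indirect_sg_entries=128
 */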

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);