/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;
module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}
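/*
 * Worked example (illustrative): writing "3" to
 * /sys/module/ib_srp/parameters/fast_io_fail_tmo parses the string with
 * kstrtoint() and validates it together with the current reconnect_delay
 * and dev_loss_tmo via srp_tmo_valid(), which rejects inconsistent
 * combinations such as a fast_io_fail_tmo that does not stay below
 * dev_loss_tmo; writing "off" stores -1 to disable the timer.
 */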
static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
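/*
 * Example (illustrative): ioc_guid is stored big-endian, so its first
 * three bytes are the vendor OUI. A GUID such as 0x0005ad0000001234
 * begins with 00:05:ad and matches topspin_oui, while a Mellanox GUID
 * like 0x0002c90300abcdef (OUI 00:02:c9) matches neither and is left
 * alone.
 */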
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}
static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_recv_completion, NULL, target,
			       target->queue_size, target->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_send_completion, NULL, target,
			       target->queue_size, target->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the target->[rt]x_ring checks.
 */
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	if (target->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->rx_ring[i]);
		kfree(target->rx_ring);
		target->rx_ring = NULL;
	}
	if (target->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->tx_ring[i]);
		kfree(target->tx_ring);
		target->tx_ring = NULL;
	}
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}
static int srp_lookup_path(struct srp_target_port *target)
{
	int ret;

	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	ret = wait_for_completion_interruptible(&target->done);
	if (ret < 0)
		return ret;

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &target->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = target->qp->qp_num;
	req->param.qp_type		      = target->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;
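	/*
	 * Illustrative note: the starting PSN is a 24-bit field, so a
	 * random 32-bit value such as 0x89abcdef is masked with 0xffffff
	 * down to 0xabcdef before being carried in the CM REQ.
	 */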
	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}
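	/*
	 * Layout example (illustrative): for a rev. 16a target the 16-byte
	 * initiator port ID built above is <initiator_ext><SGID interface
	 * id> and the target port ID is <id_ext><ioc_guid>; the
	 * SRP_REV10_IB_IO_CLASS branch swaps the two 8-byte halves of both
	 * identifiers to match the obsolete draft ordering.
	 */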
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(system_long_wq, &target->remove_work);

	return changed;
}
static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}
static void srp_disconnect_target(struct srp_target_port *target)
{
	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}
static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	if (!target->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &target->req_ring[i];
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(target->req_ring);
	target->req_ring = NULL;
}
static int srp_alloc_req_data(struct srp_target_port *target)
{
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	INIT_LIST_HEAD(&target->free_reqs);

	target->req_ring = kzalloc(target->req_ring_size *
				   sizeof(*target->req_ring), GFP_KERNEL);
	if (!target->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &target->req_ring[i];
		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
					GFP_KERNEL);
		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
					GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}
	ret = 0;

out:
	return ret;
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}
static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	srp_free_req_data(target);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}
static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&target->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}
/**
 * srp_free_req() - Unmap data and add request to the free request list.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}
static void srp_finish_req(struct srp_target_port *target,
			   struct srp_request *req, struct scsi_device *sdev,
			   int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16);
	}
}
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i, ret;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	ret = srp_new_cm_id(target);
	/*
	 * Whether or not creating a new CM ID succeeded, create a new
	 * QP. This guarantees that all completion callback function
	 * invocations have finished before request resetting starts.
	 */
	if (ret == 0)
		ret = srp_create_target_ib(target);
	else
		srp_create_target_ib(target);

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, NULL, DID_RESET << 16);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < target->queue_size; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	if (ret == 0)
		ret = srp_connect_target(target);

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}
static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}
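/*
 * Example (illustrative): with a 4 KiB fmr_page_size, a 16 KiB S/G entry
 * that starts on a page boundary contributes four addresses to
 * state->pages[] and can be merged into the current FMR, while an entry
 * starting at offset 0x200 fails the "dma_addr & ~dev->fmr_page_mask"
 * test above and is emitted as a plain direct descriptor instead.
 */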
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc	= req->indirect_desc;
	state.pages	= req->map_page;
	state.next_fmr	= req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}
/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
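/*
 * Worked example (illustrative, assuming SRP_TSK_MGMT_SQ_SIZE == 1): with
 * req_lim == 2, an SRP_IU_CMD allocation succeeds (2 > 1) and decrements
 * req_lim; a second SRP_IU_CMD fails and bumps zero_req_lim; an
 * SRP_IU_TSK_MGMT allocation still succeeds because its reservation
 * (rsv == 0) leaves the last credit available to task management.
 */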
static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}
static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}
static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}
static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
/**
 * srp_tl_err_work() - handle a transport layer error
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}
static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,
			      struct srp_target_port *target)
{
	if (target->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %d\n",
			     send_err ? "send" : "receive",
			     wc_status);
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(target, &wc);
		} else {
			srp_handle_qp_err(wc.status, false, target);
		}
	}
}
static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct srp_iu *iu;
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &target->free_tx);
		} else {
			srp_handle_qp_err(wc.status, true, target);
		}
	}
}
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len, result;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	result = srp_chkready(target->rport);
	if (unlikely(result)) {
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
		goto unlock_rport;
	}

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return SCSI_MLQUEUE_HOST_BUSY;
}
/*
 * Note: the resources allocated in this function are freed in
 * srp_free_target_ib().
 */
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
				  GFP_KERNEL);
	if (!target->rx_ring)
		goto err_no_ring;
	target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
				  GFP_KERNEL);
	if (!target->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		srp_free_iu(target->srp_host, target->tx_ring[i]);
	}

err_no_ring:
	kfree(target->tx_ring);
	target->tx_ring = NULL;
	kfree(target->rx_ring);
	target->rx_ring = NULL;

	return -ENOMEM;
}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QPs during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
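/*
 * Worked example (illustrative): for qp_attr->timeout == 14 and
 * qp_attr->retry_cnt == 7, T_tr_ns = 4096 * 2^14 ns ~= 67.1 ms, the
 * worst-case completion time is 7 * 4 * 67.1 ms ~= 1879 ms, and the
 * resulting request queue timeout is msecs_to_jiffies(2879) ~= 2.9 s.
 */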
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_target_port *target)
{
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
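		/*
		 * Numeric example (illustrative, assuming
		 * SRP_TSK_MGMT_SQ_SIZE == 1): if the target grants
		 * req_lim == 64 credits, can_queue is capped at 63 so
		 * that one credit always remains available for task
		 * management requests such as aborts and LUN resets.
		 */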
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!target->rx_ring) {
		ret = srp_alloc_iu_bufs(target);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = target->rx_ring[i];
		ret = srp_post_recv(target, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	target->status = ret;
}
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->path.sgid.raw,
					     target->orig_dgid, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		srp_change_conn_state(target, false);
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}
/**
 * srp_change_queue_type - change the device queue tag type
 * @sdev: scsi device struct
 * @tag_type: requested tag type
 *
 * Returns queue tag type.
 */
static int
srp_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}
/**
 * srp_change_queue_depth - set the device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
 * (see include/scsi/scsi_host.h for definition)
 *
 * Returns queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;

	if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
		max_depth = shost->can_queue;
		if (!sdev->tagged_supported)
			max_depth = 1;
		if (qdepth > max_depth)
			qdepth = max_depth;
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	} else if (reason == SCSI_QDEPTH_QFULL)
		scsi_track_queue_full(sdev, qdepth);
	else
		return -EOPNOTSUPP;

	return sdev->queue_depth;
}
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!target->connected || target->qp_in_error)
		return -1;

	init_completion(&target->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to avoid that srp_create_target_ib() is
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req || !srp_claim_req(target, req, NULL, scmnd))
		return SUCCESS;
	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(target, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int i;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (target->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, scmnd->device, DID_RESET << 16);
	}

	return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->path.sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}
static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,	    S_IRUGO, show_req_lim,	   NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(comp_vector,	    S_IRUGO, show_comp_vector,	   NULL);
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,	   NULL);
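/*
 * The attributes above are exported via shost_attrs, so they appear under
 * the SCSI host's sysfs directory, e.g. (the host number is hypothetical):
 *
 *     cat /sys/class/scsi_host/host7/ioc_guid
 */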
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth		= srp_change_queue_depth,
	.change_queue_type		= srp_change_queue_type,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
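/*
 * Note that can_queue and cmd_per_lun above are only defaults: both can be
 * overridden per target via the queue_size and max_cmd_per_lun options
 * handled in srp_parse_options() below.
 */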
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	/* Trigger SCSI LUN discovery below the newly added target port. */
	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}
static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};
/**
 * srp_conn_unique() - check whether the connection to a target is unique
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
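/*
 * A minimal usage sketch; the HCA name "mlx4_0", port number 1 and all
 * parameter values below are placeholders, not real fabric addresses
 * (note that the dgid must be exactly 32 hex digits):
 *
 *     echo id_ext=0000000000000001,ioc_guid=0000000000000002,dgid=fe800000000000000002c90300000001,pkey=ffff,service_id=0000000000000003 \
 *         > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */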
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};
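/*
 * Illustrative walk-through of srp_parse_options() below (values made up):
 * parsing "pkey=ffff,max_sect=1024" sets SRP_OPT_PKEY and SRP_OPT_MAX_SECT
 * in opt_mask. Since that leaves bits of SRP_OPT_ALL unset, the function
 * warns about each missing required parameter (id_ext, ioc_guid, dgid and
 * service_id) and returns -EINVAL.
 */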
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	int ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto err;
	}

	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	ret = srp_alloc_req_data(target);
	if (ret)
		goto err_free_mem;

	ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
	if (ret)
		goto err_free_mem;

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
		     be64_to_cpu(target->id_ext),
		     be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     be64_to_cpu(target->service_id),
		     target->path.sgid.raw, target->path.dgid.raw);

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);
	return ret;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);
	goto out;
}
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
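	/*
	 * An illustrative example (capability mask hypothetical): if
	 * page_size_cap == 0x3000, i.e. only 4 KiB (bit 12) and 8 KiB
	 * (bit 13) pages are supported, then ffs(0x3000) - 1 == 12 and
	 * fmr_page_size becomes 1 << 12 == 4096. If the lowest set bit
	 * were below bit 12, max(12, ...) would clamp the shift so that
	 * fmr_page_size never drops below 4096.
	 */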
	fmr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	/*
	 * If the HCA rejects an FMR pool with SRP_FMR_SIZE pages per FMR,
	 * retry with progressively smaller sizes; on total failure fall
	 * back to running without an FMR pool.
	 */
	for (max_pages_per_fmr = SRP_FMR_SIZE;
			max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
			max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache		    = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift	    = fmr_page_shift;
		fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for target port removal tasks.
		 */
		flush_workqueue(system_long_wq);

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);