IB/srp: Use SRP transport layer error recovery
[deliverable/linux.git] / drivers / infiniband / ulp / srp / ib_srp.c
CommitLineData
aef9ec39
RD
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
aef9ec39
RD
31 */
32
e0bda7d8
BVA
33#define pr_fmt(fmt) PFX fmt
34
aef9ec39
RD
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/string.h>
40#include <linux/parser.h>
41#include <linux/random.h>
de25968c 42#include <linux/jiffies.h>
aef9ec39 43
60063497 44#include <linux/atomic.h>
aef9ec39
RD
45
46#include <scsi/scsi.h>
47#include <scsi/scsi_device.h>
48#include <scsi/scsi_dbg.h>
49#include <scsi/srp.h>
3236822b 50#include <scsi/scsi_transport_srp.h>
aef9ec39 51
aef9ec39
RD
52#include "ib_srp.h"
53
54#define DRV_NAME "ib_srp"
55#define PFX DRV_NAME ": "
e8ca4135
VP
56#define DRV_VERSION "1.0"
57#define DRV_RELDATE "July 1, 2013"
aef9ec39
RD
58
59MODULE_AUTHOR("Roland Dreier");
60MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
61 "v" DRV_VERSION " (" DRV_RELDATE ")");
62MODULE_LICENSE("Dual BSD/GPL");
63
49248644
DD
64static unsigned int srp_sg_tablesize;
65static unsigned int cmd_sg_entries;
c07d424d
DD
66static unsigned int indirect_sg_entries;
67static bool allow_ext_sg;
49248644 68static int topspin_workarounds = 1;
74b0a15b 69
49248644
DD
70module_param(srp_sg_tablesize, uint, 0444);
71MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
74b0a15b 72
49248644
DD
73module_param(cmd_sg_entries, uint, 0444);
74MODULE_PARM_DESC(cmd_sg_entries,
75 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
aef9ec39 76
c07d424d
DD
77module_param(indirect_sg_entries, uint, 0444);
78MODULE_PARM_DESC(indirect_sg_entries,
79 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
80
81module_param(allow_ext_sg, bool, 0444);
82MODULE_PARM_DESC(allow_ext_sg,
83 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
84
aef9ec39
RD
85module_param(topspin_workarounds, int, 0444);
86MODULE_PARM_DESC(topspin_workarounds,
87 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
88
ed9b2264
BVA
89static struct kernel_param_ops srp_tmo_ops;
90
91static int srp_fast_io_fail_tmo = 15;
92module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
93 S_IRUGO | S_IWUSR);
94MODULE_PARM_DESC(fast_io_fail_tmo,
95 "Number of seconds between the observation of a transport"
96 " layer error and failing all I/O. \"off\" means that this"
97 " functionality is disabled.");
98
99static int srp_dev_loss_tmo = 60;
100module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
101 S_IRUGO | S_IWUSR);
102MODULE_PARM_DESC(dev_loss_tmo,
103 "Maximum number of seconds that the SRP transport should"
104 " insulate transport layer errors. After this time has been"
105 " exceeded the SCSI host is removed. Should be"
106 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
107 " if fast_io_fail_tmo has not been set. \"off\" means that"
108 " this functionality is disabled.");
109
aef9ec39
RD
110static void srp_add_one(struct ib_device *device);
111static void srp_remove_one(struct ib_device *device);
9c03dc9f
BVA
112static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
113static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
aef9ec39
RD
114static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
115
3236822b
FT
116static struct scsi_transport_template *ib_srp_transport_template;
117
aef9ec39
RD
118static struct ib_client srp_client = {
119 .name = "srp",
120 .add = srp_add_one,
121 .remove = srp_remove_one
122};
123
c1a0b23b
MT
124static struct ib_sa_client srp_sa_client;
125
ed9b2264
BVA
126static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
127{
128 int tmo = *(int *)kp->arg;
129
130 if (tmo >= 0)
131 return sprintf(buffer, "%d", tmo);
132 else
133 return sprintf(buffer, "off");
134}
135
136static int srp_tmo_set(const char *val, const struct kernel_param *kp)
137{
138 int tmo, res;
139
140 if (strncmp(val, "off", 3) != 0) {
141 res = kstrtoint(val, 0, &tmo);
142 if (res)
143 goto out;
144 } else {
145 tmo = -1;
146 }
147 if (kp->arg == &srp_fast_io_fail_tmo)
148 res = srp_tmo_valid(tmo, srp_dev_loss_tmo);
149 else
150 res = srp_tmo_valid(srp_fast_io_fail_tmo, tmo);
151 if (res)
152 goto out;
153 *(int *)kp->arg = tmo;
154
155out:
156 return res;
157}
158
159static struct kernel_param_ops srp_tmo_ops = {
160 .get = srp_tmo_get,
161 .set = srp_tmo_set,
162};
163
aef9ec39
RD
164static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
165{
166 return (struct srp_target_port *) host->hostdata;
167}
168
169static const char *srp_target_info(struct Scsi_Host *host)
170{
171 return host_to_target(host)->target_name;
172}
173
5d7cbfd6
RD
174static int srp_target_is_topspin(struct srp_target_port *target)
175{
176 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
3d1ff48d 177 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
5d7cbfd6
RD
178
179 return topspin_workarounds &&
3d1ff48d
RK
180 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
181 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
5d7cbfd6
RD
182}
183
aef9ec39
RD
184static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
185 gfp_t gfp_mask,
186 enum dma_data_direction direction)
187{
188 struct srp_iu *iu;
189
190 iu = kmalloc(sizeof *iu, gfp_mask);
191 if (!iu)
192 goto out;
193
194 iu->buf = kzalloc(size, gfp_mask);
195 if (!iu->buf)
196 goto out_free_iu;
197
05321937
GKH
198 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
199 direction);
200 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
aef9ec39
RD
201 goto out_free_buf;
202
203 iu->size = size;
204 iu->direction = direction;
205
206 return iu;
207
208out_free_buf:
209 kfree(iu->buf);
210out_free_iu:
211 kfree(iu);
212out:
213 return NULL;
214}
215
216static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
217{
218 if (!iu)
219 return;
220
05321937
GKH
221 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
222 iu->direction);
aef9ec39
RD
223 kfree(iu->buf);
224 kfree(iu);
225}
226
227static void srp_qp_event(struct ib_event *event, void *context)
228{
e0bda7d8 229 pr_debug("QP event %d\n", event->event);
aef9ec39
RD
230}
231
232static int srp_init_qp(struct srp_target_port *target,
233 struct ib_qp *qp)
234{
235 struct ib_qp_attr *attr;
236 int ret;
237
238 attr = kmalloc(sizeof *attr, GFP_KERNEL);
239 if (!attr)
240 return -ENOMEM;
241
969a60f9
RD
242 ret = ib_find_pkey(target->srp_host->srp_dev->dev,
243 target->srp_host->port,
244 be16_to_cpu(target->path.pkey),
245 &attr->pkey_index);
aef9ec39
RD
246 if (ret)
247 goto out;
248
249 attr->qp_state = IB_QPS_INIT;
250 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
251 IB_ACCESS_REMOTE_WRITE);
252 attr->port_num = target->srp_host->port;
253
254 ret = ib_modify_qp(qp, attr,
255 IB_QP_STATE |
256 IB_QP_PKEY_INDEX |
257 IB_QP_ACCESS_FLAGS |
258 IB_QP_PORT);
259
260out:
261 kfree(attr);
262 return ret;
263}
264
9fe4bcf4
DD
265static int srp_new_cm_id(struct srp_target_port *target)
266{
267 struct ib_cm_id *new_cm_id;
268
05321937 269 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
9fe4bcf4
DD
270 srp_cm_handler, target);
271 if (IS_ERR(new_cm_id))
272 return PTR_ERR(new_cm_id);
273
274 if (target->cm_id)
275 ib_destroy_cm_id(target->cm_id);
276 target->cm_id = new_cm_id;
277
278 return 0;
279}
280
aef9ec39
RD
281static int srp_create_target_ib(struct srp_target_port *target)
282{
283 struct ib_qp_init_attr *init_attr;
73aa89ed
IR
284 struct ib_cq *recv_cq, *send_cq;
285 struct ib_qp *qp;
aef9ec39
RD
286 int ret;
287
288 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
289 if (!init_attr)
290 return -ENOMEM;
291
73aa89ed 292 recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
4b5e5f41
BVA
293 srp_recv_completion, NULL, target, SRP_RQ_SIZE,
294 target->comp_vector);
73aa89ed
IR
295 if (IS_ERR(recv_cq)) {
296 ret = PTR_ERR(recv_cq);
da9d2f07 297 goto err;
aef9ec39
RD
298 }
299
73aa89ed 300 send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
4b5e5f41
BVA
301 srp_send_completion, NULL, target, SRP_SQ_SIZE,
302 target->comp_vector);
73aa89ed
IR
303 if (IS_ERR(send_cq)) {
304 ret = PTR_ERR(send_cq);
da9d2f07 305 goto err_recv_cq;
9c03dc9f
BVA
306 }
307
73aa89ed 308 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
aef9ec39
RD
309
310 init_attr->event_handler = srp_qp_event;
311 init_attr->cap.max_send_wr = SRP_SQ_SIZE;
312 init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
313 init_attr->cap.max_recv_sge = 1;
314 init_attr->cap.max_send_sge = 1;
315 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
316 init_attr->qp_type = IB_QPT_RC;
73aa89ed
IR
317 init_attr->send_cq = send_cq;
318 init_attr->recv_cq = recv_cq;
aef9ec39 319
73aa89ed
IR
320 qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
321 if (IS_ERR(qp)) {
322 ret = PTR_ERR(qp);
da9d2f07 323 goto err_send_cq;
aef9ec39
RD
324 }
325
73aa89ed 326 ret = srp_init_qp(target, qp);
da9d2f07
RD
327 if (ret)
328 goto err_qp;
aef9ec39 329
73aa89ed
IR
330 if (target->qp)
331 ib_destroy_qp(target->qp);
332 if (target->recv_cq)
333 ib_destroy_cq(target->recv_cq);
334 if (target->send_cq)
335 ib_destroy_cq(target->send_cq);
336
337 target->qp = qp;
338 target->recv_cq = recv_cq;
339 target->send_cq = send_cq;
340
da9d2f07
RD
341 kfree(init_attr);
342 return 0;
343
344err_qp:
73aa89ed 345 ib_destroy_qp(qp);
da9d2f07
RD
346
347err_send_cq:
73aa89ed 348 ib_destroy_cq(send_cq);
da9d2f07
RD
349
350err_recv_cq:
73aa89ed 351 ib_destroy_cq(recv_cq);
da9d2f07
RD
352
353err:
aef9ec39
RD
354 kfree(init_attr);
355 return ret;
356}
357
358static void srp_free_target_ib(struct srp_target_port *target)
359{
360 int i;
361
362 ib_destroy_qp(target->qp);
9c03dc9f
BVA
363 ib_destroy_cq(target->send_cq);
364 ib_destroy_cq(target->recv_cq);
aef9ec39 365
73aa89ed
IR
366 target->qp = NULL;
367 target->send_cq = target->recv_cq = NULL;
368
aef9ec39
RD
369 for (i = 0; i < SRP_RQ_SIZE; ++i)
370 srp_free_iu(target->srp_host, target->rx_ring[i]);
dd5e6e38 371 for (i = 0; i < SRP_SQ_SIZE; ++i)
aef9ec39
RD
372 srp_free_iu(target->srp_host, target->tx_ring[i]);
373}
374
375static void srp_path_rec_completion(int status,
376 struct ib_sa_path_rec *pathrec,
377 void *target_ptr)
378{
379 struct srp_target_port *target = target_ptr;
380
381 target->status = status;
382 if (status)
7aa54bd7
DD
383 shost_printk(KERN_ERR, target->scsi_host,
384 PFX "Got failed path rec status %d\n", status);
aef9ec39
RD
385 else
386 target->path = *pathrec;
387 complete(&target->done);
388}
389
390static int srp_lookup_path(struct srp_target_port *target)
391{
392 target->path.numb_path = 1;
393
394 init_completion(&target->done);
395
c1a0b23b 396 target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
05321937 397 target->srp_host->srp_dev->dev,
aef9ec39
RD
398 target->srp_host->port,
399 &target->path,
247e020e 400 IB_SA_PATH_REC_SERVICE_ID |
aef9ec39
RD
401 IB_SA_PATH_REC_DGID |
402 IB_SA_PATH_REC_SGID |
403 IB_SA_PATH_REC_NUMB_PATH |
404 IB_SA_PATH_REC_PKEY,
405 SRP_PATH_REC_TIMEOUT_MS,
406 GFP_KERNEL,
407 srp_path_rec_completion,
408 target, &target->path_query);
409 if (target->path_query_id < 0)
410 return target->path_query_id;
411
412 wait_for_completion(&target->done);
413
414 if (target->status < 0)
7aa54bd7
DD
415 shost_printk(KERN_WARNING, target->scsi_host,
416 PFX "Path record query failed\n");
aef9ec39
RD
417
418 return target->status;
419}
420
421static int srp_send_req(struct srp_target_port *target)
422{
423 struct {
424 struct ib_cm_req_param param;
425 struct srp_login_req priv;
426 } *req = NULL;
427 int status;
428
429 req = kzalloc(sizeof *req, GFP_KERNEL);
430 if (!req)
431 return -ENOMEM;
432
433 req->param.primary_path = &target->path;
434 req->param.alternate_path = NULL;
435 req->param.service_id = target->service_id;
436 req->param.qp_num = target->qp->qp_num;
437 req->param.qp_type = target->qp->qp_type;
438 req->param.private_data = &req->priv;
439 req->param.private_data_len = sizeof req->priv;
440 req->param.flow_control = 1;
441
442 get_random_bytes(&req->param.starting_psn, 4);
443 req->param.starting_psn &= 0xffffff;
444
445 /*
446 * Pick some arbitrary defaults here; we could make these
447 * module parameters if anyone cared about setting them.
448 */
449 req->param.responder_resources = 4;
450 req->param.remote_cm_response_timeout = 20;
451 req->param.local_cm_response_timeout = 20;
7bb312e4 452 req->param.retry_count = target->tl_retry_count;
aef9ec39
RD
453 req->param.rnr_retry_count = 7;
454 req->param.max_cm_retries = 15;
455
456 req->priv.opcode = SRP_LOGIN_REQ;
457 req->priv.tag = 0;
49248644 458 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
aef9ec39
RD
459 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
460 SRP_BUF_FORMAT_INDIRECT);
0c0450db 461 /*
3cd96564 462 * In the published SRP specification (draft rev. 16a), the
0c0450db
R
463 * port identifier format is 8 bytes of ID extension followed
464 * by 8 bytes of GUID. Older drafts put the two halves in the
465 * opposite order, so that the GUID comes first.
466 *
467 * Targets conforming to these obsolete drafts can be
468 * recognized by the I/O Class they report.
469 */
470 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
471 memcpy(req->priv.initiator_port_id,
01cb9bcb 472 &target->path.sgid.global.interface_id, 8);
0c0450db 473 memcpy(req->priv.initiator_port_id + 8,
01cb9bcb 474 &target->initiator_ext, 8);
0c0450db
R
475 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
476 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
477 } else {
478 memcpy(req->priv.initiator_port_id,
01cb9bcb
IR
479 &target->initiator_ext, 8);
480 memcpy(req->priv.initiator_port_id + 8,
481 &target->path.sgid.global.interface_id, 8);
0c0450db
R
482 memcpy(req->priv.target_port_id, &target->id_ext, 8);
483 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
484 }
485
aef9ec39
RD
486 /*
487 * Topspin/Cisco SRP targets will reject our login unless we
01cb9bcb
IR
488 * zero out the first 8 bytes of our initiator port ID and set
489 * the second 8 bytes to the local node GUID.
aef9ec39 490 */
5d7cbfd6 491 if (srp_target_is_topspin(target)) {
7aa54bd7
DD
492 shost_printk(KERN_DEBUG, target->scsi_host,
493 PFX "Topspin/Cisco initiator port ID workaround "
494 "activated for target GUID %016llx\n",
495 (unsigned long long) be64_to_cpu(target->ioc_guid));
aef9ec39 496 memset(req->priv.initiator_port_id, 0, 8);
01cb9bcb 497 memcpy(req->priv.initiator_port_id + 8,
05321937 498 &target->srp_host->srp_dev->dev->node_guid, 8);
aef9ec39 499 }
aef9ec39
RD
500
501 status = ib_send_cm_req(target->cm_id, &req->param);
502
503 kfree(req);
504
505 return status;
506}
507
ef6c49d8
BVA
508static bool srp_queue_remove_work(struct srp_target_port *target)
509{
510 bool changed = false;
511
512 spin_lock_irq(&target->lock);
513 if (target->state != SRP_TARGET_REMOVED) {
514 target->state = SRP_TARGET_REMOVED;
515 changed = true;
516 }
517 spin_unlock_irq(&target->lock);
518
519 if (changed)
520 queue_work(system_long_wq, &target->remove_work);
521
522 return changed;
523}
524
294c875a
BVA
525static bool srp_change_conn_state(struct srp_target_port *target,
526 bool connected)
527{
528 bool changed = false;
529
530 spin_lock_irq(&target->lock);
531 if (target->connected != connected) {
532 target->connected = connected;
533 changed = true;
534 }
535 spin_unlock_irq(&target->lock);
536
537 return changed;
538}
539
aef9ec39
RD
540static void srp_disconnect_target(struct srp_target_port *target)
541{
294c875a
BVA
542 if (srp_change_conn_state(target, false)) {
543 /* XXX should send SRP_I_LOGOUT request */
aef9ec39 544
294c875a
BVA
545 if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
546 shost_printk(KERN_DEBUG, target->scsi_host,
547 PFX "Sending CM DREQ failed\n");
294c875a 548 }
e6581056 549 }
aef9ec39
RD
550}
551
8f26c9ff
DD
552static void srp_free_req_data(struct srp_target_port *target)
553{
c07d424d 554 struct ib_device *ibdev = target->srp_host->srp_dev->dev;
8f26c9ff
DD
555 struct srp_request *req;
556 int i;
557
558 for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
559 kfree(req->fmr_list);
560 kfree(req->map_page);
c07d424d
DD
561 if (req->indirect_dma_addr) {
562 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
563 target->indirect_size,
564 DMA_TO_DEVICE);
565 }
566 kfree(req->indirect_desc);
8f26c9ff
DD
567 }
568}
569
683b159a
BVA
570/**
571 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
572 * @shost: SCSI host whose attributes to remove from sysfs.
573 *
574 * Note: Any attributes defined in the host template and that did not exist
575 * before invocation of this function will be ignored.
576 */
577static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
578{
579 struct device_attribute **attr;
580
581 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
582 device_remove_file(&shost->shost_dev, *attr);
583}
584
ee12d6a8
BVA
585static void srp_remove_target(struct srp_target_port *target)
586{
ef6c49d8
BVA
587 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
588
ee12d6a8 589 srp_del_scsi_host_attr(target->scsi_host);
9dd69a60 590 srp_rport_get(target->rport);
ee12d6a8
BVA
591 srp_remove_host(target->scsi_host);
592 scsi_remove_host(target->scsi_host);
ef6c49d8 593 srp_disconnect_target(target);
ee12d6a8
BVA
594 ib_destroy_cm_id(target->cm_id);
595 srp_free_target_ib(target);
9dd69a60 596 srp_rport_put(target->rport);
ee12d6a8
BVA
597 srp_free_req_data(target);
598 scsi_host_put(target->scsi_host);
599}
600
c4028958 601static void srp_remove_work(struct work_struct *work)
aef9ec39 602{
c4028958 603 struct srp_target_port *target =
ef6c49d8 604 container_of(work, struct srp_target_port, remove_work);
aef9ec39 605
ef6c49d8 606 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
aef9ec39 607
96fc248a
BVA
608 srp_remove_target(target);
609
b3589fd4 610 spin_lock(&target->srp_host->target_lock);
aef9ec39 611 list_del(&target->list);
b3589fd4 612 spin_unlock(&target->srp_host->target_lock);
aef9ec39
RD
613}
614
dc1bdbd9
BVA
615static void srp_rport_delete(struct srp_rport *rport)
616{
617 struct srp_target_port *target = rport->lld_data;
618
619 srp_queue_remove_work(target);
620}
621
aef9ec39
RD
622static int srp_connect_target(struct srp_target_port *target)
623{
9fe4bcf4 624 int retries = 3;
aef9ec39
RD
625 int ret;
626
294c875a
BVA
627 WARN_ON_ONCE(target->connected);
628
948d1e88
BVA
629 target->qp_in_error = false;
630
aef9ec39
RD
631 ret = srp_lookup_path(target);
632 if (ret)
633 return ret;
634
635 while (1) {
636 init_completion(&target->done);
637 ret = srp_send_req(target);
638 if (ret)
639 return ret;
640 wait_for_completion(&target->done);
641
642 /*
643 * The CM event handling code will set status to
644 * SRP_PORT_REDIRECT if we get a port redirect REJ
645 * back, or SRP_DLID_REDIRECT if we get a lid/qp
646 * redirect REJ back.
647 */
648 switch (target->status) {
649 case 0:
294c875a 650 srp_change_conn_state(target, true);
aef9ec39
RD
651 return 0;
652
653 case SRP_PORT_REDIRECT:
654 ret = srp_lookup_path(target);
655 if (ret)
656 return ret;
657 break;
658
659 case SRP_DLID_REDIRECT:
660 break;
661
9fe4bcf4
DD
662 case SRP_STALE_CONN:
663 /* Our current CM id was stale, and is now in timewait.
664 * Try to reconnect with a new one.
665 */
666 if (!retries-- || srp_new_cm_id(target)) {
667 shost_printk(KERN_ERR, target->scsi_host, PFX
668 "giving up on stale connection\n");
669 target->status = -ECONNRESET;
670 return target->status;
671 }
672
673 shost_printk(KERN_ERR, target->scsi_host, PFX
674 "retrying stale connection\n");
675 break;
676
aef9ec39
RD
677 default:
678 return target->status;
679 }
680 }
681}
682
d945e1df
RD
683static void srp_unmap_data(struct scsi_cmnd *scmnd,
684 struct srp_target_port *target,
685 struct srp_request *req)
686{
8f26c9ff
DD
687 struct ib_device *ibdev = target->srp_host->srp_dev->dev;
688 struct ib_pool_fmr **pfmr;
689
bb350d1d 690 if (!scsi_sglist(scmnd) ||
d945e1df
RD
691 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
692 scmnd->sc_data_direction != DMA_FROM_DEVICE))
693 return;
694
8f26c9ff
DD
695 pfmr = req->fmr_list;
696 while (req->nfmr--)
697 ib_fmr_pool_unmap(*pfmr++);
f5358a17 698
8f26c9ff
DD
699 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
700 scmnd->sc_data_direction);
d945e1df
RD
701}
702
22032991
BVA
703/**
704 * srp_claim_req - Take ownership of the scmnd associated with a request.
705 * @target: SRP target port.
706 * @req: SRP request.
707 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
708 * ownership of @req->scmnd if it equals @scmnd.
709 *
710 * Return value:
711 * Either NULL or a pointer to the SCSI command the caller became owner of.
712 */
713static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
714 struct srp_request *req,
715 struct scsi_cmnd *scmnd)
716{
717 unsigned long flags;
718
719 spin_lock_irqsave(&target->lock, flags);
720 if (!scmnd) {
721 scmnd = req->scmnd;
722 req->scmnd = NULL;
723 } else if (req->scmnd == scmnd) {
724 req->scmnd = NULL;
725 } else {
726 scmnd = NULL;
727 }
728 spin_unlock_irqrestore(&target->lock, flags);
729
730 return scmnd;
731}
732
733/**
734 * srp_free_req() - Unmap data and add request to the free request list.
735 */
736static void srp_free_req(struct srp_target_port *target,
737 struct srp_request *req, struct scsi_cmnd *scmnd,
738 s32 req_lim_delta)
526b4caa 739{
94a9174c
BVA
740 unsigned long flags;
741
22032991
BVA
742 srp_unmap_data(scmnd, target, req);
743
e9684678 744 spin_lock_irqsave(&target->lock, flags);
94a9174c 745 target->req_lim += req_lim_delta;
536ae14e 746 list_add_tail(&req->list, &target->free_reqs);
e9684678 747 spin_unlock_irqrestore(&target->lock, flags);
526b4caa
IR
748}
749
ed9b2264
BVA
750static void srp_finish_req(struct srp_target_port *target,
751 struct srp_request *req, int result)
526b4caa 752{
22032991
BVA
753 struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
754
755 if (scmnd) {
9b796d06 756 srp_free_req(target, req, scmnd, 0);
ed9b2264 757 scmnd->result = result;
22032991 758 scmnd->scsi_done(scmnd);
22032991 759 }
526b4caa
IR
760}
761
ed9b2264 762static void srp_terminate_io(struct srp_rport *rport)
aef9ec39 763{
ed9b2264
BVA
764 struct srp_target_port *target = rport->lld_data;
765 int i;
766
767 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
768 struct srp_request *req = &target->req_ring[i];
769 srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
770 }
771}
aef9ec39 772
ed9b2264
BVA
773/*
774 * It is up to the caller to ensure that srp_rport_reconnect() calls are
775 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
776 * srp_reset_device() or srp_reset_host() calls will occur while this function
777 * is in progress. One way to realize that is not to call this function
778 * directly but to call srp_reconnect_rport() instead since that last function
779 * serializes calls of this function via rport->mutex and also blocks
780 * srp_queuecommand() calls before invoking this function.
781 */
782static int srp_rport_reconnect(struct srp_rport *rport)
783{
784 struct srp_target_port *target = rport->lld_data;
785 int i, ret;
09be70a2 786
aef9ec39
RD
787 srp_disconnect_target(target);
788 /*
c7c4e7ff
BVA
789 * Now get a new local CM ID so that we avoid confusing the target in
790 * case things are really fouled up. Doing so also ensures that all CM
791 * callbacks will have finished before a new QP is allocated.
aef9ec39 792 */
9fe4bcf4 793 ret = srp_new_cm_id(target);
c7c4e7ff
BVA
794 /*
795 * Whether or not creating a new CM ID succeeded, create a new
796 * QP. This guarantees that all completion callback function
797 * invocations have finished before request resetting starts.
798 */
799 if (ret == 0)
800 ret = srp_create_target_ib(target);
801 else
802 srp_create_target_ib(target);
aef9ec39 803
536ae14e
BVA
804 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
805 struct srp_request *req = &target->req_ring[i];
ed9b2264 806 srp_finish_req(target, req, DID_RESET << 16);
536ae14e 807 }
aef9ec39 808
536ae14e 809 INIT_LIST_HEAD(&target->free_tx);
dcb4cb85 810 for (i = 0; i < SRP_SQ_SIZE; ++i)
536ae14e 811 list_add(&target->tx_ring[i]->list, &target->free_tx);
aef9ec39 812
c7c4e7ff
BVA
813 if (ret == 0)
814 ret = srp_connect_target(target);
09be70a2 815
ed9b2264
BVA
816 if (ret == 0)
817 shost_printk(KERN_INFO, target->scsi_host,
818 PFX "reconnect succeeded\n");
aef9ec39
RD
819
820 return ret;
821}
822
8f26c9ff
DD
823static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
824 unsigned int dma_len, u32 rkey)
f5358a17 825{
8f26c9ff 826 struct srp_direct_buf *desc = state->desc;
f5358a17 827
8f26c9ff
DD
828 desc->va = cpu_to_be64(dma_addr);
829 desc->key = cpu_to_be32(rkey);
830 desc->len = cpu_to_be32(dma_len);
f5358a17 831
8f26c9ff
DD
832 state->total_len += dma_len;
833 state->desc++;
834 state->ndesc++;
835}
559ce8f1 836
8f26c9ff
DD
837static int srp_map_finish_fmr(struct srp_map_state *state,
838 struct srp_target_port *target)
839{
840 struct srp_device *dev = target->srp_host->srp_dev;
841 struct ib_pool_fmr *fmr;
842 u64 io_addr = 0;
85507bcc 843
8f26c9ff
DD
844 if (!state->npages)
845 return 0;
f5358a17 846
8f26c9ff
DD
847 if (state->npages == 1) {
848 srp_map_desc(state, state->base_dma_addr, state->fmr_len,
849 target->rkey);
850 state->npages = state->fmr_len = 0;
851 return 0;
f5358a17
RD
852 }
853
8f26c9ff
DD
854 fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
855 state->npages, io_addr);
856 if (IS_ERR(fmr))
857 return PTR_ERR(fmr);
f5358a17 858
8f26c9ff
DD
859 *state->next_fmr++ = fmr;
860 state->nfmr++;
f5358a17 861
8f26c9ff
DD
862 srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
863 state->npages = state->fmr_len = 0;
864 return 0;
865}
866
867static void srp_map_update_start(struct srp_map_state *state,
868 struct scatterlist *sg, int sg_index,
869 dma_addr_t dma_addr)
870{
871 state->unmapped_sg = sg;
872 state->unmapped_index = sg_index;
873 state->unmapped_addr = dma_addr;
874}
85507bcc 875
8f26c9ff
DD
876static int srp_map_sg_entry(struct srp_map_state *state,
877 struct srp_target_port *target,
878 struct scatterlist *sg, int sg_index,
879 int use_fmr)
880{
881 struct srp_device *dev = target->srp_host->srp_dev;
882 struct ib_device *ibdev = dev->dev;
883 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
884 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
885 unsigned int len;
886 int ret;
887
888 if (!dma_len)
889 return 0;
890
891 if (use_fmr == SRP_MAP_NO_FMR) {
892 /* Once we're in direct map mode for a request, we don't
893 * go back to FMR mode, so no need to update anything
894 * other than the descriptor.
895 */
896 srp_map_desc(state, dma_addr, dma_len, target->rkey);
897 return 0;
85507bcc 898 }
f5358a17 899
8f26c9ff
DD
900 /* If we start at an offset into the FMR page, don't merge into
901 * the current FMR. Finish it out, and use the kernel's MR for this
902 * sg entry. This is to avoid potential bugs on some SRP targets
903 * that were never quite defined, but went away when the initiator
904 * avoided using FMR on such page fragments.
905 */
906 if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
907 ret = srp_map_finish_fmr(state, target);
908 if (ret)
909 return ret;
910
911 srp_map_desc(state, dma_addr, dma_len, target->rkey);
912 srp_map_update_start(state, NULL, 0, 0);
913 return 0;
f5358a17
RD
914 }
915
8f26c9ff
DD
916 /* If this is the first sg to go into the FMR, save our position.
917 * We need to know the first unmapped entry, its index, and the
918 * first unmapped address within that entry to be able to restart
919 * mapping after an error.
920 */
921 if (!state->unmapped_sg)
922 srp_map_update_start(state, sg, sg_index, dma_addr);
f5358a17 923
8f26c9ff
DD
924 while (dma_len) {
925 if (state->npages == SRP_FMR_SIZE) {
926 ret = srp_map_finish_fmr(state, target);
927 if (ret)
928 return ret;
f5358a17 929
8f26c9ff
DD
930 srp_map_update_start(state, sg, sg_index, dma_addr);
931 }
932
933 len = min_t(unsigned int, dma_len, dev->fmr_page_size);
f5358a17 934
8f26c9ff
DD
935 if (!state->npages)
936 state->base_dma_addr = dma_addr;
937 state->pages[state->npages++] = dma_addr;
938 state->fmr_len += len;
939 dma_addr += len;
940 dma_len -= len;
941 }
942
943 /* If the last entry of the FMR wasn't a full page, then we need to
944 * close it out and start a new one -- we can only merge at page
945 * boundries.
946 */
947 ret = 0;
948 if (len != dev->fmr_page_size) {
949 ret = srp_map_finish_fmr(state, target);
950 if (!ret)
951 srp_map_update_start(state, NULL, 0, 0);
952 }
f5358a17
RD
953 return ret;
954}
955
aef9ec39
RD
956static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
957 struct srp_request *req)
958{
8f26c9ff 959 struct scatterlist *scat, *sg;
aef9ec39 960 struct srp_cmd *cmd = req->cmd->buf;
8f26c9ff 961 int i, len, nents, count, use_fmr;
85507bcc
RC
962 struct srp_device *dev;
963 struct ib_device *ibdev;
8f26c9ff
DD
964 struct srp_map_state state;
965 struct srp_indirect_buf *indirect_hdr;
8f26c9ff
DD
966 u32 table_len;
967 u8 fmt;
aef9ec39 968
bb350d1d 969 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
aef9ec39
RD
970 return sizeof (struct srp_cmd);
971
972 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
973 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
974 shost_printk(KERN_WARNING, target->scsi_host,
975 PFX "Unhandled data direction %d\n",
976 scmnd->sc_data_direction);
aef9ec39
RD
977 return -EINVAL;
978 }
979
bb350d1d
FT
980 nents = scsi_sg_count(scmnd);
981 scat = scsi_sglist(scmnd);
aef9ec39 982
05321937 983 dev = target->srp_host->srp_dev;
85507bcc
RC
984 ibdev = dev->dev;
985
986 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
987 if (unlikely(count == 0))
988 return -EIO;
f5358a17
RD
989
990 fmt = SRP_DATA_DESC_DIRECT;
991 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
aef9ec39 992
cf368713 993 if (count == 1) {
f5358a17
RD
994 /*
995 * The midlayer only generated a single gather/scatter
996 * entry, or DMA mapping coalesced everything to a
997 * single entry. So a direct descriptor along with
998 * the DMA MR suffices.
999 */
cf368713 1000 struct srp_direct_buf *buf = (void *) cmd->add_data;
aef9ec39 1001
85507bcc 1002 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
9af76271 1003 buf->key = cpu_to_be32(target->rkey);
85507bcc 1004 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
8f26c9ff
DD
1005
1006 req->nfmr = 0;
1007 goto map_complete;
1008 }
1009
1010 /* We have more than one scatter/gather entry, so build our indirect
1011 * descriptor table, trying to merge as many entries with FMR as we
1012 * can.
1013 */
1014 indirect_hdr = (void *) cmd->add_data;
1015
c07d424d
DD
1016 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1017 target->indirect_size, DMA_TO_DEVICE);
1018
8f26c9ff 1019 memset(&state, 0, sizeof(state));
c07d424d 1020 state.desc = req->indirect_desc;
8f26c9ff
DD
1021 state.pages = req->map_page;
1022 state.next_fmr = req->fmr_list;
1023
1024 use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
1025
1026 for_each_sg(scat, sg, count, i) {
1027 if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
1028 /* FMR mapping failed, so backtrack to the first
1029 * unmapped entry and continue on without using FMR.
1030 */
1031 dma_addr_t dma_addr;
1032 unsigned int dma_len;
1033
1034backtrack:
1035 sg = state.unmapped_sg;
1036 i = state.unmapped_index;
1037
1038 dma_addr = ib_sg_dma_address(ibdev, sg);
1039 dma_len = ib_sg_dma_len(ibdev, sg);
1040 dma_len -= (state.unmapped_addr - dma_addr);
1041 dma_addr = state.unmapped_addr;
1042 use_fmr = SRP_MAP_NO_FMR;
1043 srp_map_desc(&state, dma_addr, dma_len, target->rkey);
f5358a17 1044 }
8f26c9ff 1045 }
aef9ec39 1046
8f26c9ff
DD
1047 if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
1048 goto backtrack;
cf368713 1049
c07d424d
DD
1050 /* We've mapped the request, now pull as much of the indirect
1051 * descriptor table as we can into the command buffer. If this
1052 * target is not using an external indirect table, we are
1053 * guaranteed to fit into the command, as the SCSI layer won't
1054 * give us more S/G entries than we allow.
8f26c9ff
DD
1055 */
1056 req->nfmr = state.nfmr;
1057 if (state.ndesc == 1) {
1058 /* FMR mapping was able to collapse this to one entry,
1059 * so use a direct descriptor.
1060 */
1061 struct srp_direct_buf *buf = (void *) cmd->add_data;
cf368713 1062
c07d424d 1063 *buf = req->indirect_desc[0];
8f26c9ff 1064 goto map_complete;
aef9ec39
RD
1065 }
1066
c07d424d
DD
1067 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1068 !target->allow_ext_sg)) {
1069 shost_printk(KERN_ERR, target->scsi_host,
1070 "Could not fit S/G list into SRP_CMD\n");
1071 return -EIO;
1072 }
1073
1074 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff
DD
1075 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1076
1077 fmt = SRP_DATA_DESC_INDIRECT;
1078 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1079 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1080
c07d424d
DD
1081 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1082 count * sizeof (struct srp_direct_buf));
8f26c9ff 1083
c07d424d 1084 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
8f26c9ff
DD
1085 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1086 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1087 indirect_hdr->len = cpu_to_be32(state.total_len);
1088
1089 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1090 cmd->data_out_desc_cnt = count;
8f26c9ff 1091 else
c07d424d
DD
1092 cmd->data_in_desc_cnt = count;
1093
1094 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1095 DMA_TO_DEVICE);
8f26c9ff
DD
1096
1097map_complete:
aef9ec39
RD
1098 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1099 cmd->buf_fmt = fmt << 4;
1100 else
1101 cmd->buf_fmt = fmt;
1102
aef9ec39
RD
1103 return len;
1104}
1105
76c75b25
BVA
1106/*
1107 * Return an IU and possible credit to the free pool
1108 */
1109static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
1110 enum srp_iu_type iu_type)
1111{
1112 unsigned long flags;
1113
e9684678 1114 spin_lock_irqsave(&target->lock, flags);
76c75b25
BVA
1115 list_add(&iu->list, &target->free_tx);
1116 if (iu_type != SRP_IU_RSP)
1117 ++target->req_lim;
e9684678 1118 spin_unlock_irqrestore(&target->lock, flags);
76c75b25
BVA
1119}
1120
05a1d750 1121/*
e9684678
BVA
1122 * Must be called with target->lock held to protect req_lim and free_tx.
1123 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1124 *
1125 * Note:
1126 * An upper limit for the number of allocated information units for each
1127 * request type is:
1128 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1129 * more than Scsi_Host.can_queue requests.
1130 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1131 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1132 * one unanswered SRP request to an initiator.
1133 */
1134static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
1135 enum srp_iu_type iu_type)
1136{
1137 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1138 struct srp_iu *iu;
1139
1140 srp_send_completion(target->send_cq, target);
1141
dcb4cb85 1142 if (list_empty(&target->free_tx))
05a1d750
DD
1143 return NULL;
1144
1145 /* Initiator responses to target requests do not consume credits */
76c75b25
BVA
1146 if (iu_type != SRP_IU_RSP) {
1147 if (target->req_lim <= rsv) {
1148 ++target->zero_req_lim;
1149 return NULL;
1150 }
1151
1152 --target->req_lim;
05a1d750
DD
1153 }
1154
dcb4cb85 1155 iu = list_first_entry(&target->free_tx, struct srp_iu, list);
76c75b25 1156 list_del(&iu->list);
05a1d750
DD
1157 return iu;
1158}
1159
76c75b25
BVA
1160static int srp_post_send(struct srp_target_port *target,
1161 struct srp_iu *iu, int len)
05a1d750
DD
1162{
1163 struct ib_sge list;
1164 struct ib_send_wr wr, *bad_wr;
05a1d750
DD
1165
1166 list.addr = iu->dma;
1167 list.length = len;
9af76271 1168 list.lkey = target->lkey;
05a1d750
DD
1169
1170 wr.next = NULL;
dcb4cb85 1171 wr.wr_id = (uintptr_t) iu;
05a1d750
DD
1172 wr.sg_list = &list;
1173 wr.num_sge = 1;
1174 wr.opcode = IB_WR_SEND;
1175 wr.send_flags = IB_SEND_SIGNALED;
1176
76c75b25 1177 return ib_post_send(target->qp, &wr, &bad_wr);
05a1d750
DD
1178}
1179
dcb4cb85 1180static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
c996bb47 1181{
c996bb47 1182 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1183 struct ib_sge list;
c996bb47
BVA
1184
1185 list.addr = iu->dma;
1186 list.length = iu->size;
9af76271 1187 list.lkey = target->lkey;
c996bb47
BVA
1188
1189 wr.next = NULL;
dcb4cb85 1190 wr.wr_id = (uintptr_t) iu;
c996bb47
BVA
1191 wr.sg_list = &list;
1192 wr.num_sge = 1;
1193
dcb4cb85 1194 return ib_post_recv(target->qp, &wr, &bad_wr);
c996bb47
BVA
1195}
1196
aef9ec39
RD
1197static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
1198{
1199 struct srp_request *req;
1200 struct scsi_cmnd *scmnd;
1201 unsigned long flags;
aef9ec39 1202
aef9ec39 1203 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
e9684678 1204 spin_lock_irqsave(&target->lock, flags);
94a9174c 1205 target->req_lim += be32_to_cpu(rsp->req_lim_delta);
e9684678 1206 spin_unlock_irqrestore(&target->lock, flags);
94a9174c 1207
f8b6e31e
DD
1208 target->tsk_mgmt_status = -1;
1209 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1210 target->tsk_mgmt_status = rsp->data[3];
1211 complete(&target->tsk_mgmt_done);
aef9ec39 1212 } else {
f8b6e31e 1213 req = &target->req_ring[rsp->tag];
22032991
BVA
1214 scmnd = srp_claim_req(target, req, NULL);
1215 if (!scmnd) {
7aa54bd7
DD
1216 shost_printk(KERN_ERR, target->scsi_host,
1217 "Null scmnd for RSP w/tag %016llx\n",
1218 (unsigned long long) rsp->tag);
22032991
BVA
1219
1220 spin_lock_irqsave(&target->lock, flags);
1221 target->req_lim += be32_to_cpu(rsp->req_lim_delta);
1222 spin_unlock_irqrestore(&target->lock, flags);
1223
1224 return;
1225 }
aef9ec39
RD
1226 scmnd->result = rsp->status;
1227
1228 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1229 memcpy(scmnd->sense_buffer, rsp->data +
1230 be32_to_cpu(rsp->resp_data_len),
1231 min_t(int, be32_to_cpu(rsp->sense_data_len),
1232 SCSI_SENSE_BUFFERSIZE));
1233 }
1234
1235 if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
bb350d1d 1236 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1237 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
bb350d1d 1238 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
aef9ec39 1239
22032991
BVA
1240 srp_free_req(target, req, scmnd,
1241 be32_to_cpu(rsp->req_lim_delta));
1242
f8b6e31e
DD
1243 scmnd->host_scribble = NULL;
1244 scmnd->scsi_done(scmnd);
aef9ec39 1245 }
aef9ec39
RD
1246}
1247
bb12588a
DD
1248static int srp_response_common(struct srp_target_port *target, s32 req_delta,
1249 void *rsp, int len)
1250{
76c75b25 1251 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
1252 unsigned long flags;
1253 struct srp_iu *iu;
76c75b25 1254 int err;
bb12588a 1255
e9684678 1256 spin_lock_irqsave(&target->lock, flags);
bb12588a 1257 target->req_lim += req_delta;
bb12588a 1258 iu = __srp_get_tx_iu(target, SRP_IU_RSP);
e9684678 1259 spin_unlock_irqrestore(&target->lock, flags);
76c75b25 1260
bb12588a
DD
1261 if (!iu) {
1262 shost_printk(KERN_ERR, target->scsi_host, PFX
1263 "no IU available to send response\n");
76c75b25 1264 return 1;
bb12588a
DD
1265 }
1266
1267 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1268 memcpy(iu->buf, rsp, len);
1269 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1270
76c75b25
BVA
1271 err = srp_post_send(target, iu, len);
1272 if (err) {
bb12588a
DD
1273 shost_printk(KERN_ERR, target->scsi_host, PFX
1274 "unable to post response: %d\n", err);
76c75b25
BVA
1275 srp_put_tx_iu(target, iu, SRP_IU_RSP);
1276 }
bb12588a 1277
bb12588a
DD
1278 return err;
1279}
1280
1281static void srp_process_cred_req(struct srp_target_port *target,
1282 struct srp_cred_req *req)
1283{
1284 struct srp_cred_rsp rsp = {
1285 .opcode = SRP_CRED_RSP,
1286 .tag = req->tag,
1287 };
1288 s32 delta = be32_to_cpu(req->req_lim_delta);
1289
1290 if (srp_response_common(target, delta, &rsp, sizeof rsp))
1291 shost_printk(KERN_ERR, target->scsi_host, PFX
1292 "problems processing SRP_CRED_REQ\n");
1293}
1294
1295static void srp_process_aer_req(struct srp_target_port *target,
1296 struct srp_aer_req *req)
1297{
1298 struct srp_aer_rsp rsp = {
1299 .opcode = SRP_AER_RSP,
1300 .tag = req->tag,
1301 };
1302 s32 delta = be32_to_cpu(req->req_lim_delta);
1303
1304 shost_printk(KERN_ERR, target->scsi_host, PFX
1305 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1306
1307 if (srp_response_common(target, delta, &rsp, sizeof rsp))
1308 shost_printk(KERN_ERR, target->scsi_host, PFX
1309 "problems processing SRP_AER_REQ\n");
1310}
1311
aef9ec39
RD
1312static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1313{
dcb4cb85 1314 struct ib_device *dev = target->srp_host->srp_dev->dev;
737b94eb 1315 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
c996bb47 1316 int res;
aef9ec39
RD
1317 u8 opcode;
1318
85507bcc
RC
1319 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1320 DMA_FROM_DEVICE);
aef9ec39
RD
1321
1322 opcode = *(u8 *) iu->buf;
1323
1324 if (0) {
7aa54bd7
DD
1325 shost_printk(KERN_ERR, target->scsi_host,
1326 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
1327 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1328 iu->buf, wc->byte_len, true);
aef9ec39
RD
1329 }
1330
1331 switch (opcode) {
1332 case SRP_RSP:
1333 srp_process_rsp(target, iu->buf);
1334 break;
1335
bb12588a
DD
1336 case SRP_CRED_REQ:
1337 srp_process_cred_req(target, iu->buf);
1338 break;
1339
1340 case SRP_AER_REQ:
1341 srp_process_aer_req(target, iu->buf);
1342 break;
1343
aef9ec39
RD
1344 case SRP_T_LOGOUT:
1345 /* XXX Handle target logout */
7aa54bd7
DD
1346 shost_printk(KERN_WARNING, target->scsi_host,
1347 PFX "Got target logout request\n");
aef9ec39
RD
1348 break;
1349
1350 default:
7aa54bd7
DD
1351 shost_printk(KERN_WARNING, target->scsi_host,
1352 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
1353 break;
1354 }
1355
85507bcc
RC
1356 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1357 DMA_FROM_DEVICE);
c996bb47 1358
dcb4cb85 1359 res = srp_post_recv(target, iu);
c996bb47
BVA
1360 if (res != 0)
1361 shost_printk(KERN_ERR, target->scsi_host,
1362 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
1363}
1364
948d1e88
BVA
1365static void srp_handle_qp_err(enum ib_wc_status wc_status,
1366 enum ib_wc_opcode wc_opcode,
1367 struct srp_target_port *target)
1368{
294c875a 1369 if (target->connected && !target->qp_in_error) {
4f0af697
BVA
1370 shost_printk(KERN_ERR, target->scsi_host,
1371 PFX "failed %s status %d\n",
1372 wc_opcode & IB_WC_RECV ? "receive" : "send",
1373 wc_status);
1374 }
948d1e88
BVA
1375 target->qp_in_error = true;
1376}
1377
9c03dc9f 1378static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
aef9ec39
RD
1379{
1380 struct srp_target_port *target = target_ptr;
1381 struct ib_wc wc;
aef9ec39
RD
1382
1383 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1384 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88
BVA
1385 if (likely(wc.status == IB_WC_SUCCESS)) {
1386 srp_handle_recv(target, &wc);
1387 } else {
1388 srp_handle_qp_err(wc.status, wc.opcode, target);
aef9ec39 1389 }
9c03dc9f
BVA
1390 }
1391}
1392
1393static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1394{
1395 struct srp_target_port *target = target_ptr;
1396 struct ib_wc wc;
dcb4cb85 1397 struct srp_iu *iu;
9c03dc9f
BVA
1398
1399 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88
BVA
1400 if (likely(wc.status == IB_WC_SUCCESS)) {
1401 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1402 list_add(&iu->list, &target->free_tx);
1403 } else {
1404 srp_handle_qp_err(wc.status, wc.opcode, target);
9c03dc9f 1405 }
aef9ec39
RD
1406 }
1407}
1408
76c75b25 1409static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 1410{
76c75b25 1411 struct srp_target_port *target = host_to_target(shost);
aef9ec39
RD
1412 struct srp_request *req;
1413 struct srp_iu *iu;
1414 struct srp_cmd *cmd;
85507bcc 1415 struct ib_device *dev;
76c75b25 1416 unsigned long flags;
ed9b2264 1417 int len, result;
aef9ec39 1418
ed9b2264
BVA
1419 result = srp_chkready(target->rport);
1420 if (unlikely(result)) {
1421 scmnd->result = result;
2ce19e72
BVA
1422 scmnd->scsi_done(scmnd);
1423 return 0;
1424 }
1425
e9684678 1426 spin_lock_irqsave(&target->lock, flags);
bb12588a 1427 iu = __srp_get_tx_iu(target, SRP_IU_CMD);
aef9ec39 1428 if (!iu)
695b8349
BVA
1429 goto err_unlock;
1430
1431 req = list_first_entry(&target->free_reqs, struct srp_request, list);
1432 list_del(&req->list);
1433 spin_unlock_irqrestore(&target->lock, flags);
aef9ec39 1434
05321937 1435 dev = target->srp_host->srp_dev->dev;
49248644 1436 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 1437 DMA_TO_DEVICE);
aef9ec39 1438
aef9ec39 1439 scmnd->result = 0;
f8b6e31e 1440 scmnd->host_scribble = (void *) req;
aef9ec39
RD
1441
1442 cmd = iu->buf;
1443 memset(cmd, 0, sizeof *cmd);
1444
1445 cmd->opcode = SRP_CMD;
1446 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
d945e1df 1447 cmd->tag = req->index;
aef9ec39
RD
1448 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1449
aef9ec39
RD
1450 req->scmnd = scmnd;
1451 req->cmd = iu;
aef9ec39
RD
1452
1453 len = srp_map_data(scmnd, target, req);
1454 if (len < 0) {
7aa54bd7
DD
1455 shost_printk(KERN_ERR, target->scsi_host,
1456 PFX "Failed to map data\n");
76c75b25 1457 goto err_iu;
aef9ec39
RD
1458 }
1459
49248644 1460 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 1461 DMA_TO_DEVICE);
aef9ec39 1462
76c75b25 1463 if (srp_post_send(target, iu, len)) {
7aa54bd7 1464 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
1465 goto err_unmap;
1466 }
1467
aef9ec39
RD
1468 return 0;
1469
1470err_unmap:
1471 srp_unmap_data(scmnd, target, req);
1472
76c75b25
BVA
1473err_iu:
1474 srp_put_tx_iu(target, iu, SRP_IU_CMD);
1475
e9684678 1476 spin_lock_irqsave(&target->lock, flags);
76c75b25 1477 list_add(&req->list, &target->free_reqs);
695b8349
BVA
1478
1479err_unlock:
e9684678 1480 spin_unlock_irqrestore(&target->lock, flags);
76c75b25 1481
aef9ec39
RD
1482 return SCSI_MLQUEUE_HOST_BUSY;
1483}
1484
1485static int srp_alloc_iu_bufs(struct srp_target_port *target)
1486{
1487 int i;
1488
1489 for (i = 0; i < SRP_RQ_SIZE; ++i) {
1490 target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1491 target->max_ti_iu_len,
1492 GFP_KERNEL, DMA_FROM_DEVICE);
1493 if (!target->rx_ring[i])
1494 goto err;
1495 }
1496
dd5e6e38 1497 for (i = 0; i < SRP_SQ_SIZE; ++i) {
aef9ec39 1498 target->tx_ring[i] = srp_alloc_iu(target->srp_host,
49248644 1499 target->max_iu_len,
aef9ec39
RD
1500 GFP_KERNEL, DMA_TO_DEVICE);
1501 if (!target->tx_ring[i])
1502 goto err;
dcb4cb85
BVA
1503
1504 list_add(&target->tx_ring[i]->list, &target->free_tx);
aef9ec39
RD
1505 }
1506
1507 return 0;
1508
1509err:
1510 for (i = 0; i < SRP_RQ_SIZE; ++i) {
1511 srp_free_iu(target->srp_host, target->rx_ring[i]);
1512 target->rx_ring[i] = NULL;
1513 }
1514
dd5e6e38 1515 for (i = 0; i < SRP_SQ_SIZE; ++i) {
aef9ec39
RD
1516 srp_free_iu(target->srp_host, target->tx_ring[i]);
1517 target->tx_ring[i] = NULL;
1518 }
1519
1520 return -ENOMEM;
1521}
1522
c9b03c1a
BVA
1523static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
1524{
1525 uint64_t T_tr_ns, max_compl_time_ms;
1526 uint32_t rq_tmo_jiffies;
1527
1528 /*
1529 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
1530 * table 91), both the QP timeout and the retry count have to be set
1531 * for RC QP's during the RTR to RTS transition.
1532 */
1533 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
1534 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
1535
1536 /*
1537 * Set target->rq_tmo_jiffies to one second more than the largest time
1538 * it can take before an error completion is generated. See also
1539 * C9-140..142 in the IBTA spec for more information about how to
1540 * convert the QP Local ACK Timeout value to nanoseconds.
1541 */
1542 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
1543 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
1544 do_div(max_compl_time_ms, NSEC_PER_MSEC);
1545 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
1546
1547 return rq_tmo_jiffies;
1548}
1549
961e0be8
DD
1550static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1551 struct srp_login_rsp *lrsp,
1552 struct srp_target_port *target)
1553{
1554 struct ib_qp_attr *qp_attr = NULL;
1555 int attr_mask = 0;
1556 int ret;
1557 int i;
1558
1559 if (lrsp->opcode == SRP_LOGIN_RSP) {
1560 target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1561 target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
1562
1563 /*
1564 * Reserve credits for task management so we don't
1565 * bounce requests back to the SCSI mid-layer.
1566 */
1567 target->scsi_host->can_queue
1568 = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1569 target->scsi_host->can_queue);
1570 } else {
1571 shost_printk(KERN_WARNING, target->scsi_host,
1572 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1573 ret = -ECONNRESET;
1574 goto error;
1575 }
1576
1577 if (!target->rx_ring[0]) {
1578 ret = srp_alloc_iu_bufs(target);
1579 if (ret)
1580 goto error;
1581 }
1582
1583 ret = -ENOMEM;
1584 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1585 if (!qp_attr)
1586 goto error;
1587
1588 qp_attr->qp_state = IB_QPS_RTR;
1589 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1590 if (ret)
1591 goto error_free;
1592
1593 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1594 if (ret)
1595 goto error_free;
1596
1597 for (i = 0; i < SRP_RQ_SIZE; i++) {
1598 struct srp_iu *iu = target->rx_ring[i];
1599 ret = srp_post_recv(target, iu);
1600 if (ret)
1601 goto error_free;
1602 }
1603
1604 qp_attr->qp_state = IB_QPS_RTS;
1605 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1606 if (ret)
1607 goto error_free;
1608
c9b03c1a
BVA
1609 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
1610
961e0be8
DD
1611 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1612 if (ret)
1613 goto error_free;
1614
1615 ret = ib_send_cm_rtu(cm_id, NULL, 0);
1616
1617error_free:
1618 kfree(qp_attr);
1619
1620error:
1621 target->status = ret;
1622}
1623
aef9ec39
RD
1624static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1625 struct ib_cm_event *event,
1626 struct srp_target_port *target)
1627{
7aa54bd7 1628 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
1629 struct ib_class_port_info *cpi;
1630 int opcode;
1631
1632 switch (event->param.rej_rcvd.reason) {
1633 case IB_CM_REJ_PORT_CM_REDIRECT:
1634 cpi = event->param.rej_rcvd.ari;
1635 target->path.dlid = cpi->redirect_lid;
1636 target->path.pkey = cpi->redirect_pkey;
1637 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1638 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1639
1640 target->status = target->path.dlid ?
1641 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1642 break;
1643
1644 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 1645 if (srp_target_is_topspin(target)) {
aef9ec39
RD
1646 /*
1647 * Topspin/Cisco SRP gateways incorrectly send
1648 * reject reason code 25 when they mean 24
1649 * (port redirect).
1650 */
1651 memcpy(target->path.dgid.raw,
1652 event->param.rej_rcvd.ari, 16);
1653
7aa54bd7
DD
1654 shost_printk(KERN_DEBUG, shost,
1655 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1656 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1657 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
aef9ec39
RD
1658
1659 target->status = SRP_PORT_REDIRECT;
1660 } else {
7aa54bd7
DD
1661 shost_printk(KERN_WARNING, shost,
1662 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
aef9ec39
RD
1663 target->status = -ECONNRESET;
1664 }
1665 break;
1666
1667 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
1668 shost_printk(KERN_WARNING, shost,
1669 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
aef9ec39
RD
1670 target->status = -ECONNRESET;
1671 break;
1672
1673 case IB_CM_REJ_CONSUMER_DEFINED:
1674 opcode = *(u8 *) event->private_data;
1675 if (opcode == SRP_LOGIN_REJ) {
1676 struct srp_login_rej *rej = event->private_data;
1677 u32 reason = be32_to_cpu(rej->reason);
1678
1679 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
1680 shost_printk(KERN_WARNING, shost,
1681 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 1682 else
7aa54bd7
DD
1683 shost_printk(KERN_WARNING, shost,
1684 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
aef9ec39 1685 } else
7aa54bd7
DD
1686 shost_printk(KERN_WARNING, shost,
1687 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1688 " opcode 0x%02x\n", opcode);
aef9ec39
RD
1689 target->status = -ECONNRESET;
1690 break;
1691
9fe4bcf4
DD
1692 case IB_CM_REJ_STALE_CONN:
1693 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
1694 target->status = SRP_STALE_CONN;
1695 break;
1696
aef9ec39 1697 default:
7aa54bd7
DD
1698 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
1699 event->param.rej_rcvd.reason);
aef9ec39
RD
1700 target->status = -ECONNRESET;
1701 }
1702}
1703
1704static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1705{
1706 struct srp_target_port *target = cm_id->context;
aef9ec39 1707 int comp = 0;
aef9ec39
RD
1708
1709 switch (event->event) {
1710 case IB_CM_REQ_ERROR:
7aa54bd7
DD
1711 shost_printk(KERN_DEBUG, target->scsi_host,
1712 PFX "Sending CM REQ failed\n");
aef9ec39
RD
1713 comp = 1;
1714 target->status = -ECONNRESET;
1715 break;
1716
1717 case IB_CM_REP_RECEIVED:
1718 comp = 1;
961e0be8 1719 srp_cm_rep_handler(cm_id, event->private_data, target);
aef9ec39
RD
1720 break;
1721
1722 case IB_CM_REJ_RECEIVED:
7aa54bd7 1723 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
1724 comp = 1;
1725
1726 srp_cm_rej_handler(cm_id, event, target);
1727 break;
1728
b7ac4ab4 1729 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
1730 shost_printk(KERN_WARNING, target->scsi_host,
1731 PFX "DREQ received - connection closed\n");
294c875a 1732 srp_change_conn_state(target, false);
b7ac4ab4 1733 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
1734 shost_printk(KERN_ERR, target->scsi_host,
1735 PFX "Sending CM DREP failed\n");
aef9ec39
RD
1736 break;
1737
1738 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
1739 shost_printk(KERN_ERR, target->scsi_host,
1740 PFX "connection closed\n");
aef9ec39 1741
aef9ec39
RD
1742 target->status = 0;
1743 break;
1744
b7ac4ab4
IR
1745 case IB_CM_MRA_RECEIVED:
1746 case IB_CM_DREQ_ERROR:
1747 case IB_CM_DREP_RECEIVED:
1748 break;
1749
aef9ec39 1750 default:
7aa54bd7
DD
1751 shost_printk(KERN_WARNING, target->scsi_host,
1752 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
1753 break;
1754 }
1755
1756 if (comp)
1757 complete(&target->done);
1758
aef9ec39
RD
1759 return 0;
1760}
1761
d945e1df 1762static int srp_send_tsk_mgmt(struct srp_target_port *target,
f8b6e31e 1763 u64 req_tag, unsigned int lun, u8 func)
aef9ec39 1764{
19081f31 1765 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
1766 struct srp_iu *iu;
1767 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 1768
3780d1f0
BVA
1769 if (!target->connected || target->qp_in_error)
1770 return -1;
1771
f8b6e31e 1772 init_completion(&target->tsk_mgmt_done);
aef9ec39 1773
e9684678 1774 spin_lock_irq(&target->lock);
bb12588a 1775 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
e9684678 1776 spin_unlock_irq(&target->lock);
76c75b25 1777
aef9ec39 1778 if (!iu)
76c75b25 1779 return -1;
aef9ec39 1780
19081f31
DD
1781 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1782 DMA_TO_DEVICE);
aef9ec39
RD
1783 tsk_mgmt = iu->buf;
1784 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1785
1786 tsk_mgmt->opcode = SRP_TSK_MGMT;
f8b6e31e
DD
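	/*
	 * Note on the encoding below: the SRP LUN field is an eight-byte
	 * SCSI LUN structure.  Shifting the single-level LUN into the top
	 * 16 bits and converting to big endian stores it in the first level
	 * of that structure (peripheral device addressing for LUNs below
	 * 256), matching what int_to_scsilun() would produce for such LUNs.
	 */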
1787 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
1788 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 1789 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 1790 tsk_mgmt->task_tag = req_tag;
aef9ec39 1791
19081f31
DD
1792 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1793 DMA_TO_DEVICE);
76c75b25
BVA
1794 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
1795 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
1796 return -1;
1797 }
d945e1df 1798
f8b6e31e 1799 if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
aef9ec39 1800 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 1801 return -1;
aef9ec39 1802
d945e1df 1803 return 0;
d945e1df
RD
1804}
1805
aef9ec39
RD
1806static int srp_abort(struct scsi_cmnd *scmnd)
1807{
d945e1df 1808 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 1809 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
086f44f5 1810 int ret;
d945e1df 1811
7aa54bd7 1812 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 1813
c7c4e7ff 1814 if (!req || !srp_claim_req(target, req, scmnd))
d945e1df 1815 return FAILED;
086f44f5 1816 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
80d5e8a2 1817 SRP_TSK_ABORT_TASK) == 0)
086f44f5 1818 ret = SUCCESS;
ed9b2264 1819 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 1820 ret = FAST_IO_FAIL;
086f44f5
BVA
1821 else
1822 ret = FAILED;
22032991
BVA
1823 srp_free_req(target, req, scmnd, 0);
1824 scmnd->result = DID_ABORT << 16;
d8536670 1825 scmnd->scsi_done(scmnd);
d945e1df 1826
086f44f5 1827 return ret;
aef9ec39
RD
1828}
1829
1830static int srp_reset_device(struct scsi_cmnd *scmnd)
1831{
d945e1df 1832 struct srp_target_port *target = host_to_target(scmnd->device->host);
536ae14e 1833 int i;
d945e1df 1834
7aa54bd7 1835 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 1836
f8b6e31e
DD
1837 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
1838 SRP_TSK_LUN_RESET))
d945e1df 1839 return FAILED;
f8b6e31e 1840 if (target->tsk_mgmt_status)
d945e1df
RD
1841 return FAILED;
1842
536ae14e
BVA
1843 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
1844 struct srp_request *req = &target->req_ring[i];
f8b6e31e 1845 if (req->scmnd && req->scmnd->device == scmnd->device)
ed9b2264 1846 srp_finish_req(target, req, DID_RESET << 16);
536ae14e 1847 }
d945e1df 1848
d945e1df 1849 return SUCCESS;
aef9ec39
RD
1850}
1851
1852static int srp_reset_host(struct scsi_cmnd *scmnd)
1853{
1854 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 1855
7aa54bd7 1856 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 1857
ed9b2264 1858 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
1859}
1860
c9b03c1a
BVA
1861static int srp_slave_configure(struct scsi_device *sdev)
1862{
1863 struct Scsi_Host *shost = sdev->host;
1864 struct srp_target_port *target = host_to_target(shost);
1865 struct request_queue *q = sdev->request_queue;
1866 unsigned long timeout;
1867
1868 if (sdev->type == TYPE_DISK) {
1869 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
1870 blk_queue_rq_timeout(q, timeout);
1871 }
1872
1873 return 0;
1874}
1875
ee959b00
TJ
1876static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
1877 char *buf)
6ecb0c84 1878{
ee959b00 1879 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 1880
6ecb0c84
RD
1881 return sprintf(buf, "0x%016llx\n",
1882 (unsigned long long) be64_to_cpu(target->id_ext));
1883}
1884
ee959b00
TJ
1885static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
1886 char *buf)
6ecb0c84 1887{
ee959b00 1888 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 1889
6ecb0c84
RD
1890 return sprintf(buf, "0x%016llx\n",
1891 (unsigned long long) be64_to_cpu(target->ioc_guid));
1892}
1893
ee959b00
TJ
1894static ssize_t show_service_id(struct device *dev,
1895 struct device_attribute *attr, char *buf)
6ecb0c84 1896{
ee959b00 1897 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 1898
6ecb0c84
RD
1899 return sprintf(buf, "0x%016llx\n",
1900 (unsigned long long) be64_to_cpu(target->service_id));
1901}
1902
ee959b00
TJ
1903static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
1904 char *buf)
6ecb0c84 1905{
ee959b00 1906 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 1907
6ecb0c84
RD
1908 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
1909}
1910
ee959b00
TJ
1911static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
1912 char *buf)
6ecb0c84 1913{
ee959b00 1914 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 1915
5b095d98 1916 return sprintf(buf, "%pI6\n", target->path.dgid.raw);
6ecb0c84
RD
1917}
1918
ee959b00
TJ
1919static ssize_t show_orig_dgid(struct device *dev,
1920 struct device_attribute *attr, char *buf)
3633b3d0 1921{
ee959b00 1922 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 1923
5b095d98 1924 return sprintf(buf, "%pI6\n", target->orig_dgid);
3633b3d0
IR
1925}
1926
89de7486
BVA
1927static ssize_t show_req_lim(struct device *dev,
1928 struct device_attribute *attr, char *buf)
1929{
1930 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1931
89de7486
BVA
1932 return sprintf(buf, "%d\n", target->req_lim);
1933}
1934
ee959b00
TJ
1935static ssize_t show_zero_req_lim(struct device *dev,
1936 struct device_attribute *attr, char *buf)
6bfa24fa 1937{
ee959b00 1938 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 1939
6bfa24fa
RD
1940 return sprintf(buf, "%d\n", target->zero_req_lim);
1941}
1942
ee959b00
TJ
1943static ssize_t show_local_ib_port(struct device *dev,
1944 struct device_attribute *attr, char *buf)
ded7f1a1 1945{
ee959b00 1946 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
1947
1948 return sprintf(buf, "%d\n", target->srp_host->port);
1949}
1950
ee959b00
TJ
1951static ssize_t show_local_ib_device(struct device *dev,
1952 struct device_attribute *attr, char *buf)
ded7f1a1 1953{
ee959b00 1954 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 1955
05321937 1956 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
1957}
1958
4b5e5f41
BVA
1959static ssize_t show_comp_vector(struct device *dev,
1960 struct device_attribute *attr, char *buf)
1961{
1962 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1963
1964 return sprintf(buf, "%d\n", target->comp_vector);
1965}
1966
7bb312e4
VP
1967static ssize_t show_tl_retry_count(struct device *dev,
1968 struct device_attribute *attr, char *buf)
1969{
1970 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1971
1972 return sprintf(buf, "%d\n", target->tl_retry_count);
1973}
1974
49248644
DD
1975static ssize_t show_cmd_sg_entries(struct device *dev,
1976 struct device_attribute *attr, char *buf)
1977{
1978 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1979
1980 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
1981}
1982
c07d424d
DD
1983static ssize_t show_allow_ext_sg(struct device *dev,
1984 struct device_attribute *attr, char *buf)
1985{
1986 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1987
1988 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
1989}
1990
ee959b00
TJ
1991static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
1992static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
1993static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
1994static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
1995static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
1996static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 1997static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
1998static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
1999static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2000static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
4b5e5f41 2001static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2002static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2003static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2004static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2005
2006static struct device_attribute *srp_host_attrs[] = {
2007 &dev_attr_id_ext,
2008 &dev_attr_ioc_guid,
2009 &dev_attr_service_id,
2010 &dev_attr_pkey,
2011 &dev_attr_dgid,
2012 &dev_attr_orig_dgid,
89de7486 2013 &dev_attr_req_lim,
ee959b00
TJ
2014 &dev_attr_zero_req_lim,
2015 &dev_attr_local_ib_port,
2016 &dev_attr_local_ib_device,
4b5e5f41 2017 &dev_attr_comp_vector,
7bb312e4 2018 &dev_attr_tl_retry_count,
49248644 2019 &dev_attr_cmd_sg_entries,
c07d424d 2020 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2021 NULL
2022};
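
/*
 * The attributes above are exported through the SCSI host class device of
 * each target port, i.e. they should appear under
 * /sys/class/scsi_host/host<n>/ once scsi_add_host() has assigned the
 * host number <n>.
 */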
2023
aef9ec39
RD
2024static struct scsi_host_template srp_template = {
2025 .module = THIS_MODULE,
b7f008fd
RD
2026 .name = "InfiniBand SRP initiator",
2027 .proc_name = DRV_NAME,
c9b03c1a 2028 .slave_configure = srp_slave_configure,
aef9ec39
RD
2029 .info = srp_target_info,
2030 .queuecommand = srp_queuecommand,
2031 .eh_abort_handler = srp_abort,
2032 .eh_device_reset_handler = srp_reset_device,
2033 .eh_host_reset_handler = srp_reset_host,
2742c1da 2034 .skip_settle_delay = true,
49248644 2035 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
dd5e6e38 2036 .can_queue = SRP_CMD_SQ_SIZE,
aef9ec39 2037 .this_id = -1,
dd5e6e38 2038 .cmd_per_lun = SRP_CMD_SQ_SIZE,
6ecb0c84
RD
2039 .use_clustering = ENABLE_CLUSTERING,
2040 .shost_attrs = srp_host_attrs
aef9ec39
RD
2041};
2042
2043static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2044{
3236822b
FT
2045 struct srp_rport_identifiers ids;
2046 struct srp_rport *rport;
2047
aef9ec39
RD
2048 sprintf(target->target_name, "SRP.T10:%016llX",
2049 (unsigned long long) be64_to_cpu(target->id_ext));
2050
05321937 2051 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2052 return -ENODEV;
2053
3236822b
FT
2054 memcpy(ids.port_id, &target->id_ext, 8);
2055 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2056 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2057 rport = srp_rport_add(target->scsi_host, &ids);
2058 if (IS_ERR(rport)) {
2059 scsi_remove_host(target->scsi_host);
2060 return PTR_ERR(rport);
2061 }
2062
dc1bdbd9 2063 rport->lld_data = target;
9dd69a60 2064 target->rport = rport;
dc1bdbd9 2065
b3589fd4 2066 spin_lock(&host->target_lock);
aef9ec39 2067 list_add_tail(&target->list, &host->target_list);
b3589fd4 2068 spin_unlock(&host->target_lock);
aef9ec39
RD
2069
2070 target->state = SRP_TARGET_LIVE;
2071
aef9ec39 2072 scsi_scan_target(&target->scsi_host->shost_gendev,
1962a4a1 2073 0, target->scsi_id, SCAN_WILD_CARD, 0);
aef9ec39
RD
2074
2075 return 0;
2076}
2077
ee959b00 2078static void srp_release_dev(struct device *dev)
aef9ec39
RD
2079{
2080 struct srp_host *host =
ee959b00 2081 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2082
2083 complete(&host->released);
2084}
2085
2086static struct class srp_class = {
2087 .name = "infiniband_srp",
ee959b00 2088 .dev_release = srp_release_dev
aef9ec39
RD
2089};
2090
96fc248a
BVA
2091/**
 2092 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host against which existing logins are checked.
 * @target: SRP target port that is about to be logged in.
 *
 * Return: true if no other target port on @host uses the same id_ext,
 * ioc_guid and initiator_ext; false otherwise, or if @target has already
 * been removed.
 2093 */
2094static bool srp_conn_unique(struct srp_host *host,
2095 struct srp_target_port *target)
2096{
2097 struct srp_target_port *t;
2098 bool ret = false;
2099
2100 if (target->state == SRP_TARGET_REMOVED)
2101 goto out;
2102
2103 ret = true;
2104
2105 spin_lock(&host->target_lock);
2106 list_for_each_entry(t, &host->target_list, list) {
2107 if (t != target &&
2108 target->id_ext == t->id_ext &&
2109 target->ioc_guid == t->ioc_guid &&
2110 target->initiator_ext == t->initiator_ext) {
2111 ret = false;
2112 break;
2113 }
2114 }
2115 spin_unlock(&host->target_lock);
2116
2117out:
2118 return ret;
2119}
2120
aef9ec39
RD
2121/*
2122 * Target ports are added by writing
2123 *
2124 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2125 * pkey=<P_Key>,service_id=<service ID>
2126 *
2127 * to the add_target sysfs attribute.
2128 */
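
/*
 * Illustrative example (not part of the driver source): with the mandatory
 * options above, a target port is typically created from user space with
 * something like the following, where the device name, port number and all
 * identifier values are made-up placeholders:
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" \
 *        > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * The full set of recognized option keywords is listed in srp_opt_tokens
 * below.
 */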
2129enum {
2130 SRP_OPT_ERR = 0,
2131 SRP_OPT_ID_EXT = 1 << 0,
2132 SRP_OPT_IOC_GUID = 1 << 1,
2133 SRP_OPT_DGID = 1 << 2,
2134 SRP_OPT_PKEY = 1 << 3,
2135 SRP_OPT_SERVICE_ID = 1 << 4,
2136 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2137 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2138 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2139 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2140 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
2141 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2142 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 2143 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 2144 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
aef9ec39
RD
2145 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2146 SRP_OPT_IOC_GUID |
2147 SRP_OPT_DGID |
2148 SRP_OPT_PKEY |
2149 SRP_OPT_SERVICE_ID),
2150};
2151
a447c093 2152static const match_table_t srp_opt_tokens = {
52fb2b50
VP
2153 { SRP_OPT_ID_EXT, "id_ext=%s" },
2154 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2155 { SRP_OPT_DGID, "dgid=%s" },
2156 { SRP_OPT_PKEY, "pkey=%x" },
2157 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2158 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2159 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 2160 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 2161 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 2162 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
2163 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2164 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 2165 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 2166 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
52fb2b50 2167 { SRP_OPT_ERR, NULL }
aef9ec39
RD
2168};
2169
2170static int srp_parse_options(const char *buf, struct srp_target_port *target)
2171{
2172 char *options, *sep_opt;
2173 char *p;
2174 char dgid[3];
2175 substring_t args[MAX_OPT_ARGS];
2176 int opt_mask = 0;
2177 int token;
2178 int ret = -EINVAL;
2179 int i;
2180
2181 options = kstrdup(buf, GFP_KERNEL);
2182 if (!options)
2183 return -ENOMEM;
2184
2185 sep_opt = options;
2186 while ((p = strsep(&sep_opt, ",")) != NULL) {
2187 if (!*p)
2188 continue;
2189
2190 token = match_token(p, srp_opt_tokens, args);
2191 opt_mask |= token;
2192
2193 switch (token) {
2194 case SRP_OPT_ID_EXT:
2195 p = match_strdup(args);
a20f3a6d
IR
2196 if (!p) {
2197 ret = -ENOMEM;
2198 goto out;
2199 }
aef9ec39
RD
2200 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2201 kfree(p);
2202 break;
2203
2204 case SRP_OPT_IOC_GUID:
2205 p = match_strdup(args);
a20f3a6d
IR
2206 if (!p) {
2207 ret = -ENOMEM;
2208 goto out;
2209 }
aef9ec39
RD
2210 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2211 kfree(p);
2212 break;
2213
2214 case SRP_OPT_DGID:
2215 p = match_strdup(args);
a20f3a6d
IR
2216 if (!p) {
2217 ret = -ENOMEM;
2218 goto out;
2219 }
aef9ec39 2220 if (strlen(p) != 32) {
e0bda7d8 2221 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 2222 kfree(p);
aef9ec39
RD
2223 goto out;
2224 }
2225
2226 for (i = 0; i < 16; ++i) {
2227 strlcpy(dgid, p + i * 2, 3);
2228 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2229 }
bf17c1c7 2230 kfree(p);
3633b3d0 2231 memcpy(target->orig_dgid, target->path.dgid.raw, 16);
aef9ec39
RD
2232 break;
2233
2234 case SRP_OPT_PKEY:
2235 if (match_hex(args, &token)) {
e0bda7d8 2236 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
2237 goto out;
2238 }
2239 target->path.pkey = cpu_to_be16(token);
2240 break;
2241
2242 case SRP_OPT_SERVICE_ID:
2243 p = match_strdup(args);
a20f3a6d
IR
2244 if (!p) {
2245 ret = -ENOMEM;
2246 goto out;
2247 }
aef9ec39 2248 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
247e020e 2249 target->path.service_id = target->service_id;
aef9ec39
RD
2250 kfree(p);
2251 break;
2252
2253 case SRP_OPT_MAX_SECT:
2254 if (match_int(args, &token)) {
e0bda7d8 2255 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
2256 goto out;
2257 }
2258 target->scsi_host->max_sectors = token;
2259 break;
2260
52fb2b50
VP
2261 case SRP_OPT_MAX_CMD_PER_LUN:
2262 if (match_int(args, &token)) {
e0bda7d8
BVA
2263 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2264 p);
52fb2b50
VP
2265 goto out;
2266 }
dd5e6e38 2267 target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
52fb2b50
VP
2268 break;
2269
0c0450db
R
2270 case SRP_OPT_IO_CLASS:
2271 if (match_hex(args, &token)) {
e0bda7d8 2272 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
2273 goto out;
2274 }
2275 if (token != SRP_REV10_IB_IO_CLASS &&
2276 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
2277 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2278 token, SRP_REV10_IB_IO_CLASS,
2279 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
2280 goto out;
2281 }
2282 target->io_class = token;
2283 break;
2284
01cb9bcb
IR
2285 case SRP_OPT_INITIATOR_EXT:
2286 p = match_strdup(args);
a20f3a6d
IR
2287 if (!p) {
2288 ret = -ENOMEM;
2289 goto out;
2290 }
01cb9bcb
IR
2291 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2292 kfree(p);
2293 break;
2294
49248644
DD
2295 case SRP_OPT_CMD_SG_ENTRIES:
2296 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
2297 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2298 p);
49248644
DD
2299 goto out;
2300 }
2301 target->cmd_sg_cnt = token;
2302 break;
2303
c07d424d
DD
2304 case SRP_OPT_ALLOW_EXT_SG:
2305 if (match_int(args, &token)) {
e0bda7d8 2306 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
2307 goto out;
2308 }
2309 target->allow_ext_sg = !!token;
2310 break;
2311
2312 case SRP_OPT_SG_TABLESIZE:
2313 if (match_int(args, &token) || token < 1 ||
2314 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
e0bda7d8
BVA
2315 pr_warn("bad max sg_tablesize parameter '%s'\n",
2316 p);
c07d424d
DD
2317 goto out;
2318 }
2319 target->sg_tablesize = token;
2320 break;
2321
4b5e5f41
BVA
2322 case SRP_OPT_COMP_VECTOR:
2323 if (match_int(args, &token) || token < 0) {
2324 pr_warn("bad comp_vector parameter '%s'\n", p);
2325 goto out;
2326 }
2327 target->comp_vector = token;
2328 break;
2329
7bb312e4
VP
2330 case SRP_OPT_TL_RETRY_COUNT:
2331 if (match_int(args, &token) || token < 2 || token > 7) {
2332 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
2333 p);
2334 goto out;
2335 }
2336 target->tl_retry_count = token;
2337 break;
2338
aef9ec39 2339 default:
e0bda7d8
BVA
2340 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2341 p);
aef9ec39
RD
2342 goto out;
2343 }
2344 }
2345
2346 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2347 ret = 0;
2348 else
2349 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2350 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2351 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
2352 pr_warn("target creation request is missing parameter '%s'\n",
2353 srp_opt_tokens[i].pattern);
aef9ec39
RD
2354
2355out:
2356 kfree(options);
2357 return ret;
2358}
2359
ee959b00
TJ
2360static ssize_t srp_create_target(struct device *dev,
2361 struct device_attribute *attr,
aef9ec39
RD
2362 const char *buf, size_t count)
2363{
2364 struct srp_host *host =
ee959b00 2365 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2366 struct Scsi_Host *target_host;
2367 struct srp_target_port *target;
c07d424d
DD
2368 struct ib_device *ibdev = host->srp_dev->dev;
2369 dma_addr_t dma_addr;
8f26c9ff 2370 int i, ret;
aef9ec39
RD
2371
2372 target_host = scsi_host_alloc(&srp_template,
2373 sizeof (struct srp_target_port));
2374 if (!target_host)
2375 return -ENOMEM;
2376
49248644 2377 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
2378 target_host->max_channel = 0;
2379 target_host->max_id = 1;
3c8edf0e
AR
2380 target_host->max_lun = SRP_MAX_LUN;
2381 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 2382
aef9ec39 2383 target = host_to_target(target_host);
aef9ec39 2384
49248644
DD
2385 target->io_class = SRP_REV16A_IB_IO_CLASS;
2386 target->scsi_host = target_host;
2387 target->srp_host = host;
2388 target->lkey = host->srp_dev->mr->lkey;
2389 target->rkey = host->srp_dev->mr->rkey;
2390 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
2391 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
2392 target->allow_ext_sg = allow_ext_sg;
7bb312e4 2393 target->tl_retry_count = 7;
aef9ec39 2394
aef9ec39
RD
2395 ret = srp_parse_options(buf, target);
2396 if (ret)
2397 goto err;
2398
96fc248a
BVA
2399 if (!srp_conn_unique(target->srp_host, target)) {
2400 shost_printk(KERN_INFO, target->scsi_host,
2401 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
2402 be64_to_cpu(target->id_ext),
2403 be64_to_cpu(target->ioc_guid),
2404 be64_to_cpu(target->initiator_ext));
2405 ret = -EEXIST;
2406 goto err;
2407 }
2408
c07d424d
DD
2409 if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
2410 target->cmd_sg_cnt < target->sg_tablesize) {
e0bda7d8 2411 pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
2412 target->sg_tablesize = target->cmd_sg_cnt;
2413 }
2414
2415 target_host->sg_tablesize = target->sg_tablesize;
2416 target->indirect_size = target->sg_tablesize *
2417 sizeof (struct srp_direct_buf);
49248644
DD
2418 target->max_iu_len = sizeof (struct srp_cmd) +
2419 sizeof (struct srp_indirect_buf) +
2420 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
2421
ef6c49d8 2422 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff
DD
2423 spin_lock_init(&target->lock);
2424 INIT_LIST_HEAD(&target->free_tx);
2425 INIT_LIST_HEAD(&target->free_reqs);
2426 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
2427 struct srp_request *req = &target->req_ring[i];
2428
2429 req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
2430 GFP_KERNEL);
2431 req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
2432 GFP_KERNEL);
c07d424d
DD
2433 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
2434 if (!req->fmr_list || !req->map_page || !req->indirect_desc)
8f26c9ff
DD
2435 goto err_free_mem;
2436
c07d424d
DD
2437 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
2438 target->indirect_size,
2439 DMA_TO_DEVICE);
2440 if (ib_dma_mapping_error(ibdev, dma_addr))
2441 goto err_free_mem;
2442
2443 req->indirect_dma_addr = dma_addr;
8f26c9ff
DD
2444 req->index = i;
2445 list_add_tail(&req->list, &target->free_reqs);
2446 }
2447
c07d424d 2448 ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
aef9ec39 2449
7aa54bd7
DD
2450 shost_printk(KERN_DEBUG, target->scsi_host, PFX
2451 "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
5b095d98 2452 "service_id %016llx dgid %pI6\n",
aef9ec39
RD
2453 (unsigned long long) be64_to_cpu(target->id_ext),
2454 (unsigned long long) be64_to_cpu(target->ioc_guid),
2455 be16_to_cpu(target->path.pkey),
2456 (unsigned long long) be64_to_cpu(target->service_id),
8867cd7c 2457 target->path.dgid.raw);
aef9ec39
RD
2458
2459 ret = srp_create_target_ib(target);
2460 if (ret)
8f26c9ff 2461 goto err_free_mem;
aef9ec39 2462
9fe4bcf4
DD
2463 ret = srp_new_cm_id(target);
2464 if (ret)
8f26c9ff 2465 goto err_free_ib;
aef9ec39
RD
2466
2467 ret = srp_connect_target(target);
2468 if (ret) {
7aa54bd7
DD
2469 shost_printk(KERN_ERR, target->scsi_host,
2470 PFX "Connection failed\n");
aef9ec39
RD
2471 goto err_cm_id;
2472 }
2473
2474 ret = srp_add_target(host, target);
2475 if (ret)
2476 goto err_disconnect;
2477
2478 return count;
2479
2480err_disconnect:
2481 srp_disconnect_target(target);
2482
2483err_cm_id:
2484 ib_destroy_cm_id(target->cm_id);
2485
8f26c9ff 2486err_free_ib:
aef9ec39
RD
2487 srp_free_target_ib(target);
2488
8f26c9ff
DD
2489err_free_mem:
2490 srp_free_req_data(target);
2491
aef9ec39
RD
2492err:
2493 scsi_host_put(target_host);
2494
2495 return ret;
2496}
2497
ee959b00 2498static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 2499
ee959b00
TJ
2500static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
2501 char *buf)
aef9ec39 2502{
ee959b00 2503 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 2504
05321937 2505 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
2506}
2507
ee959b00 2508static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 2509
ee959b00
TJ
2510static ssize_t show_port(struct device *dev, struct device_attribute *attr,
2511 char *buf)
aef9ec39 2512{
ee959b00 2513 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
2514
2515 return sprintf(buf, "%d\n", host->port);
2516}
2517
ee959b00 2518static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 2519
f5358a17 2520static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
2521{
2522 struct srp_host *host;
2523
2524 host = kzalloc(sizeof *host, GFP_KERNEL);
2525 if (!host)
2526 return NULL;
2527
2528 INIT_LIST_HEAD(&host->target_list);
b3589fd4 2529 spin_lock_init(&host->target_lock);
aef9ec39 2530 init_completion(&host->released);
05321937 2531 host->srp_dev = device;
aef9ec39
RD
2532 host->port = port;
2533
ee959b00
TJ
2534 host->dev.class = &srp_class;
2535 host->dev.parent = device->dev->dma_device;
d927e38c 2536 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 2537
ee959b00 2538 if (device_register(&host->dev))
f5358a17 2539 goto free_host;
ee959b00 2540 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 2541 goto err_class;
ee959b00 2542 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 2543 goto err_class;
ee959b00 2544 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
2545 goto err_class;
2546
2547 return host;
2548
2549err_class:
ee959b00 2550 device_unregister(&host->dev);
aef9ec39 2551
f5358a17 2552free_host:
aef9ec39
RD
2553 kfree(host);
2554
2555 return NULL;
2556}
2557
2558static void srp_add_one(struct ib_device *device)
2559{
f5358a17
RD
2560 struct srp_device *srp_dev;
2561 struct ib_device_attr *dev_attr;
2562 struct ib_fmr_pool_param fmr_param;
aef9ec39 2563 struct srp_host *host;
be8b9814 2564 int max_pages_per_fmr, fmr_page_shift, s, e, p;
aef9ec39 2565
f5358a17
RD
2566 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
2567 if (!dev_attr)
cf311cd4 2568 return;
aef9ec39 2569
f5358a17 2570 if (ib_query_device(device, dev_attr)) {
e0bda7d8 2571 pr_warn("Query device failed for %s\n", device->name);
f5358a17
RD
2572 goto free_attr;
2573 }
2574
2575 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
2576 if (!srp_dev)
2577 goto free_attr;
2578
2579 /*
2580 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
2581 * minimum of 4096 bytes. We're unlikely to build large sglists
2582 * out of smaller entries.
f5358a17 2583 */
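	/*
	 * For illustration (the capability value is hypothetical): a
	 * page_size_cap of 0xfffff000 means 4 KB is the smallest supported
	 * page size, ffs() returns 13, and fmr_page_shift becomes
	 * max(12, 12) = 12, i.e. 4096-byte FMR pages.
	 */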
8f26c9ff
DD
2584 fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
2585 srp_dev->fmr_page_size = 1 << fmr_page_shift;
2586 srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
2587 srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE;
f5358a17
RD
2588
2589 INIT_LIST_HEAD(&srp_dev->dev_list);
2590
2591 srp_dev->dev = device;
2592 srp_dev->pd = ib_alloc_pd(device);
2593 if (IS_ERR(srp_dev->pd))
2594 goto free_dev;
2595
2596 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
2597 IB_ACCESS_LOCAL_WRITE |
2598 IB_ACCESS_REMOTE_READ |
2599 IB_ACCESS_REMOTE_WRITE);
2600 if (IS_ERR(srp_dev->mr))
2601 goto err_pd;
2602
be8b9814
DD
2603 for (max_pages_per_fmr = SRP_FMR_SIZE;
2604 max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
2605 max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
2606 memset(&fmr_param, 0, sizeof fmr_param);
2607 fmr_param.pool_size = SRP_FMR_POOL_SIZE;
2608 fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;
2609 fmr_param.cache = 1;
2610 fmr_param.max_pages_per_fmr = max_pages_per_fmr;
2611 fmr_param.page_shift = fmr_page_shift;
2612 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
2613 IB_ACCESS_REMOTE_WRITE |
2614 IB_ACCESS_REMOTE_READ);
2615
2616 srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
2617 if (!IS_ERR(srp_dev->fmr_pool))
2618 break;
2619 }
2620
f5358a17
RD
2621 if (IS_ERR(srp_dev->fmr_pool))
2622 srp_dev->fmr_pool = NULL;
aef9ec39 2623
07ebafba 2624 if (device->node_type == RDMA_NODE_IB_SWITCH) {
aef9ec39
RD
2625 s = 0;
2626 e = 0;
2627 } else {
2628 s = 1;
2629 e = device->phys_port_cnt;
2630 }
2631
2632 for (p = s; p <= e; ++p) {
f5358a17 2633 host = srp_add_port(srp_dev, p);
aef9ec39 2634 if (host)
f5358a17 2635 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
2636 }
2637
f5358a17
RD
2638 ib_set_client_data(device, &srp_client, srp_dev);
2639
2640 goto free_attr;
2641
2642err_pd:
2643 ib_dealloc_pd(srp_dev->pd);
2644
2645free_dev:
2646 kfree(srp_dev);
2647
2648free_attr:
2649 kfree(dev_attr);
aef9ec39
RD
2650}
2651
2652static void srp_remove_one(struct ib_device *device)
2653{
f5358a17 2654 struct srp_device *srp_dev;
aef9ec39 2655 struct srp_host *host, *tmp_host;
ef6c49d8 2656 struct srp_target_port *target;
aef9ec39 2657
f5358a17 2658 srp_dev = ib_get_client_data(device, &srp_client);
1fe0cb84
DB
2659 if (!srp_dev)
2660 return;
aef9ec39 2661
f5358a17 2662 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 2663 device_unregister(&host->dev);
aef9ec39
RD
2664 /*
2665 * Wait for the sysfs entry to go away, so that no new
2666 * target ports can be created.
2667 */
2668 wait_for_completion(&host->released);
2669
2670 /*
ef6c49d8 2671 * Remove all target ports.
aef9ec39 2672 */
b3589fd4 2673 spin_lock(&host->target_lock);
ef6c49d8
BVA
2674 list_for_each_entry(target, &host->target_list, list)
2675 srp_queue_remove_work(target);
b3589fd4 2676 spin_unlock(&host->target_lock);
aef9ec39
RD
2677
2678 /*
ef6c49d8 2679 * Wait for target port removal tasks.
aef9ec39 2680 */
ef6c49d8 2681 flush_workqueue(system_long_wq);
aef9ec39 2682
aef9ec39
RD
2683 kfree(host);
2684 }
2685
f5358a17
RD
2686 if (srp_dev->fmr_pool)
2687 ib_destroy_fmr_pool(srp_dev->fmr_pool);
2688 ib_dereg_mr(srp_dev->mr);
2689 ib_dealloc_pd(srp_dev->pd);
2690
2691 kfree(srp_dev);
aef9ec39
RD
2692}
2693
3236822b 2694static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
2695 .has_rport_state = true,
2696 .reset_timer_if_blocked = true,
2697 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
2698 .dev_loss_tmo = &srp_dev_loss_tmo,
2699 .reconnect = srp_rport_reconnect,
dc1bdbd9 2700 .rport_delete = srp_rport_delete,
ed9b2264 2701 .terminate_rport_io = srp_terminate_io,
3236822b
FT
2702};
2703
aef9ec39
RD
2704static int __init srp_init_module(void)
2705{
2706 int ret;
2707
dcb4cb85 2708 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
dd5e6e38 2709
49248644 2710 if (srp_sg_tablesize) {
e0bda7d8 2711 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
2712 if (!cmd_sg_entries)
2713 cmd_sg_entries = srp_sg_tablesize;
2714 }
2715
2716 if (!cmd_sg_entries)
2717 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
2718
2719 if (cmd_sg_entries > 255) {
e0bda7d8 2720 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 2721 cmd_sg_entries = 255;
1e89a194
DD
2722 }
2723
c07d424d
DD
2724 if (!indirect_sg_entries)
2725 indirect_sg_entries = cmd_sg_entries;
2726 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
2727 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
2728 cmd_sg_entries);
c07d424d
DD
2729 indirect_sg_entries = cmd_sg_entries;
2730 }
2731
3236822b
FT
2732 ib_srp_transport_template =
2733 srp_attach_transport(&ib_srp_transport_functions);
2734 if (!ib_srp_transport_template)
2735 return -ENOMEM;
2736
aef9ec39
RD
2737 ret = class_register(&srp_class);
2738 if (ret) {
e0bda7d8 2739 pr_err("couldn't register class infiniband_srp\n");
3236822b 2740 srp_release_transport(ib_srp_transport_template);
aef9ec39
RD
2741 return ret;
2742 }
2743
c1a0b23b
MT
2744 ib_sa_register_client(&srp_sa_client);
2745
aef9ec39
RD
2746 ret = ib_register_client(&srp_client);
2747 if (ret) {
e0bda7d8 2748 pr_err("couldn't register IB client\n");
3236822b 2749 srp_release_transport(ib_srp_transport_template);
c1a0b23b 2750 ib_sa_unregister_client(&srp_sa_client);
aef9ec39
RD
2751 class_unregister(&srp_class);
2752 return ret;
2753 }
2754
2755 return 0;
2756}
2757
2758static void __exit srp_cleanup_module(void)
2759{
2760 ib_unregister_client(&srp_client);
c1a0b23b 2761 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 2762 class_unregister(&srp_class);
3236822b 2763 srp_release_transport(ib_srp_transport_template);
aef9ec39
RD
2764}
2765
2766module_init(srp_init_module);
2767module_exit(srp_cleanup_module);