IB/core: Enhance ib_map_mr_sg()
[deliverable/linux.git] / drivers / infiniband / ulp / srp / ib_srp.c
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/string.h>
40#include <linux/parser.h>
41#include <linux/random.h>
42#include <linux/jiffies.h>
43#include <rdma/ib_cache.h>
44
45#include <linux/atomic.h>
46
47#include <scsi/scsi.h>
48#include <scsi/scsi_device.h>
49#include <scsi/scsi_dbg.h>
50#include <scsi/scsi_tcq.h>
51#include <scsi/srp.h>
52#include <scsi/scsi_transport_srp.h>
53
54#include "ib_srp.h"
55
56#define DRV_NAME "ib_srp"
57#define PFX DRV_NAME ": "
58#define DRV_VERSION "2.0"
59#define DRV_RELDATE "July 26, 2015"
60
61MODULE_AUTHOR("Roland Dreier");
62MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63MODULE_LICENSE("Dual BSD/GPL");
64MODULE_VERSION(DRV_VERSION);
65MODULE_INFO(release_date, DRV_RELDATE);
66
67static unsigned int srp_sg_tablesize;
68static unsigned int cmd_sg_entries;
69static unsigned int indirect_sg_entries;
70static bool allow_ext_sg;
71static bool prefer_fr = true;
72static bool register_always = true;
73static int topspin_workarounds = 1;
74
75module_param(srp_sg_tablesize, uint, 0444);
76MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
77
78module_param(cmd_sg_entries, uint, 0444);
79MODULE_PARM_DESC(cmd_sg_entries,
80 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
81
82module_param(indirect_sg_entries, uint, 0444);
83MODULE_PARM_DESC(indirect_sg_entries,
84 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85
86module_param(allow_ext_sg, bool, 0444);
87MODULE_PARM_DESC(allow_ext_sg,
88 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89
90module_param(topspin_workarounds, int, 0444);
91MODULE_PARM_DESC(topspin_workarounds,
92 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93
94module_param(prefer_fr, bool, 0444);
95MODULE_PARM_DESC(prefer_fr,
96"Whether to use fast registration if both FMR and fast registration are supported");
97
98module_param(register_always, bool, 0444);
99MODULE_PARM_DESC(register_always,
100 "Use memory registration even for contiguous memory regions");
101
102static const struct kernel_param_ops srp_tmo_ops;
103
104static int srp_reconnect_delay = 10;
105module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106 S_IRUGO | S_IWUSR);
107MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108
109static int srp_fast_io_fail_tmo = 15;
110module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111 S_IRUGO | S_IWUSR);
112MODULE_PARM_DESC(fast_io_fail_tmo,
113 "Number of seconds between the observation of a transport"
114 " layer error and failing all I/O. \"off\" means that this"
115 " functionality is disabled.");
116
117static int srp_dev_loss_tmo = 600;
118module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119 S_IRUGO | S_IWUSR);
120MODULE_PARM_DESC(dev_loss_tmo,
121 "Maximum number of seconds that the SRP transport should"
122 " insulate transport layer errors. After this time has been"
123 " exceeded the SCSI host is removed. Should be"
124 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
125 " if fast_io_fail_tmo has not been set. \"off\" means that"
126 " this functionality is disabled.");
127
128static unsigned ch_count;
129module_param(ch_count, uint, 0444);
130MODULE_PARM_DESC(ch_count,
131 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132
133static void srp_add_one(struct ib_device *device);
134static void srp_remove_one(struct ib_device *device, void *client_data);
135static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
136static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
137 const char *opname);
138static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
139
140static struct scsi_transport_template *ib_srp_transport_template;
141static struct workqueue_struct *srp_remove_wq;
142
143static struct ib_client srp_client = {
144 .name = "srp",
145 .add = srp_add_one,
146 .remove = srp_remove_one
147};
148
149static struct ib_sa_client srp_sa_client;
150
151static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
152{
153 int tmo = *(int *)kp->arg;
154
155 if (tmo >= 0)
156 return sprintf(buffer, "%d", tmo);
157 else
158 return sprintf(buffer, "off");
159}
160
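/*
 * Note: module_param_cb() setter for the SRP transport timeouts. The new
 * value is parsed and only accepted if it remains consistent with the other
 * two timeouts (reconnect_delay, fast_io_fail_tmo and dev_loss_tmo are
 * validated together by srp_tmo_valid()).
 */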
161static int srp_tmo_set(const char *val, const struct kernel_param *kp)
162{
163 int tmo, res;
164
165 res = srp_parse_tmo(&tmo, val);
166 if (res)
167 goto out;
168
169 if (kp->arg == &srp_reconnect_delay)
170 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
171 srp_dev_loss_tmo);
172 else if (kp->arg == &srp_fast_io_fail_tmo)
173 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
174 else
175 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
176 tmo);
177 if (res)
178 goto out;
179 *(int *)kp->arg = tmo;
180
181out:
182 return res;
183}
184
185static const struct kernel_param_ops srp_tmo_ops = {
186 .get = srp_tmo_get,
187 .set = srp_tmo_set,
188};
189
190static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
191{
192 return (struct srp_target_port *) host->hostdata;
193}
194
195static const char *srp_target_info(struct Scsi_Host *host)
196{
197 return host_to_target(host)->target_name;
198}
199
200static int srp_target_is_topspin(struct srp_target_port *target)
201{
202 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
203 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
204
205 return topspin_workarounds &&
206 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
207 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
208}
209
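/*
 * Allocate an SRP information unit (IU) of @size bytes and DMA-map its
 * buffer for @direction. Returns NULL on allocation or mapping failure.
 */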
210static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
211 gfp_t gfp_mask,
212 enum dma_data_direction direction)
213{
214 struct srp_iu *iu;
215
216 iu = kmalloc(sizeof *iu, gfp_mask);
217 if (!iu)
218 goto out;
219
220 iu->buf = kzalloc(size, gfp_mask);
221 if (!iu->buf)
222 goto out_free_iu;
223
224 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
225 direction);
226 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
227 goto out_free_buf;
228
229 iu->size = size;
230 iu->direction = direction;
231
232 return iu;
233
234out_free_buf:
235 kfree(iu->buf);
236out_free_iu:
237 kfree(iu);
238out:
239 return NULL;
240}
241
242static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
243{
244 if (!iu)
245 return;
246
247 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
248 iu->direction);
249 kfree(iu->buf);
250 kfree(iu);
251}
252
253static void srp_qp_event(struct ib_event *event, void *context)
254{
255 pr_debug("QP event %s (%d)\n",
256 ib_event_msg(event->event), event->event);
257}
258
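/*
 * Bring a newly created QP into the INIT state: look up the P_Key index for
 * the local port and enable remote read/write access.
 */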
259static int srp_init_qp(struct srp_target_port *target,
260 struct ib_qp *qp)
261{
262 struct ib_qp_attr *attr;
263 int ret;
264
265 attr = kmalloc(sizeof *attr, GFP_KERNEL);
266 if (!attr)
267 return -ENOMEM;
268
269 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
270 target->srp_host->port,
271 be16_to_cpu(target->pkey),
272 &attr->pkey_index);
273 if (ret)
274 goto out;
275
276 attr->qp_state = IB_QPS_INIT;
277 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
278 IB_ACCESS_REMOTE_WRITE);
279 attr->port_num = target->srp_host->port;
280
281 ret = ib_modify_qp(qp, attr,
282 IB_QP_STATE |
283 IB_QP_PKEY_INDEX |
284 IB_QP_ACCESS_FLAGS |
285 IB_QP_PORT);
286
287out:
288 kfree(attr);
289 return ret;
290}
291
292static int srp_new_cm_id(struct srp_rdma_ch *ch)
293{
294 struct srp_target_port *target = ch->target;
295 struct ib_cm_id *new_cm_id;
296
297 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
298 srp_cm_handler, ch);
299 if (IS_ERR(new_cm_id))
300 return PTR_ERR(new_cm_id);
301
302 if (ch->cm_id)
303 ib_destroy_cm_id(ch->cm_id);
304 ch->cm_id = new_cm_id;
305 ch->path.sgid = target->sgid;
306 ch->path.dgid = target->orig_dgid;
307 ch->path.pkey = target->pkey;
308 ch->path.service_id = target->service_id;
309
310 return 0;
311}
312
313static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
314{
315 struct srp_device *dev = target->srp_host->srp_dev;
316 struct ib_fmr_pool_param fmr_param;
317
318 memset(&fmr_param, 0, sizeof(fmr_param));
319 fmr_param.pool_size = target->mr_pool_size;
320 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
321 fmr_param.cache = 1;
322 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
323 fmr_param.page_shift = ilog2(dev->mr_page_size);
324 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
325 IB_ACCESS_REMOTE_WRITE |
326 IB_ACCESS_REMOTE_READ);
327
328 return ib_create_fmr_pool(dev->pd, &fmr_param);
329}
330
331/**
332 * srp_destroy_fr_pool() - free the resources owned by a pool
333 * @pool: Fast registration pool to be destroyed.
334 */
335static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
336{
337 int i;
338 struct srp_fr_desc *d;
339
340 if (!pool)
341 return;
342
343 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
344 if (d->mr)
345 ib_dereg_mr(d->mr);
346 }
347 kfree(pool);
348}
349
350/**
351 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
352 * @device: IB device to allocate fast registration descriptors for.
353 * @pd: Protection domain associated with the FR descriptors.
354 * @pool_size: Number of descriptors to allocate.
355 * @max_page_list_len: Maximum fast registration work request page list length.
356 */
357static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
358 struct ib_pd *pd, int pool_size,
359 int max_page_list_len)
360{
361 struct srp_fr_pool *pool;
362 struct srp_fr_desc *d;
363 struct ib_mr *mr;
364 int i, ret = -EINVAL;
365
366 if (pool_size <= 0)
367 goto err;
368 ret = -ENOMEM;
369 pool = kzalloc(sizeof(struct srp_fr_pool) +
370 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
371 if (!pool)
372 goto err;
373 pool->size = pool_size;
374 pool->max_page_list_len = max_page_list_len;
375 spin_lock_init(&pool->lock);
376 INIT_LIST_HEAD(&pool->free_list);
377
378 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
379 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
380 max_page_list_len);
381 if (IS_ERR(mr)) {
382 ret = PTR_ERR(mr);
383 goto destroy_pool;
384 }
385 d->mr = mr;
386 list_add_tail(&d->entry, &pool->free_list);
387 }
388
389out:
390 return pool;
391
392destroy_pool:
393 srp_destroy_fr_pool(pool);
394
395err:
396 pool = ERR_PTR(ret);
397 goto out;
398}
399
400/**
401 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
402 * @pool: Pool to obtain descriptor from.
403 */
404static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
405{
406 struct srp_fr_desc *d = NULL;
407 unsigned long flags;
408
409 spin_lock_irqsave(&pool->lock, flags);
410 if (!list_empty(&pool->free_list)) {
411 d = list_first_entry(&pool->free_list, typeof(*d), entry);
412 list_del(&d->entry);
413 }
414 spin_unlock_irqrestore(&pool->lock, flags);
415
416 return d;
417}
418
419/**
420 * srp_fr_pool_put() - put an FR descriptor back in the free list
421 * @pool: Pool the descriptor was allocated from.
422 * @desc: Pointer to an array of fast registration descriptor pointers.
423 * @n: Number of descriptors to put back.
424 *
425 * Note: The caller must already have queued an invalidation request for
426 * desc->mr->rkey before calling this function.
427 */
428static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
429 int n)
430{
431 unsigned long flags;
432 int i;
433
434 spin_lock_irqsave(&pool->lock, flags);
435 for (i = 0; i < n; i++)
436 list_add(&desc[i]->entry, &pool->free_list);
437 spin_unlock_irqrestore(&pool->lock, flags);
438}
439
440static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
441{
442 struct srp_device *dev = target->srp_host->srp_dev;
443
444 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
445 dev->max_pages_per_mr);
446}
447
448/**
449 * srp_destroy_qp() - destroy an RDMA queue pair
450 * @qp: RDMA queue pair.
451 *
452 * Drain the qp before destroying it. This prevents the receive
453 * completion handler from accessing the queue pair while it is
454 * being destroyed.
455 */
456static void srp_destroy_qp(struct ib_qp *qp)
457{
458 ib_drain_rq(qp);
459 ib_destroy_qp(qp);
460}
461
462static int srp_create_ch_ib(struct srp_rdma_ch *ch)
463{
464 struct srp_target_port *target = ch->target;
465 struct srp_device *dev = target->srp_host->srp_dev;
466 struct ib_qp_init_attr *init_attr;
467 struct ib_cq *recv_cq, *send_cq;
468 struct ib_qp *qp;
469 struct ib_fmr_pool *fmr_pool = NULL;
470 struct srp_fr_pool *fr_pool = NULL;
471 const int m = dev->use_fast_reg ? 3 : 1;
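/*
 * Send queue sizing note: with fast registration a single SRP command may
 * post a memory registration and a local invalidate work request in
 * addition to the send itself, hence m == 3 above.
 */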
472 int ret;
473
474 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
475 if (!init_attr)
476 return -ENOMEM;
477
478 /* queue_size + 1 for ib_drain_rq() */
479 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
480 ch->comp_vector, IB_POLL_SOFTIRQ);
481 if (IS_ERR(recv_cq)) {
482 ret = PTR_ERR(recv_cq);
483 goto err;
484 }
485
486 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
487 ch->comp_vector, IB_POLL_DIRECT);
488 if (IS_ERR(send_cq)) {
489 ret = PTR_ERR(send_cq);
490 goto err_recv_cq;
491 }
492
493 init_attr->event_handler = srp_qp_event;
494 init_attr->cap.max_send_wr = m * target->queue_size;
495 init_attr->cap.max_recv_wr = target->queue_size + 1;
496 init_attr->cap.max_recv_sge = 1;
497 init_attr->cap.max_send_sge = 1;
498 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
499 init_attr->qp_type = IB_QPT_RC;
500 init_attr->send_cq = send_cq;
501 init_attr->recv_cq = recv_cq;
502
503 qp = ib_create_qp(dev->pd, init_attr);
504 if (IS_ERR(qp)) {
505 ret = PTR_ERR(qp);
506 goto err_send_cq;
507 }
508
509 ret = srp_init_qp(target, qp);
510 if (ret)
511 goto err_qp;
512
513 if (dev->use_fast_reg) {
514 fr_pool = srp_alloc_fr_pool(target);
515 if (IS_ERR(fr_pool)) {
516 ret = PTR_ERR(fr_pool);
517 shost_printk(KERN_WARNING, target->scsi_host, PFX
518 "FR pool allocation failed (%d)\n", ret);
519 goto err_qp;
520 }
521 } else if (dev->use_fmr) {
522 fmr_pool = srp_alloc_fmr_pool(target);
523 if (IS_ERR(fmr_pool)) {
524 ret = PTR_ERR(fmr_pool);
525 shost_printk(KERN_WARNING, target->scsi_host, PFX
526 "FMR pool allocation failed (%d)\n", ret);
527 goto err_qp;
528 }
529 }
530
531 if (ch->qp)
532 srp_destroy_qp(ch->qp);
533 if (ch->recv_cq)
534 ib_free_cq(ch->recv_cq);
535 if (ch->send_cq)
536 ib_free_cq(ch->send_cq);
537
538 ch->qp = qp;
539 ch->recv_cq = recv_cq;
540 ch->send_cq = send_cq;
541
542 if (dev->use_fast_reg) {
543 if (ch->fr_pool)
544 srp_destroy_fr_pool(ch->fr_pool);
545 ch->fr_pool = fr_pool;
546 } else if (dev->use_fmr) {
547 if (ch->fmr_pool)
548 ib_destroy_fmr_pool(ch->fmr_pool);
549 ch->fmr_pool = fmr_pool;
550 }
551
552 kfree(init_attr);
553 return 0;
554
555err_qp:
556 srp_destroy_qp(qp);
557
558err_send_cq:
559 ib_free_cq(send_cq);
560
561err_recv_cq:
562 ib_free_cq(recv_cq);
563
564err:
565 kfree(init_attr);
566 return ret;
567}
568
569/*
570 * Note: this function may be called without srp_alloc_iu_bufs() having been
571 * invoked. Hence the ch->[rt]x_ring checks.
572 */
573static void srp_free_ch_ib(struct srp_target_port *target,
574 struct srp_rdma_ch *ch)
575{
576 struct srp_device *dev = target->srp_host->srp_dev;
577 int i;
578
579 if (!ch->target)
580 return;
581
582 if (ch->cm_id) {
583 ib_destroy_cm_id(ch->cm_id);
584 ch->cm_id = NULL;
585 }
586
587 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
588 if (!ch->qp)
589 return;
590
591 if (dev->use_fast_reg) {
592 if (ch->fr_pool)
593 srp_destroy_fr_pool(ch->fr_pool);
594 } else if (dev->use_fmr) {
595 if (ch->fmr_pool)
596 ib_destroy_fmr_pool(ch->fmr_pool);
597 }
598
599 srp_destroy_qp(ch->qp);
600 ib_free_cq(ch->send_cq);
601 ib_free_cq(ch->recv_cq);
602
603 /*
604 * Prevent the SCSI error handler from using this channel after it
605 * has been freed. The SCSI error handler may continue trying to
606 * perform recovery actions even after scsi_remove_host() has
607 * returned.
608 */
609 ch->target = NULL;
610
611 ch->qp = NULL;
612 ch->send_cq = ch->recv_cq = NULL;
613
614 if (ch->rx_ring) {
615 for (i = 0; i < target->queue_size; ++i)
616 srp_free_iu(target->srp_host, ch->rx_ring[i]);
617 kfree(ch->rx_ring);
618 ch->rx_ring = NULL;
619 }
620 if (ch->tx_ring) {
621 for (i = 0; i < target->queue_size; ++i)
622 srp_free_iu(target->srp_host, ch->tx_ring[i]);
623 kfree(ch->tx_ring);
624 ch->tx_ring = NULL;
625 }
626}
627
628static void srp_path_rec_completion(int status,
629 struct ib_sa_path_rec *pathrec,
630 void *ch_ptr)
631{
632 struct srp_rdma_ch *ch = ch_ptr;
633 struct srp_target_port *target = ch->target;
634
635 ch->status = status;
636 if (status)
637 shost_printk(KERN_ERR, target->scsi_host,
638 PFX "Got failed path rec status %d\n", status);
639 else
640 ch->path = *pathrec;
641 complete(&ch->done);
642}
643
644static int srp_lookup_path(struct srp_rdma_ch *ch)
645{
646 struct srp_target_port *target = ch->target;
647 int ret;
648
649 ch->path.numb_path = 1;
650
651 init_completion(&ch->done);
652
653 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
654 target->srp_host->srp_dev->dev,
655 target->srp_host->port,
656 &ch->path,
657 IB_SA_PATH_REC_SERVICE_ID |
658 IB_SA_PATH_REC_DGID |
659 IB_SA_PATH_REC_SGID |
660 IB_SA_PATH_REC_NUMB_PATH |
661 IB_SA_PATH_REC_PKEY,
662 SRP_PATH_REC_TIMEOUT_MS,
663 GFP_KERNEL,
664 srp_path_rec_completion,
665 ch, &ch->path_query);
666 if (ch->path_query_id < 0)
667 return ch->path_query_id;
668
669 ret = wait_for_completion_interruptible(&ch->done);
670 if (ret < 0)
671 return ret;
672
673 if (ch->status < 0)
674 shost_printk(KERN_WARNING, target->scsi_host,
675 PFX "Path record query failed\n");
676
677 return ch->status;
678}
679
680static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
681{
682 struct srp_target_port *target = ch->target;
683 struct {
684 struct ib_cm_req_param param;
685 struct srp_login_req priv;
686 } *req = NULL;
687 int status;
688
689 req = kzalloc(sizeof *req, GFP_KERNEL);
690 if (!req)
691 return -ENOMEM;
692
693 req->param.primary_path = &ch->path;
694 req->param.alternate_path = NULL;
695 req->param.service_id = target->service_id;
696 req->param.qp_num = ch->qp->qp_num;
697 req->param.qp_type = ch->qp->qp_type;
698 req->param.private_data = &req->priv;
699 req->param.private_data_len = sizeof req->priv;
700 req->param.flow_control = 1;
701
702 get_random_bytes(&req->param.starting_psn, 4);
703 req->param.starting_psn &= 0xffffff;
704
705 /*
706 * Pick some arbitrary defaults here; we could make these
707 * module parameters if anyone cared about setting them.
708 */
709 req->param.responder_resources = 4;
710 req->param.remote_cm_response_timeout = 20;
711 req->param.local_cm_response_timeout = 20;
712 req->param.retry_count = target->tl_retry_count;
713 req->param.rnr_retry_count = 7;
714 req->param.max_cm_retries = 15;
715
716 req->priv.opcode = SRP_LOGIN_REQ;
717 req->priv.tag = 0;
49248644 718 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
aef9ec39
RD
719 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
720 SRP_BUF_FORMAT_INDIRECT);
721 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
722 SRP_MULTICHAN_SINGLE);
723 /*
724 * In the published SRP specification (draft rev. 16a), the
725 * port identifier format is 8 bytes of ID extension followed
726 * by 8 bytes of GUID. Older drafts put the two halves in the
727 * opposite order, so that the GUID comes first.
728 *
729 * Targets conforming to these obsolete drafts can be
730 * recognized by the I/O Class they report.
731 */
732 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
733 memcpy(req->priv.initiator_port_id,
734 &target->sgid.global.interface_id, 8);
735 memcpy(req->priv.initiator_port_id + 8,
736 &target->initiator_ext, 8);
737 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
738 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
739 } else {
740 memcpy(req->priv.initiator_port_id,
741 &target->initiator_ext, 8);
742 memcpy(req->priv.initiator_port_id + 8,
743 &target->sgid.global.interface_id, 8);
744 memcpy(req->priv.target_port_id, &target->id_ext, 8);
745 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
746 }
747
748 /*
749 * Topspin/Cisco SRP targets will reject our login unless we
750 * zero out the first 8 bytes of our initiator port ID and set
751 * the second 8 bytes to the local node GUID.
752 */
753 if (srp_target_is_topspin(target)) {
754 shost_printk(KERN_DEBUG, target->scsi_host,
755 PFX "Topspin/Cisco initiator port ID workaround "
756 "activated for target GUID %016llx\n",
757 be64_to_cpu(target->ioc_guid));
758 memset(req->priv.initiator_port_id, 0, 8);
759 memcpy(req->priv.initiator_port_id + 8,
760 &target->srp_host->srp_dev->dev->node_guid, 8);
761 }
762
763 status = ib_send_cm_req(ch->cm_id, &req->param);
764
765 kfree(req);
766
767 return status;
768}
769
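/*
 * Mark the target as SRP_TARGET_REMOVED and queue the remove work. Returns
 * false if the target had already been marked removed, so the work is only
 * queued once.
 */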
770static bool srp_queue_remove_work(struct srp_target_port *target)
771{
772 bool changed = false;
773
774 spin_lock_irq(&target->lock);
775 if (target->state != SRP_TARGET_REMOVED) {
776 target->state = SRP_TARGET_REMOVED;
777 changed = true;
778 }
779 spin_unlock_irq(&target->lock);
780
781 if (changed)
782 queue_work(srp_remove_wq, &target->remove_work);
783
784 return changed;
785}
786
787static void srp_disconnect_target(struct srp_target_port *target)
788{
789 struct srp_rdma_ch *ch;
790 int i;
791
792 /* XXX should send SRP_I_LOGOUT request */
793
794 for (i = 0; i < target->ch_count; i++) {
795 ch = &target->ch[i];
796 ch->connected = false;
797 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
798 shost_printk(KERN_DEBUG, target->scsi_host,
799 PFX "Sending CM DREQ failed\n");
800 }
801 }
802}
803
804static void srp_free_req_data(struct srp_target_port *target,
805 struct srp_rdma_ch *ch)
806{
807 struct srp_device *dev = target->srp_host->srp_dev;
808 struct ib_device *ibdev = dev->dev;
809 struct srp_request *req;
810 int i;
811
812 if (!ch->req_ring)
813 return;
814
815 for (i = 0; i < target->req_ring_size; ++i) {
816 req = &ch->req_ring[i];
817 if (dev->use_fast_reg) {
818 kfree(req->fr_list);
819 } else {
820 kfree(req->fmr_list);
821 kfree(req->map_page);
822 }
823 if (req->indirect_dma_addr) {
824 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
825 target->indirect_size,
826 DMA_TO_DEVICE);
827 }
828 kfree(req->indirect_desc);
829 }
830
831 kfree(ch->req_ring);
832 ch->req_ring = NULL;
833}
834
835static int srp_alloc_req_data(struct srp_rdma_ch *ch)
836{
837 struct srp_target_port *target = ch->target;
838 struct srp_device *srp_dev = target->srp_host->srp_dev;
839 struct ib_device *ibdev = srp_dev->dev;
840 struct srp_request *req;
841 void *mr_list;
842 dma_addr_t dma_addr;
843 int i, ret = -ENOMEM;
844
845 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
846 GFP_KERNEL);
847 if (!ch->req_ring)
848 goto out;
849
850 for (i = 0; i < target->req_ring_size; ++i) {
851 req = &ch->req_ring[i];
852 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
853 GFP_KERNEL);
854 if (!mr_list)
855 goto out;
856 if (srp_dev->use_fast_reg) {
857 req->fr_list = mr_list;
858 } else {
859 req->fmr_list = mr_list;
860 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
861 sizeof(void *), GFP_KERNEL);
862 if (!req->map_page)
863 goto out;
864 }
865 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
866 if (!req->indirect_desc)
867 goto out;
868
869 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
870 target->indirect_size,
871 DMA_TO_DEVICE);
872 if (ib_dma_mapping_error(ibdev, dma_addr))
873 goto out;
874
875 req->indirect_dma_addr = dma_addr;
876 }
877 ret = 0;
878
879out:
880 return ret;
881}
882
883/**
884 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
885 * @shost: SCSI host whose attributes to remove from sysfs.
886 *
887 * Note: Any attributes defined in the host template and that did not exist
888 * before invocation of this function will be ignored.
889 */
890static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
891{
892 struct device_attribute **attr;
893
894 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
895 device_remove_file(&shost->shost_dev, *attr);
896}
897
898static void srp_remove_target(struct srp_target_port *target)
899{
900 struct srp_rdma_ch *ch;
901 int i;
902
903 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
904
905 srp_del_scsi_host_attr(target->scsi_host);
906 srp_rport_get(target->rport);
907 srp_remove_host(target->scsi_host);
908 scsi_remove_host(target->scsi_host);
909 srp_stop_rport_timers(target->rport);
910 srp_disconnect_target(target);
911 for (i = 0; i < target->ch_count; i++) {
912 ch = &target->ch[i];
913 srp_free_ch_ib(target, ch);
914 }
915 cancel_work_sync(&target->tl_err_work);
916 srp_rport_put(target->rport);
917 for (i = 0; i < target->ch_count; i++) {
918 ch = &target->ch[i];
919 srp_free_req_data(target, ch);
920 }
921 kfree(target->ch);
922 target->ch = NULL;
923
924 spin_lock(&target->srp_host->target_lock);
925 list_del(&target->list);
926 spin_unlock(&target->srp_host->target_lock);
927
ee12d6a8
BVA
928 scsi_host_put(target->scsi_host);
929}
930
931static void srp_remove_work(struct work_struct *work)
932{
933 struct srp_target_port *target =
934 container_of(work, struct srp_target_port, remove_work);
935
936 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
937
938 srp_remove_target(target);
939}
940
941static void srp_rport_delete(struct srp_rport *rport)
942{
943 struct srp_target_port *target = rport->lld_data;
944
945 srp_queue_remove_work(target);
946}
947
948/**
949 * srp_connected_ch() - number of connected channels
950 * @target: SRP target port.
951 */
952static int srp_connected_ch(struct srp_target_port *target)
953{
954 int i, c = 0;
955
956 for (i = 0; i < target->ch_count; i++)
957 c += target->ch[i].connected;
958
959 return c;
960}
961
962static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
963{
964 struct srp_target_port *target = ch->target;
965 int ret;
966
967 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
968
969 ret = srp_lookup_path(ch);
970 if (ret)
971 goto out;
972
973 while (1) {
974 init_completion(&ch->done);
975 ret = srp_send_req(ch, multich);
976 if (ret)
977 goto out;
978 ret = wait_for_completion_interruptible(&ch->done);
979 if (ret < 0)
980 goto out;
981
982 /*
983 * The CM event handling code will set status to
984 * SRP_PORT_REDIRECT if we get a port redirect REJ
985 * back, or SRP_DLID_REDIRECT if we get a lid/qp
986 * redirect REJ back.
987 */
988 ret = ch->status;
989 switch (ret) {
990 case 0:
991 ch->connected = true;
992 goto out;
993
994 case SRP_PORT_REDIRECT:
995 ret = srp_lookup_path(ch);
996 if (ret)
997 goto out;
998 break;
999
1000 case SRP_DLID_REDIRECT:
1001 break;
1002
1003 case SRP_STALE_CONN:
1004 shost_printk(KERN_ERR, target->scsi_host, PFX
1005 "giving up on stale connection\n");
1006 ret = -ECONNRESET;
1007 goto out;
1008
1009 default:
1010 goto out;
1011 }
1012 }
1013
1014out:
1015 return ret <= 0 ? ret : -ENODEV;
1016}
1017
1018static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1019{
1020 srp_handle_qp_err(cq, wc, "INV RKEY");
1021}
1022
1023static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1024 u32 rkey)
1025{
1026 struct ib_send_wr *bad_wr;
1027 struct ib_send_wr wr = {
1028 .opcode = IB_WR_LOCAL_INV,
1029 .next = NULL,
1030 .num_sge = 0,
1031 .send_flags = 0,
1032 .ex.invalidate_rkey = rkey,
1033 };
1034
1035 wr.wr_cqe = &req->reg_cqe;
1036 req->reg_cqe.done = srp_inv_rkey_err_done;
1037 return ib_post_send(ch->qp, &wr, &bad_wr);
1038}
1039
1040static void srp_unmap_data(struct scsi_cmnd *scmnd,
1041 struct srp_rdma_ch *ch,
1042 struct srp_request *req)
1043{
1044 struct srp_target_port *target = ch->target;
1045 struct srp_device *dev = target->srp_host->srp_dev;
1046 struct ib_device *ibdev = dev->dev;
1047 int i, res;
1048
1049 if (!scsi_sglist(scmnd) ||
1050 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1051 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1052 return;
1053
1054 if (dev->use_fast_reg) {
1055 struct srp_fr_desc **pfr;
1056
1057 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1058 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1059 if (res < 0) {
1060 shost_printk(KERN_ERR, target->scsi_host, PFX
1061 "Queueing INV WR for rkey %#x failed (%d)\n",
1062 (*pfr)->mr->rkey, res);
1063 queue_work(system_long_wq,
1064 &target->tl_err_work);
1065 }
1066 }
1067 if (req->nmdesc)
1068 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1069 req->nmdesc);
1070 } else if (dev->use_fmr) {
1071 struct ib_pool_fmr **pfmr;
1072
1073 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1074 ib_fmr_pool_unmap(*pfmr);
1075 }
1076
1077 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1078 scmnd->sc_data_direction);
1079}
1080
1081/**
1082 * srp_claim_req - Take ownership of the scmnd associated with a request.
1083 * @ch: SRP RDMA channel.
1084 * @req: SRP request.
1085 * @sdev: If not NULL, only take ownership for this SCSI device.
1086 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1087 * ownership of @req->scmnd if it equals @scmnd.
1088 *
1089 * Return value:
1090 * Either NULL or a pointer to the SCSI command the caller became owner of.
1091 */
1092static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1093 struct srp_request *req,
1094 struct scsi_device *sdev,
1095 struct scsi_cmnd *scmnd)
1096{
1097 unsigned long flags;
1098
1099 spin_lock_irqsave(&ch->lock, flags);
1100 if (req->scmnd &&
1101 (!sdev || req->scmnd->device == sdev) &&
1102 (!scmnd || req->scmnd == scmnd)) {
1103 scmnd = req->scmnd;
1104 req->scmnd = NULL;
1105 } else {
1106 scmnd = NULL;
1107 }
1108 spin_unlock_irqrestore(&ch->lock, flags);
1109
1110 return scmnd;
1111}
1112
1113/**
1114 * srp_free_req() - Unmap data and adjust ch->req_lim.
1115 * @ch: SRP RDMA channel.
1116 * @req: Request to be freed.
1117 * @scmnd: SCSI command associated with @req.
1118 * @req_lim_delta: Amount to be added to @target->req_lim.
1119 */
1120static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1121 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1122{
1123 unsigned long flags;
1124
1125 srp_unmap_data(scmnd, ch, req);
1126
1127 spin_lock_irqsave(&ch->lock, flags);
1128 ch->req_lim += req_lim_delta;
1129 spin_unlock_irqrestore(&ch->lock, flags);
1130}
1131
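/*
 * Claim the SCSI command owned by @req (if any), unmap its data buffers and
 * complete it with @result.
 */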
1132static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1133 struct scsi_device *sdev, int result)
1134{
1135 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1136
1137 if (scmnd) {
1138 srp_free_req(ch, req, scmnd, 0);
1139 scmnd->result = result;
1140 scmnd->scsi_done(scmnd);
1141 }
1142}
1143
1144static void srp_terminate_io(struct srp_rport *rport)
1145{
1146 struct srp_target_port *target = rport->lld_data;
1147 struct srp_rdma_ch *ch;
1148 struct Scsi_Host *shost = target->scsi_host;
1149 struct scsi_device *sdev;
1150 int i, j;
1151
1152 /*
1153 * Invoking srp_terminate_io() while srp_queuecommand() is running
1154 * is not safe. Hence the warning statement below.
1155 */
1156 shost_for_each_device(sdev, shost)
1157 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1158
1159 for (i = 0; i < target->ch_count; i++) {
1160 ch = &target->ch[i];
1161
1162 for (j = 0; j < target->req_ring_size; ++j) {
1163 struct srp_request *req = &ch->req_ring[j];
1164
1165 srp_finish_req(ch, req, NULL,
1166 DID_TRANSPORT_FAILFAST << 16);
1167 }
1168 }
1169}
1170
1171/*
1172 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1173 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1174 * srp_reset_device() or srp_reset_host() calls will occur while this function
1175 * is in progress. One way to realize that is not to call this function
1176 * directly but to call srp_reconnect_rport() instead since that last function
1177 * serializes calls of this function via rport->mutex and also blocks
1178 * srp_queuecommand() calls before invoking this function.
1179 */
1180static int srp_rport_reconnect(struct srp_rport *rport)
1181{
1182 struct srp_target_port *target = rport->lld_data;
1183 struct srp_rdma_ch *ch;
1184 int i, j, ret = 0;
1185 bool multich = false;
1186
1187 srp_disconnect_target(target);
1188
1189 if (target->state == SRP_TARGET_SCANNING)
1190 return -ENODEV;
1191
1192 /*
1193 * Now get a new local CM ID so that we avoid confusing the target in
1194 * case things are really fouled up. Doing so also ensures that all CM
1195 * callbacks will have finished before a new QP is allocated.
1196 */
1197 for (i = 0; i < target->ch_count; i++) {
1198 ch = &target->ch[i];
1199 ret += srp_new_cm_id(ch);
1200 }
1201 for (i = 0; i < target->ch_count; i++) {
1202 ch = &target->ch[i];
1203 for (j = 0; j < target->req_ring_size; ++j) {
1204 struct srp_request *req = &ch->req_ring[j];
1205
1206 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1207 }
1208 }
1209 for (i = 0; i < target->ch_count; i++) {
1210 ch = &target->ch[i];
1211 /*
1212 * Whether or not creating a new CM ID succeeded, create a new
1213 * QP. This guarantees that all completion callback function
1214 * invocations have finished before request resetting starts.
1215 */
1216 ret += srp_create_ch_ib(ch);
1217
1218 INIT_LIST_HEAD(&ch->free_tx);
1219 for (j = 0; j < target->queue_size; ++j)
1220 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1221 }
1222
1223 target->qp_in_error = false;
1224
1225 for (i = 0; i < target->ch_count; i++) {
1226 ch = &target->ch[i];
1227 if (ret)
1228 break;
1229 ret = srp_connect_ch(ch, multich);
1230 multich = true;
1231 }
1232
1233 if (ret == 0)
1234 shost_printk(KERN_INFO, target->scsi_host,
1235 PFX "reconnect succeeded\n");
1236
1237 return ret;
1238}
1239
1240static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1241 unsigned int dma_len, u32 rkey)
1242{
1243 struct srp_direct_buf *desc = state->desc;
1244
1245 WARN_ON_ONCE(!dma_len);
1246
1247 desc->va = cpu_to_be64(dma_addr);
1248 desc->key = cpu_to_be32(rkey);
1249 desc->len = cpu_to_be32(dma_len);
1250
1251 state->total_len += dma_len;
1252 state->desc++;
1253 state->ndesc++;
1254}
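/*
 * Register the pages accumulated in @state with the FMR pool and append the
 * resulting memory descriptor. A single page already covered by the global
 * MR is described directly without registration.
 */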
1255
1256static int srp_map_finish_fmr(struct srp_map_state *state,
1257 struct srp_rdma_ch *ch)
1258{
1259 struct srp_target_port *target = ch->target;
1260 struct srp_device *dev = target->srp_host->srp_dev;
1261 struct ib_pool_fmr *fmr;
1262 u64 io_addr = 0;
1263
1264 if (state->fmr.next >= state->fmr.end)
1265 return -ENOMEM;
1266
1267 WARN_ON_ONCE(!dev->use_fmr);
1268
1269 if (state->npages == 0)
1270 return 0;
1271
1272 if (state->npages == 1 && target->global_mr) {
1273 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1274 target->global_mr->rkey);
1275 goto reset_state;
1276 }
1277
1278 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1279 state->npages, io_addr);
1280 if (IS_ERR(fmr))
1281 return PTR_ERR(fmr);
f5358a17 1282
f731ed62 1283 *state->fmr.next++ = fmr;
52ede08f 1284 state->nmdesc++;
f5358a17 1285
186fbc66
BVA
1286 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1287 state->dma_len, fmr->fmr->rkey);
1288
1289reset_state:
1290 state->npages = 0;
1291 state->dma_len = 0;
1292
1293 return 0;
1294}
1295
1296static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1297{
1298 srp_handle_qp_err(cq, wc, "FAST REG");
1299}
1300
1301static int srp_map_finish_fr(struct srp_map_state *state,
1302 struct srp_request *req,
1303 struct srp_rdma_ch *ch, int sg_nents)
1304{
1305 struct srp_target_port *target = ch->target;
1306 struct srp_device *dev = target->srp_host->srp_dev;
1307 struct ib_send_wr *bad_wr;
1308 struct ib_reg_wr wr;
1309 struct srp_fr_desc *desc;
1310 u32 rkey;
1311 int n, err;
1312
1313 if (state->fr.next >= state->fr.end)
1314 return -ENOMEM;
1315
1316 WARN_ON_ONCE(!dev->use_fast_reg);
1317
1318 if (sg_nents == 1 && target->global_mr) {
1319 srp_map_desc(state, sg_dma_address(state->sg),
1320 sg_dma_len(state->sg),
1321 target->global_mr->rkey);
1322 return 1;
1323 }
1324
1325 desc = srp_fr_pool_get(ch->fr_pool);
1326 if (!desc)
1327 return -ENOMEM;
1328
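/* Generate a new rkey so that stale references to a previous registration of this MR are not honored. */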
1329 rkey = ib_inc_rkey(desc->mr->rkey);
1330 ib_update_fast_reg_key(desc->mr, rkey);
1331
1332 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, NULL, dev->mr_page_size);
1333 if (unlikely(n < 0)) {
1334 srp_fr_pool_put(ch->fr_pool, &desc, 1);
1335 pr_debug("%s: ib_map_mr_sg(%d) returned %d.\n",
1336 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1337 n);
1338 return n;
1339 }
1340
1341 req->reg_cqe.done = srp_reg_mr_err_done;
1342
1343 wr.wr.next = NULL;
1344 wr.wr.opcode = IB_WR_REG_MR;
1345 wr.wr.wr_cqe = &req->reg_cqe;
1346 wr.wr.num_sge = 0;
1347 wr.wr.send_flags = 0;
1348 wr.mr = desc->mr;
1349 wr.key = desc->mr->rkey;
1350 wr.access = (IB_ACCESS_LOCAL_WRITE |
1351 IB_ACCESS_REMOTE_READ |
1352 IB_ACCESS_REMOTE_WRITE);
1353
1354 *state->fr.next++ = desc;
1355 state->nmdesc++;
1356
1357 srp_map_desc(state, desc->mr->iova,
1358 desc->mr->length, desc->mr->rkey);
1359
1360 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
1361 if (unlikely(err))
1362 return err;
1363
1364 return n;
1365}
1366
1367static int srp_map_sg_entry(struct srp_map_state *state,
1368 struct srp_rdma_ch *ch,
1369 struct scatterlist *sg, int sg_index)
1370{
1371 struct srp_target_port *target = ch->target;
1372 struct srp_device *dev = target->srp_host->srp_dev;
1373 struct ib_device *ibdev = dev->dev;
1374 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1375 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1376 unsigned int len = 0;
1377 int ret;
1378
1379 WARN_ON_ONCE(!dma_len);
1380
1381 while (dma_len) {
1382 unsigned offset = dma_addr & ~dev->mr_page_mask;
1383 if (state->npages == dev->max_pages_per_mr || offset != 0) {
1384 ret = srp_map_finish_fmr(state, ch);
1385 if (ret)
1386 return ret;
1387 }
1388
1389 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1390
1391 if (!state->npages)
1392 state->base_dma_addr = dma_addr;
1393 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1394 state->dma_len += len;
1395 dma_addr += len;
1396 dma_len -= len;
1397 }
1398
1399 /*
1400 * If the last entry of the MR wasn't a full page, then we need to
1401 * close it out and start a new one -- we can only merge at page
1402 * boundaries.
1403 */
1404 ret = 0;
1405 if (len != dev->mr_page_size)
1406 ret = srp_map_finish_fmr(state, ch);
1407 return ret;
1408}
1409
1410static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1411 struct srp_request *req, struct scatterlist *scat,
1412 int count)
1413{
1414 struct scatterlist *sg;
1415 int i, ret;
1416
1417 state->pages = req->map_page;
1418 state->fmr.next = req->fmr_list;
1419 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1420
1421 for_each_sg(scat, sg, count, i) {
1422 ret = srp_map_sg_entry(state, ch, sg, i);
1423 if (ret)
1424 return ret;
1425 }
1426
1427 ret = srp_map_finish_fmr(state, ch);
1428 if (ret)
1429 return ret;
1430
1431 return 0;
1432}
1433
1434static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1435 struct srp_request *req, struct scatterlist *scat,
1436 int count)
1437{
26630e8a 1438 state->desc = req->indirect_desc;
f7f7aab1
SG
1439 state->fr.next = req->fr_list;
1440 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1441 state->sg = scat;
26630e8a 1442
3b59b7a6
BVA
1443 if (count == 0)
1444 return 0;
1445
57b0be9c 1446 while (count) {
f7f7aab1 1447 int i, n;
26630e8a 1448
c6333f9f 1449 n = srp_map_finish_fr(state, req, ch, count);
f7f7aab1
SG
1450 if (unlikely(n < 0))
1451 return n;
1452
57b0be9c 1453 count -= n;
f7f7aab1
SG
1454 for (i = 0; i < n; i++)
1455 state->sg = sg_next(state->sg);
1456 }
26630e8a 1457
26630e8a
SG
1458 return 0;
1459}
1460
1461static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1462 struct srp_request *req, struct scatterlist *scat,
1463 int count)
1464{
1465 struct srp_target_port *target = ch->target;
1466 struct srp_device *dev = target->srp_host->srp_dev;
1467 struct scatterlist *sg;
1468 int i;
1469
1470 state->desc = req->indirect_desc;
1471 for_each_sg(scat, sg, count, i) {
1472 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1473 ib_sg_dma_len(dev->dev, sg),
1474 target->global_mr->rkey);
1475 }
1476
1477 return 0;
1478}
1479
1480/*
1481 * Register the indirect data buffer descriptor with the HCA.
1482 *
1483 * Note: since the indirect data buffer descriptor has been allocated with
1484 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1485 * memory buffer.
1486 */
1487static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1488 void **next_mr, void **end_mr, u32 idb_len,
1489 __be32 *idb_rkey)
1490{
1491 struct srp_target_port *target = ch->target;
1492 struct srp_device *dev = target->srp_host->srp_dev;
1493 struct srp_map_state state;
1494 struct srp_direct_buf idb_desc;
1495 u64 idb_pages[1];
1496 struct scatterlist idb_sg[1];
1497 int ret;
1498
1499 memset(&state, 0, sizeof(state));
1500 memset(&idb_desc, 0, sizeof(idb_desc));
1501 state.gen.next = next_mr;
1502 state.gen.end = end_mr;
1503 state.desc = &idb_desc;
1504 state.base_dma_addr = req->indirect_dma_addr;
1505 state.dma_len = idb_len;
1506
1507 if (dev->use_fast_reg) {
1508 state.sg = idb_sg;
1509 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1510 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1511#ifdef CONFIG_NEED_SG_DMA_LENGTH
1512 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1513#endif
1514 ret = srp_map_finish_fr(&state, req, ch, 1);
1515 if (ret < 0)
1516 return ret;
1517 } else if (dev->use_fmr) {
1518 state.pages = idb_pages;
1519 state.pages[0] = (req->indirect_dma_addr &
1520 dev->mr_page_mask);
1521 state.npages = 1;
1522 ret = srp_map_finish_fmr(&state, ch);
1523 if (ret < 0)
1524 return ret;
1525 } else {
1526 return -EINVAL;
1527 }
1528
1529 *idb_rkey = idb_desc.key;
1530
1531 return 0;
1532}
1533
1534/**
1535 * srp_map_data() - map SCSI data buffer onto an SRP request
1536 * @scmnd: SCSI command to map
1537 * @ch: SRP RDMA channel
1538 * @req: SRP request
1539 *
1540 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1541 * mapping failed.
1542 */
1543static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1544 struct srp_request *req)
1545{
1546 struct srp_target_port *target = ch->target;
1547 struct scatterlist *scat;
1548 struct srp_cmd *cmd = req->cmd->buf;
1549 int len, nents, count, ret;
1550 struct srp_device *dev;
1551 struct ib_device *ibdev;
1552 struct srp_map_state state;
1553 struct srp_indirect_buf *indirect_hdr;
1554 u32 idb_len, table_len;
1555 __be32 idb_rkey;
1556 u8 fmt;
1557
1558 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1559 return sizeof (struct srp_cmd);
1560
1561 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1562 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1563 shost_printk(KERN_WARNING, target->scsi_host,
1564 PFX "Unhandled data direction %d\n",
1565 scmnd->sc_data_direction);
1566 return -EINVAL;
1567 }
1568
1569 nents = scsi_sg_count(scmnd);
1570 scat = scsi_sglist(scmnd);
1571
1572 dev = target->srp_host->srp_dev;
1573 ibdev = dev->dev;
1574
1575 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1576 if (unlikely(count == 0))
1577 return -EIO;
1578
1579 fmt = SRP_DATA_DESC_DIRECT;
1580 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1581
1582 if (count == 1 && target->global_mr) {
1583 /*
1584 * The midlayer only generated a single gather/scatter
1585 * entry, or DMA mapping coalesced everything to a
1586 * single entry. So a direct descriptor along with
1587 * the DMA MR suffices.
1588 */
1589 struct srp_direct_buf *buf = (void *) cmd->add_data;
1590
1591 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1592 buf->key = cpu_to_be32(target->global_mr->rkey);
1593 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1594
1595 req->nmdesc = 0;
1596 goto map_complete;
1597 }
1598
1599 /*
1600 * We have more than one scatter/gather entry, so build our indirect
1601 * descriptor table, trying to merge as many entries as we can.
1602 */
1603 indirect_hdr = (void *) cmd->add_data;
1604
1605 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1606 target->indirect_size, DMA_TO_DEVICE);
1607
1608 memset(&state, 0, sizeof(state));
1609 if (dev->use_fast_reg)
1610 ret = srp_map_sg_fr(&state, ch, req, scat, count);
1611 else if (dev->use_fmr)
1612 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
1613 else
1614 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1615 req->nmdesc = state.nmdesc;
1616 if (ret < 0)
1617 goto unmap;
1618
1619 /* We've mapped the request, now pull as much of the indirect
1620 * descriptor table as we can into the command buffer. If this
1621 * target is not using an external indirect table, we are
1622 * guaranteed to fit into the command, as the SCSI layer won't
1623 * give us more S/G entries than we allow.
1624 */
1625 if (state.ndesc == 1) {
1626 /*
1627 * Memory registration collapsed the sg-list into one entry,
1628 * so use a direct descriptor.
1629 */
1630 struct srp_direct_buf *buf = (void *) cmd->add_data;
1631
1632 *buf = req->indirect_desc[0];
1633 goto map_complete;
1634 }
1635
1636 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1637 !target->allow_ext_sg)) {
1638 shost_printk(KERN_ERR, target->scsi_host,
1639 "Could not fit S/G list into SRP_CMD\n");
1640 ret = -EIO;
1641 goto unmap;
1642 }
1643
1644 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff 1645 table_len = state.ndesc * sizeof (struct srp_direct_buf);
330179f2 1646 idb_len = sizeof(struct srp_indirect_buf) + table_len;
8f26c9ff
DD
1647
1648 fmt = SRP_DATA_DESC_INDIRECT;
1649 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1650 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1651
c07d424d
DD
1652 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1653 count * sizeof (struct srp_direct_buf));
8f26c9ff 1654
03f6fb93 1655 if (!target->global_mr) {
330179f2
BVA
1656 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1657 idb_len, &idb_rkey);
1658 if (ret < 0)
e012f363 1659 goto unmap;
330179f2
BVA
1660 req->nmdesc++;
1661 } else {
a745f4f4 1662 idb_rkey = cpu_to_be32(target->global_mr->rkey);
330179f2
BVA
1663 }
1664
c07d424d 1665 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
330179f2 1666 indirect_hdr->table_desc.key = idb_rkey;
8f26c9ff
DD
1667 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1668 indirect_hdr->len = cpu_to_be32(state.total_len);
1669
1670 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1671 cmd->data_out_desc_cnt = count;
8f26c9ff 1672 else
c07d424d
DD
1673 cmd->data_in_desc_cnt = count;
1674
1675 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1676 DMA_TO_DEVICE);
8f26c9ff
DD
1677
1678map_complete:
aef9ec39
RD
1679 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1680 cmd->buf_fmt = fmt << 4;
1681 else
1682 cmd->buf_fmt = fmt;
1683
aef9ec39 1684 return len;
e012f363
BVA
1685
1686unmap:
1687 srp_unmap_data(scmnd, ch, req);
ffc548bb
BVA
1688 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1689 ret = -E2BIG;
e012f363 1690 return ret;
aef9ec39
RD
1691}
1692
76c75b25
BVA
1693/*
1694 * Return an IU and possible credit to the free pool
1695 */
509c07bc 1696static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1697 enum srp_iu_type iu_type)
1698{
1699 unsigned long flags;
1700
509c07bc
BVA
1701 spin_lock_irqsave(&ch->lock, flags);
1702 list_add(&iu->list, &ch->free_tx);
76c75b25 1703 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1704 ++ch->req_lim;
1705 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1706}
1707
05a1d750 1708/*
509c07bc 1709 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1710 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1711 *
1712 * Note:
1713 * An upper limit for the number of allocated information units for each
1714 * request type is:
1715 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1716 * more than Scsi_Host.can_queue requests.
1717 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1718 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1719 * one unanswered SRP request to an initiator.
1720 */
509c07bc 1721static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
1722 enum srp_iu_type iu_type)
1723{
509c07bc 1724 struct srp_target_port *target = ch->target;
05a1d750
DD
1725 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1726 struct srp_iu *iu;
1727
1dc7b1f1 1728 ib_process_cq_direct(ch->send_cq, -1);
05a1d750 1729
509c07bc 1730 if (list_empty(&ch->free_tx))
05a1d750
DD
1731 return NULL;
1732
1733 /* Initiator responses to target requests do not consume credits */
76c75b25 1734 if (iu_type != SRP_IU_RSP) {
509c07bc 1735 if (ch->req_lim <= rsv) {
76c75b25
BVA
1736 ++target->zero_req_lim;
1737 return NULL;
1738 }
1739
509c07bc 1740 --ch->req_lim;
05a1d750
DD
1741 }
1742
509c07bc 1743 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1744 list_del(&iu->list);
05a1d750
DD
1745 return iu;
1746}
1747
1dc7b1f1
CH
1748static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1749{
1750 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1751 struct srp_rdma_ch *ch = cq->cq_context;
1752
1753 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1754 srp_handle_qp_err(cq, wc, "SEND");
1755 return;
1756 }
1757
1758 list_add(&iu->list, &ch->free_tx);
1759}
1760
509c07bc 1761static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1762{
509c07bc 1763 struct srp_target_port *target = ch->target;
05a1d750
DD
1764 struct ib_sge list;
1765 struct ib_send_wr wr, *bad_wr;
05a1d750
DD
1766
1767 list.addr = iu->dma;
1768 list.length = len;
9af76271 1769 list.lkey = target->lkey;
05a1d750 1770
1dc7b1f1
CH
1771 iu->cqe.done = srp_send_done;
1772
05a1d750 1773 wr.next = NULL;
1dc7b1f1 1774 wr.wr_cqe = &iu->cqe;
05a1d750
DD
1775 wr.sg_list = &list;
1776 wr.num_sge = 1;
1777 wr.opcode = IB_WR_SEND;
1778 wr.send_flags = IB_SEND_SIGNALED;
1779
509c07bc 1780 return ib_post_send(ch->qp, &wr, &bad_wr);
05a1d750
DD
1781}
1782
509c07bc 1783static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1784{
509c07bc 1785 struct srp_target_port *target = ch->target;
c996bb47 1786 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1787 struct ib_sge list;
c996bb47
BVA
1788
1789 list.addr = iu->dma;
1790 list.length = iu->size;
9af76271 1791 list.lkey = target->lkey;
c996bb47 1792
1dc7b1f1
CH
1793 iu->cqe.done = srp_recv_done;
1794
c996bb47 1795 wr.next = NULL;
1dc7b1f1 1796 wr.wr_cqe = &iu->cqe;
c996bb47
BVA
1797 wr.sg_list = &list;
1798 wr.num_sge = 1;
1799
509c07bc 1800 return ib_post_recv(ch->qp, &wr, &bad_wr);
c996bb47
BVA
1801}
1802
509c07bc 1803static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1804{
509c07bc 1805 struct srp_target_port *target = ch->target;
aef9ec39
RD
1806 struct srp_request *req;
1807 struct scsi_cmnd *scmnd;
1808 unsigned long flags;
aef9ec39 1809
aef9ec39 1810 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
1811 spin_lock_irqsave(&ch->lock, flags);
1812 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1813 spin_unlock_irqrestore(&ch->lock, flags);
94a9174c 1814
509c07bc 1815 ch->tsk_mgmt_status = -1;
f8b6e31e 1816 if (be32_to_cpu(rsp->resp_data_len) >= 4)
509c07bc
BVA
1817 ch->tsk_mgmt_status = rsp->data[3];
1818 complete(&ch->tsk_mgmt_done);
aef9ec39 1819 } else {
77f2c1a4
BVA
1820 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1821 if (scmnd) {
1822 req = (void *)scmnd->host_scribble;
1823 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1824 }
22032991 1825 if (!scmnd) {
7aa54bd7 1826 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
1827 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1828 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 1829
509c07bc
BVA
1830 spin_lock_irqsave(&ch->lock, flags);
1831 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1832 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1833
1834 return;
1835 }
aef9ec39
RD
1836 scmnd->result = rsp->status;
1837
1838 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1839 memcpy(scmnd->sense_buffer, rsp->data +
1840 be32_to_cpu(rsp->resp_data_len),
1841 min_t(int, be32_to_cpu(rsp->sense_data_len),
1842 SCSI_SENSE_BUFFERSIZE));
1843 }
1844
e714531a 1845 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1846 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
1847 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1848 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1849 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1850 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1851 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1852 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1853
509c07bc 1854 srp_free_req(ch, req, scmnd,
22032991
BVA
1855 be32_to_cpu(rsp->req_lim_delta));
1856
f8b6e31e
DD
1857 scmnd->host_scribble = NULL;
1858 scmnd->scsi_done(scmnd);
aef9ec39 1859 }
aef9ec39
RD
1860}
1861
509c07bc 1862static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
1863 void *rsp, int len)
1864{
509c07bc 1865 struct srp_target_port *target = ch->target;
76c75b25 1866 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
1867 unsigned long flags;
1868 struct srp_iu *iu;
76c75b25 1869 int err;
bb12588a 1870
509c07bc
BVA
1871 spin_lock_irqsave(&ch->lock, flags);
1872 ch->req_lim += req_delta;
1873 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1874 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 1875
bb12588a
DD
1876 if (!iu) {
1877 shost_printk(KERN_ERR, target->scsi_host, PFX
1878 "no IU available to send response\n");
76c75b25 1879 return 1;
bb12588a
DD
1880 }
1881
1882 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1883 memcpy(iu->buf, rsp, len);
1884 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1885
509c07bc 1886 err = srp_post_send(ch, iu, len);
76c75b25 1887 if (err) {
bb12588a
DD
1888 shost_printk(KERN_ERR, target->scsi_host, PFX
1889 "unable to post response: %d\n", err);
509c07bc 1890 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 1891 }
bb12588a 1892
bb12588a
DD
1893 return err;
1894}
1895
509c07bc 1896static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
1897 struct srp_cred_req *req)
1898{
1899 struct srp_cred_rsp rsp = {
1900 .opcode = SRP_CRED_RSP,
1901 .tag = req->tag,
1902 };
1903 s32 delta = be32_to_cpu(req->req_lim_delta);
1904
509c07bc
BVA
1905 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1906 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
1907 "problems processing SRP_CRED_REQ\n");
1908}
1909
509c07bc 1910static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
1911 struct srp_aer_req *req)
1912{
509c07bc 1913 struct srp_target_port *target = ch->target;
bb12588a
DD
1914 struct srp_aer_rsp rsp = {
1915 .opcode = SRP_AER_RSP,
1916 .tag = req->tag,
1917 };
1918 s32 delta = be32_to_cpu(req->req_lim_delta);
1919
1920 shost_printk(KERN_ERR, target->scsi_host, PFX
985aa495 1921 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
bb12588a 1922
509c07bc 1923 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
1924 shost_printk(KERN_ERR, target->scsi_host, PFX
1925 "problems processing SRP_AER_REQ\n");
1926}
1927
1dc7b1f1 1928static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
aef9ec39 1929{
1dc7b1f1
CH
1930 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1931 struct srp_rdma_ch *ch = cq->cq_context;
509c07bc 1932 struct srp_target_port *target = ch->target;
dcb4cb85 1933 struct ib_device *dev = target->srp_host->srp_dev->dev;
c996bb47 1934 int res;
aef9ec39
RD
1935 u8 opcode;
1936
1dc7b1f1
CH
1937 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1938 srp_handle_qp_err(cq, wc, "RECV");
1939 return;
1940 }
1941
509c07bc 1942 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1943 DMA_FROM_DEVICE);
aef9ec39
RD
1944
1945 opcode = *(u8 *) iu->buf;
1946
1947 if (0) {
7aa54bd7
DD
1948 shost_printk(KERN_ERR, target->scsi_host,
1949 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
1950 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1951 iu->buf, wc->byte_len, true);
aef9ec39
RD
1952 }
1953
1954 switch (opcode) {
1955 case SRP_RSP:
509c07bc 1956 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
1957 break;
1958
bb12588a 1959 case SRP_CRED_REQ:
509c07bc 1960 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
1961 break;
1962
1963 case SRP_AER_REQ:
509c07bc 1964 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
1965 break;
1966
aef9ec39
RD
1967 case SRP_T_LOGOUT:
1968 /* XXX Handle target logout */
7aa54bd7
DD
1969 shost_printk(KERN_WARNING, target->scsi_host,
1970 PFX "Got target logout request\n");
aef9ec39
RD
1971 break;
1972
1973 default:
7aa54bd7
DD
1974 shost_printk(KERN_WARNING, target->scsi_host,
1975 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
1976 break;
1977 }
1978
509c07bc 1979 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1980 DMA_FROM_DEVICE);
c996bb47 1981
509c07bc 1982 res = srp_post_recv(ch, iu);
c996bb47
BVA
1983 if (res != 0)
1984 shost_printk(KERN_ERR, target->scsi_host,
1985 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
1986}
1987
c1120f89
BVA
1988/**
1989 * srp_tl_err_work() - handle a transport layer error
af24663b 1990 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
1991 *
1992 * Note: This function may get invoked before the rport has been created,
1993 * hence the target->rport test.
1994 */
1995static void srp_tl_err_work(struct work_struct *work)
1996{
1997 struct srp_target_port *target;
1998
1999 target = container_of(work, struct srp_target_port, tl_err_work);
2000 if (target->rport)
2001 srp_start_tl_fail_timers(target->rport);
2002}
2003
1dc7b1f1
CH
2004static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2005 const char *opname)
948d1e88 2006{
1dc7b1f1 2007 struct srp_rdma_ch *ch = cq->cq_context;
7dad6b2e
BVA
2008 struct srp_target_port *target = ch->target;
2009
c014c8cd 2010 if (ch->connected && !target->qp_in_error) {
1dc7b1f1
CH
2011 shost_printk(KERN_ERR, target->scsi_host,
2012 PFX "failed %s status %s (%d) for CQE %p\n",
2013 opname, ib_wc_status_msg(wc->status), wc->status,
2014 wc->wr_cqe);
c1120f89 2015 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 2016 }
948d1e88
BVA
2017 target->qp_in_error = true;
2018}
2019
76c75b25 2020static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 2021{
76c75b25 2022 struct srp_target_port *target = host_to_target(shost);
a95cadb9 2023 struct srp_rport *rport = target->rport;
509c07bc 2024 struct srp_rdma_ch *ch;
aef9ec39
RD
2025 struct srp_request *req;
2026 struct srp_iu *iu;
2027 struct srp_cmd *cmd;
85507bcc 2028 struct ib_device *dev;
76c75b25 2029 unsigned long flags;
77f2c1a4
BVA
2030 u32 tag;
2031 u16 idx;
d1b4289e 2032 int len, ret;
a95cadb9
BVA
2033 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2034
2035 /*
2036 * The SCSI EH thread is the only context from which srp_queuecommand()
2037 * can get invoked for blocked devices (SDEV_BLOCK /
2038 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2039 * locking the rport mutex if invoked from inside the SCSI EH.
2040 */
2041 if (in_scsi_eh)
2042 mutex_lock(&rport->mutex);
aef9ec39 2043
d1b4289e
BVA
2044 scmnd->result = srp_chkready(target->rport);
2045 if (unlikely(scmnd->result))
2046 goto err;
2ce19e72 2047
77f2c1a4
BVA
2048 WARN_ON_ONCE(scmnd->request->tag < 0);
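	/*
	 * blk_mq_unique_tag() combines the hardware queue number (upper 16
	 * bits) with the per-queue tag (lower 16 bits); the hwq number
	 * selects the RDMA channel and the tag indexes that channel's
	 * req_ring.
	 */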
2049 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2050 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
77f2c1a4
BVA
2051 idx = blk_mq_unique_tag_to_tag(tag);
2052 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2053 dev_name(&shost->shost_gendev), tag, idx,
2054 target->req_ring_size);
509c07bc
BVA
2055
2056 spin_lock_irqsave(&ch->lock, flags);
2057 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2058 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2059
77f2c1a4
BVA
2060 if (!iu)
2061 goto err;
2062
2063 req = &ch->req_ring[idx];
05321937 2064 dev = target->srp_host->srp_dev->dev;
49248644 2065 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 2066 DMA_TO_DEVICE);
aef9ec39 2067
f8b6e31e 2068 scmnd->host_scribble = (void *) req;
aef9ec39
RD
2069
2070 cmd = iu->buf;
2071 memset(cmd, 0, sizeof *cmd);
2072
2073 cmd->opcode = SRP_CMD;
985aa495 2074 int_to_scsilun(scmnd->device->lun, &cmd->lun);
77f2c1a4 2075 cmd->tag = tag;
aef9ec39
RD
2076 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2077
aef9ec39
RD
2078 req->scmnd = scmnd;
2079 req->cmd = iu;
aef9ec39 2080
509c07bc 2081 len = srp_map_data(scmnd, ch, req);
aef9ec39 2082 if (len < 0) {
7aa54bd7 2083 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2084 PFX "Failed to map data (%d)\n", len);
2085 /*
2086 * If we ran out of memory descriptors (-ENOMEM) because an
2087 * application is queuing many requests with more than
52ede08f 2088 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2089 * to reduce queue depth temporarily.
2090 */
2091 scmnd->result = len == -ENOMEM ?
2092 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2093 goto err_iu;
aef9ec39
RD
2094 }
2095
49248644 2096 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 2097 DMA_TO_DEVICE);
aef9ec39 2098
509c07bc 2099 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2100 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
2101 goto err_unmap;
2102 }
2103
d1b4289e
BVA
2104 ret = 0;
2105
a95cadb9
BVA
2106unlock_rport:
2107 if (in_scsi_eh)
2108 mutex_unlock(&rport->mutex);
2109
d1b4289e 2110 return ret;
aef9ec39
RD
2111
2112err_unmap:
509c07bc 2113 srp_unmap_data(scmnd, ch, req);
aef9ec39 2114
76c75b25 2115err_iu:
509c07bc 2116 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2117
024ca901
BVA
2118 /*
 2119	 * Ensure that the loops that iterate over the request ring do not
 2120	 * encounter a dangling SCSI command pointer.
2121 */
2122 req->scmnd = NULL;
2123
d1b4289e
BVA
2124err:
2125 if (scmnd->result) {
2126 scmnd->scsi_done(scmnd);
2127 ret = 0;
2128 } else {
2129 ret = SCSI_MLQUEUE_HOST_BUSY;
2130 }
a95cadb9 2131
d1b4289e 2132 goto unlock_rport;
aef9ec39
RD
2133}
2134
4d73f95f
BVA
2135/*
2136 * Note: the resources allocated in this function are freed in
509c07bc 2137 * srp_free_ch_ib().
4d73f95f 2138 */
509c07bc 2139static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2140{
509c07bc 2141 struct srp_target_port *target = ch->target;
aef9ec39
RD
2142 int i;
2143
509c07bc
BVA
2144 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2145 GFP_KERNEL);
2146 if (!ch->rx_ring)
4d73f95f 2147 goto err_no_ring;
509c07bc
BVA
2148 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2149 GFP_KERNEL);
2150 if (!ch->tx_ring)
4d73f95f
BVA
2151 goto err_no_ring;
2152
2153 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2154 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2155 ch->max_ti_iu_len,
2156 GFP_KERNEL, DMA_FROM_DEVICE);
2157 if (!ch->rx_ring[i])
aef9ec39
RD
2158 goto err;
2159 }
2160
4d73f95f 2161 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2162 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2163 target->max_iu_len,
2164 GFP_KERNEL, DMA_TO_DEVICE);
2165 if (!ch->tx_ring[i])
aef9ec39 2166 goto err;
dcb4cb85 2167
509c07bc 2168 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2169 }
2170
2171 return 0;
2172
2173err:
4d73f95f 2174 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2175 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2176 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2177 }
2178
4d73f95f
BVA
2179
2180err_no_ring:
509c07bc
BVA
2181 kfree(ch->tx_ring);
2182 ch->tx_ring = NULL;
2183 kfree(ch->rx_ring);
2184 ch->rx_ring = NULL;
4d73f95f 2185
aef9ec39
RD
2186 return -ENOMEM;
2187}
2188
c9b03c1a
BVA
2189static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2190{
2191 uint64_t T_tr_ns, max_compl_time_ms;
2192 uint32_t rq_tmo_jiffies;
2193
2194 /*
2195 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2196 * table 91), both the QP timeout and the retry count have to be set
 2197	 * for RC QPs during the RTR to RTS transition.
2198 */
2199 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2200 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2201
2202 /*
2203 * Set target->rq_tmo_jiffies to one second more than the largest time
2204 * it can take before an error completion is generated. See also
2205 * C9-140..142 in the IBTA spec for more information about how to
2206 * convert the QP Local ACK Timeout value to nanoseconds.
2207 */
2208 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2209 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2210 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2211 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2212
2213 return rq_tmo_jiffies;
2214}
2215
961e0be8 2216static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
e6300cbd 2217 const struct srp_login_rsp *lrsp,
509c07bc 2218 struct srp_rdma_ch *ch)
961e0be8 2219{
509c07bc 2220 struct srp_target_port *target = ch->target;
961e0be8
DD
2221 struct ib_qp_attr *qp_attr = NULL;
2222 int attr_mask = 0;
2223 int ret;
2224 int i;
2225
2226 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2227 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2228 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
961e0be8
DD
2229
2230 /*
2231 * Reserve credits for task management so we don't
2232 * bounce requests back to the SCSI mid-layer.
2233 */
2234 target->scsi_host->can_queue
509c07bc 2235 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2236 target->scsi_host->can_queue);
4d73f95f
BVA
2237 target->scsi_host->cmd_per_lun
2238 = min_t(int, target->scsi_host->can_queue,
2239 target->scsi_host->cmd_per_lun);
961e0be8
DD
2240 } else {
2241 shost_printk(KERN_WARNING, target->scsi_host,
2242 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2243 ret = -ECONNRESET;
2244 goto error;
2245 }
2246
509c07bc
BVA
2247 if (!ch->rx_ring) {
2248 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2249 if (ret)
2250 goto error;
2251 }
2252
2253 ret = -ENOMEM;
2254 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2255 if (!qp_attr)
2256 goto error;
2257
2258 qp_attr->qp_state = IB_QPS_RTR;
2259 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2260 if (ret)
2261 goto error_free;
2262
509c07bc 2263 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2264 if (ret)
2265 goto error_free;
2266
4d73f95f 2267 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2268 struct srp_iu *iu = ch->rx_ring[i];
2269
2270 ret = srp_post_recv(ch, iu);
961e0be8
DD
2271 if (ret)
2272 goto error_free;
2273 }
2274
2275 qp_attr->qp_state = IB_QPS_RTS;
2276 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2277 if (ret)
2278 goto error_free;
2279
c9b03c1a
BVA
2280 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2281
509c07bc 2282 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2283 if (ret)
2284 goto error_free;
2285
2286 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2287
2288error_free:
2289 kfree(qp_attr);
2290
2291error:
509c07bc 2292 ch->status = ret;
961e0be8
DD
2293}
2294
aef9ec39
RD
2295static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2296 struct ib_cm_event *event,
509c07bc 2297 struct srp_rdma_ch *ch)
aef9ec39 2298{
509c07bc 2299 struct srp_target_port *target = ch->target;
7aa54bd7 2300 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2301 struct ib_class_port_info *cpi;
2302 int opcode;
2303
2304 switch (event->param.rej_rcvd.reason) {
2305 case IB_CM_REJ_PORT_CM_REDIRECT:
2306 cpi = event->param.rej_rcvd.ari;
509c07bc
BVA
2307 ch->path.dlid = cpi->redirect_lid;
2308 ch->path.pkey = cpi->redirect_pkey;
aef9ec39 2309 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
509c07bc 2310 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2311
509c07bc 2312 ch->status = ch->path.dlid ?
aef9ec39
RD
2313 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2314 break;
2315
2316 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2317 if (srp_target_is_topspin(target)) {
aef9ec39
RD
2318 /*
2319 * Topspin/Cisco SRP gateways incorrectly send
2320 * reject reason code 25 when they mean 24
2321 * (port redirect).
2322 */
509c07bc 2323 memcpy(ch->path.dgid.raw,
aef9ec39
RD
2324 event->param.rej_rcvd.ari, 16);
2325
7aa54bd7
DD
2326 shost_printk(KERN_DEBUG, shost,
2327 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
509c07bc
BVA
2328 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2329 be64_to_cpu(ch->path.dgid.global.interface_id));
aef9ec39 2330
509c07bc 2331 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2332 } else {
7aa54bd7
DD
2333 shost_printk(KERN_WARNING, shost,
2334 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2335 ch->status = -ECONNRESET;
aef9ec39
RD
2336 }
2337 break;
2338
2339 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2340 shost_printk(KERN_WARNING, shost,
2341 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2342 ch->status = -ECONNRESET;
aef9ec39
RD
2343 break;
2344
2345 case IB_CM_REJ_CONSUMER_DEFINED:
2346 opcode = *(u8 *) event->private_data;
2347 if (opcode == SRP_LOGIN_REJ) {
2348 struct srp_login_rej *rej = event->private_data;
2349 u32 reason = be32_to_cpu(rej->reason);
2350
2351 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2352 shost_printk(KERN_WARNING, shost,
2353 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2354 else
e7ffde01
BVA
2355 shost_printk(KERN_WARNING, shost, PFX
2356 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000
BVA
2357 target->sgid.raw,
2358 target->orig_dgid.raw, reason);
aef9ec39 2359 } else
7aa54bd7
DD
2360 shost_printk(KERN_WARNING, shost,
2361 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2362 " opcode 0x%02x\n", opcode);
509c07bc 2363 ch->status = -ECONNRESET;
aef9ec39
RD
2364 break;
2365
9fe4bcf4
DD
2366 case IB_CM_REJ_STALE_CONN:
2367 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2368 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2369 break;
2370
aef9ec39 2371 default:
7aa54bd7
DD
2372 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2373 event->param.rej_rcvd.reason);
509c07bc 2374 ch->status = -ECONNRESET;
aef9ec39
RD
2375 }
2376}
2377
2378static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2379{
509c07bc
BVA
2380 struct srp_rdma_ch *ch = cm_id->context;
2381 struct srp_target_port *target = ch->target;
aef9ec39 2382 int comp = 0;
aef9ec39
RD
2383
2384 switch (event->event) {
2385 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2386 shost_printk(KERN_DEBUG, target->scsi_host,
2387 PFX "Sending CM REQ failed\n");
aef9ec39 2388 comp = 1;
509c07bc 2389 ch->status = -ECONNRESET;
aef9ec39
RD
2390 break;
2391
2392 case IB_CM_REP_RECEIVED:
2393 comp = 1;
509c07bc 2394 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2395 break;
2396
2397 case IB_CM_REJ_RECEIVED:
7aa54bd7 2398 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2399 comp = 1;
2400
509c07bc 2401 srp_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2402 break;
2403
b7ac4ab4 2404 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2405 shost_printk(KERN_WARNING, target->scsi_host,
2406 PFX "DREQ received - connection closed\n");
c014c8cd 2407 ch->connected = false;
b7ac4ab4 2408 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2409 shost_printk(KERN_ERR, target->scsi_host,
2410 PFX "Sending CM DREP failed\n");
c1120f89 2411 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2412 break;
2413
2414 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2415 shost_printk(KERN_ERR, target->scsi_host,
2416 PFX "connection closed\n");
ac72d766 2417 comp = 1;
aef9ec39 2418
509c07bc 2419 ch->status = 0;
aef9ec39
RD
2420 break;
2421
b7ac4ab4
IR
2422 case IB_CM_MRA_RECEIVED:
2423 case IB_CM_DREQ_ERROR:
2424 case IB_CM_DREP_RECEIVED:
2425 break;
2426
aef9ec39 2427 default:
7aa54bd7
DD
2428 shost_printk(KERN_WARNING, target->scsi_host,
2429 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2430 break;
2431 }
2432
2433 if (comp)
509c07bc 2434 complete(&ch->done);
aef9ec39 2435
aef9ec39
RD
2436 return 0;
2437}
2438
71444b97
JW
2439/**
2440 * srp_change_queue_depth - setting device queue depth
2441 * @sdev: scsi device struct
2442 * @qdepth: requested queue depth
71444b97
JW
2443 *
2444 * Returns queue depth.
2445 */
2446static int
db5ed4df 2447srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2448{
c40ecc12 2449 if (!sdev->tagged_supported)
1e6f2416 2450 qdepth = 1;
db5ed4df 2451 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2452}
2453
985aa495
BVA
2454static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2455 u8 func)
aef9ec39 2456{
509c07bc 2457 struct srp_target_port *target = ch->target;
a95cadb9 2458 struct srp_rport *rport = target->rport;
19081f31 2459 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2460 struct srp_iu *iu;
2461 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2462
c014c8cd 2463 if (!ch->connected || target->qp_in_error)
3780d1f0
BVA
2464 return -1;
2465
509c07bc 2466 init_completion(&ch->tsk_mgmt_done);
aef9ec39 2467
a95cadb9 2468 /*
509c07bc 2469	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
2470 * invoked while a task management function is being sent.
2471 */
2472 mutex_lock(&rport->mutex);
509c07bc
BVA
2473 spin_lock_irq(&ch->lock);
2474 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2475 spin_unlock_irq(&ch->lock);
76c75b25 2476
a95cadb9
BVA
2477 if (!iu) {
2478 mutex_unlock(&rport->mutex);
2479
76c75b25 2480 return -1;
a95cadb9 2481 }
aef9ec39 2482
19081f31
DD
2483 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2484 DMA_TO_DEVICE);
aef9ec39
RD
2485 tsk_mgmt = iu->buf;
2486 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2487
2488 tsk_mgmt->opcode = SRP_TSK_MGMT;
985aa495 2489 int_to_scsilun(lun, &tsk_mgmt->lun);
f8b6e31e 2490 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2491 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2492 tsk_mgmt->task_tag = req_tag;
aef9ec39 2493
19081f31
DD
2494 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2495 DMA_TO_DEVICE);
509c07bc
BVA
2496 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2497 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2498 mutex_unlock(&rport->mutex);
2499
76c75b25
BVA
2500 return -1;
2501 }
a95cadb9 2502 mutex_unlock(&rport->mutex);
d945e1df 2503
509c07bc 2504 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
aef9ec39 2505 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2506 return -1;
aef9ec39 2507
d945e1df 2508 return 0;
d945e1df
RD
2509}
2510
aef9ec39
RD
2511static int srp_abort(struct scsi_cmnd *scmnd)
2512{
d945e1df 2513 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2514 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2515 u32 tag;
d92c0da7 2516 u16 ch_idx;
509c07bc 2517 struct srp_rdma_ch *ch;
086f44f5 2518 int ret;
d945e1df 2519
7aa54bd7 2520 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2521
d92c0da7 2522 if (!req)
99b6697a 2523 return SUCCESS;
77f2c1a4 2524 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7
BVA
2525 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2526 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2527 return SUCCESS;
2528 ch = &target->ch[ch_idx];
2529 if (!srp_claim_req(ch, req, NULL, scmnd))
2530 return SUCCESS;
2531 shost_printk(KERN_ERR, target->scsi_host,
2532 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2533 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
80d5e8a2 2534 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2535 ret = SUCCESS;
ed9b2264 2536 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2537 ret = FAST_IO_FAIL;
086f44f5
BVA
2538 else
2539 ret = FAILED;
509c07bc 2540 srp_free_req(ch, req, scmnd, 0);
22032991 2541 scmnd->result = DID_ABORT << 16;
d8536670 2542 scmnd->scsi_done(scmnd);
d945e1df 2543
086f44f5 2544 return ret;
aef9ec39
RD
2545}
2546
2547static int srp_reset_device(struct scsi_cmnd *scmnd)
2548{
d945e1df 2549 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 2550 struct srp_rdma_ch *ch;
536ae14e 2551 int i;
d945e1df 2552
7aa54bd7 2553 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2554
d92c0da7 2555 ch = &target->ch[0];
509c07bc 2556 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
f8b6e31e 2557 SRP_TSK_LUN_RESET))
d945e1df 2558 return FAILED;
509c07bc 2559 if (ch->tsk_mgmt_status)
d945e1df
RD
2560 return FAILED;
2561
d92c0da7
BVA
2562 for (i = 0; i < target->ch_count; i++) {
2563 ch = &target->ch[i];
 2564		for (j = 0; j < target->req_ring_size; ++j) {
 2565			struct srp_request *req = &ch->req_ring[j];
509c07bc 2566
d92c0da7
BVA
2567 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2568 }
536ae14e 2569 }
d945e1df 2570
d945e1df 2571 return SUCCESS;
aef9ec39
RD
2572}
2573
2574static int srp_reset_host(struct scsi_cmnd *scmnd)
2575{
2576 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2577
7aa54bd7 2578 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2579
ed9b2264 2580 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2581}
2582
c9b03c1a
BVA
2583static int srp_slave_configure(struct scsi_device *sdev)
2584{
2585 struct Scsi_Host *shost = sdev->host;
2586 struct srp_target_port *target = host_to_target(shost);
2587 struct request_queue *q = sdev->request_queue;
2588 unsigned long timeout;
2589
2590 if (sdev->type == TYPE_DISK) {
2591 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2592 blk_queue_rq_timeout(q, timeout);
2593 }
2594
2595 return 0;
2596}
2597
ee959b00
TJ
2598static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2599 char *buf)
6ecb0c84 2600{
ee959b00 2601 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2602
45c37cad 2603 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
6ecb0c84
RD
2604}
2605
ee959b00
TJ
2606static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2607 char *buf)
6ecb0c84 2608{
ee959b00 2609 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2610
45c37cad 2611 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
6ecb0c84
RD
2612}
2613
ee959b00
TJ
2614static ssize_t show_service_id(struct device *dev,
2615 struct device_attribute *attr, char *buf)
6ecb0c84 2616{
ee959b00 2617 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2618
45c37cad 2619 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
6ecb0c84
RD
2620}
2621
ee959b00
TJ
2622static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2623 char *buf)
6ecb0c84 2624{
ee959b00 2625 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2626
747fe000 2627 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
6ecb0c84
RD
2628}
2629
848b3082
BVA
2630static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2631 char *buf)
2632{
2633 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2634
747fe000 2635 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
2636}
2637
ee959b00
TJ
2638static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2639 char *buf)
6ecb0c84 2640{
ee959b00 2641 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 2642 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 2643
509c07bc 2644 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
6ecb0c84
RD
2645}
2646
ee959b00
TJ
2647static ssize_t show_orig_dgid(struct device *dev,
2648 struct device_attribute *attr, char *buf)
3633b3d0 2649{
ee959b00 2650 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2651
747fe000 2652 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
3633b3d0
IR
2653}
2654
89de7486
BVA
2655static ssize_t show_req_lim(struct device *dev,
2656 struct device_attribute *attr, char *buf)
2657{
2658 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
2659 struct srp_rdma_ch *ch;
2660 int i, req_lim = INT_MAX;
89de7486 2661
d92c0da7
BVA
2662 for (i = 0; i < target->ch_count; i++) {
2663 ch = &target->ch[i];
2664 req_lim = min(req_lim, ch->req_lim);
2665 }
2666 return sprintf(buf, "%d\n", req_lim);
89de7486
BVA
2667}
2668
ee959b00
TJ
2669static ssize_t show_zero_req_lim(struct device *dev,
2670 struct device_attribute *attr, char *buf)
6bfa24fa 2671{
ee959b00 2672 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2673
6bfa24fa
RD
2674 return sprintf(buf, "%d\n", target->zero_req_lim);
2675}
2676
ee959b00
TJ
2677static ssize_t show_local_ib_port(struct device *dev,
2678 struct device_attribute *attr, char *buf)
ded7f1a1 2679{
ee959b00 2680 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
2681
2682 return sprintf(buf, "%d\n", target->srp_host->port);
2683}
2684
ee959b00
TJ
2685static ssize_t show_local_ib_device(struct device *dev,
2686 struct device_attribute *attr, char *buf)
ded7f1a1 2687{
ee959b00 2688 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2689
05321937 2690 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
2691}
2692
d92c0da7
BVA
2693static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2694 char *buf)
2695{
2696 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2697
2698 return sprintf(buf, "%d\n", target->ch_count);
2699}
2700
4b5e5f41
BVA
2701static ssize_t show_comp_vector(struct device *dev,
2702 struct device_attribute *attr, char *buf)
2703{
2704 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2705
2706 return sprintf(buf, "%d\n", target->comp_vector);
2707}
2708
7bb312e4
VP
2709static ssize_t show_tl_retry_count(struct device *dev,
2710 struct device_attribute *attr, char *buf)
2711{
2712 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2713
2714 return sprintf(buf, "%d\n", target->tl_retry_count);
2715}
2716
49248644
DD
2717static ssize_t show_cmd_sg_entries(struct device *dev,
2718 struct device_attribute *attr, char *buf)
2719{
2720 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2721
2722 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2723}
2724
c07d424d
DD
2725static ssize_t show_allow_ext_sg(struct device *dev,
2726 struct device_attribute *attr, char *buf)
2727{
2728 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2729
2730 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2731}
2732
ee959b00
TJ
2733static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2734static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2735static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2736static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2737static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
2738static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2739static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2740static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
2741static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2742static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2743static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 2744static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 2745static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2746static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2747static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2748static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2749
2750static struct device_attribute *srp_host_attrs[] = {
2751 &dev_attr_id_ext,
2752 &dev_attr_ioc_guid,
2753 &dev_attr_service_id,
2754 &dev_attr_pkey,
848b3082 2755 &dev_attr_sgid,
ee959b00
TJ
2756 &dev_attr_dgid,
2757 &dev_attr_orig_dgid,
89de7486 2758 &dev_attr_req_lim,
ee959b00
TJ
2759 &dev_attr_zero_req_lim,
2760 &dev_attr_local_ib_port,
2761 &dev_attr_local_ib_device,
d92c0da7 2762 &dev_attr_ch_count,
4b5e5f41 2763 &dev_attr_comp_vector,
7bb312e4 2764 &dev_attr_tl_retry_count,
49248644 2765 &dev_attr_cmd_sg_entries,
c07d424d 2766 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2767 NULL
2768};
2769
aef9ec39
RD
2770static struct scsi_host_template srp_template = {
2771 .module = THIS_MODULE,
b7f008fd
RD
2772 .name = "InfiniBand SRP initiator",
2773 .proc_name = DRV_NAME,
c9b03c1a 2774 .slave_configure = srp_slave_configure,
aef9ec39
RD
2775 .info = srp_target_info,
2776 .queuecommand = srp_queuecommand,
71444b97 2777 .change_queue_depth = srp_change_queue_depth,
aef9ec39
RD
2778 .eh_abort_handler = srp_abort,
2779 .eh_device_reset_handler = srp_reset_device,
2780 .eh_host_reset_handler = srp_reset_host,
2742c1da 2781 .skip_settle_delay = true,
49248644 2782 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2783 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2784 .this_id = -1,
4d73f95f 2785 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84 2786 .use_clustering = ENABLE_CLUSTERING,
77f2c1a4 2787 .shost_attrs = srp_host_attrs,
c40ecc12 2788 .track_queue_depth = 1,
aef9ec39
RD
2789};
2790
34aa654e
BVA
2791static int srp_sdev_count(struct Scsi_Host *host)
2792{
2793 struct scsi_device *sdev;
2794 int c = 0;
2795
2796 shost_for_each_device(sdev, host)
2797 c++;
2798
2799 return c;
2800}
2801
bc44bd1d
BVA
2802/*
2803 * Return values:
2804 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2805 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2806 * removal has been scheduled.
2807 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2808 */
aef9ec39
RD
2809static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2810{
3236822b
FT
2811 struct srp_rport_identifiers ids;
2812 struct srp_rport *rport;
2813
34aa654e 2814 target->state = SRP_TARGET_SCANNING;
aef9ec39 2815 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 2816 be64_to_cpu(target->id_ext));
aef9ec39 2817
05321937 2818 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2819 return -ENODEV;
2820
3236822b
FT
2821 memcpy(ids.port_id, &target->id_ext, 8);
2822 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2823 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2824 rport = srp_rport_add(target->scsi_host, &ids);
2825 if (IS_ERR(rport)) {
2826 scsi_remove_host(target->scsi_host);
2827 return PTR_ERR(rport);
2828 }
2829
dc1bdbd9 2830 rport->lld_data = target;
9dd69a60 2831 target->rport = rport;
dc1bdbd9 2832
b3589fd4 2833 spin_lock(&host->target_lock);
aef9ec39 2834 list_add_tail(&target->list, &host->target_list);
b3589fd4 2835 spin_unlock(&host->target_lock);
aef9ec39 2836
aef9ec39 2837 scsi_scan_target(&target->scsi_host->shost_gendev,
1962a4a1 2838 0, target->scsi_id, SCAN_WILD_CARD, 0);
aef9ec39 2839
c014c8cd
BVA
2840 if (srp_connected_ch(target) < target->ch_count ||
2841 target->qp_in_error) {
34aa654e
BVA
2842 shost_printk(KERN_INFO, target->scsi_host,
2843 PFX "SCSI scan failed - removing SCSI host\n");
2844 srp_queue_remove_work(target);
2845 goto out;
2846 }
2847
cf1acab7 2848 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
34aa654e
BVA
2849 dev_name(&target->scsi_host->shost_gendev),
2850 srp_sdev_count(target->scsi_host));
2851
2852 spin_lock_irq(&target->lock);
2853 if (target->state == SRP_TARGET_SCANNING)
2854 target->state = SRP_TARGET_LIVE;
2855 spin_unlock_irq(&target->lock);
2856
2857out:
aef9ec39
RD
2858 return 0;
2859}
2860
ee959b00 2861static void srp_release_dev(struct device *dev)
aef9ec39
RD
2862{
2863 struct srp_host *host =
ee959b00 2864 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2865
2866 complete(&host->released);
2867}
2868
2869static struct class srp_class = {
2870 .name = "infiniband_srp",
ee959b00 2871 .dev_release = srp_release_dev
aef9ec39
RD
2872};
2873
96fc248a
BVA
2874/**
2875 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
2876 * @host: SRP host.
2877 * @target: SRP target port.
96fc248a
BVA
2878 */
2879static bool srp_conn_unique(struct srp_host *host,
2880 struct srp_target_port *target)
2881{
2882 struct srp_target_port *t;
2883 bool ret = false;
2884
2885 if (target->state == SRP_TARGET_REMOVED)
2886 goto out;
2887
2888 ret = true;
2889
2890 spin_lock(&host->target_lock);
2891 list_for_each_entry(t, &host->target_list, list) {
2892 if (t != target &&
2893 target->id_ext == t->id_ext &&
2894 target->ioc_guid == t->ioc_guid &&
2895 target->initiator_ext == t->initiator_ext) {
2896 ret = false;
2897 break;
2898 }
2899 }
2900 spin_unlock(&host->target_lock);
2901
2902out:
2903 return ret;
2904}
2905
aef9ec39
RD
2906/*
2907 * Target ports are added by writing
2908 *
2909 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2910 * pkey=<P_Key>,service_id=<service ID>
2911 *
2912 * to the add_target sysfs attribute.
2913 */
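/*
 * For example (all identifiers below are placeholders), a target port can be
 * added from user space with something like:
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * where "srp-mlx4_0-1" follows the "srp-<device name>-<port>" naming used by
 * srp_add_port() and the hexadecimal values identify the remote target port.
 */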
2914enum {
2915 SRP_OPT_ERR = 0,
2916 SRP_OPT_ID_EXT = 1 << 0,
2917 SRP_OPT_IOC_GUID = 1 << 1,
2918 SRP_OPT_DGID = 1 << 2,
2919 SRP_OPT_PKEY = 1 << 3,
2920 SRP_OPT_SERVICE_ID = 1 << 4,
2921 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2922 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2923 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2924 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2925 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
2926 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2927 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 2928 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 2929 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 2930 SRP_OPT_QUEUE_SIZE = 1 << 14,
aef9ec39
RD
2931 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2932 SRP_OPT_IOC_GUID |
2933 SRP_OPT_DGID |
2934 SRP_OPT_PKEY |
2935 SRP_OPT_SERVICE_ID),
2936};
2937
a447c093 2938static const match_table_t srp_opt_tokens = {
52fb2b50
VP
2939 { SRP_OPT_ID_EXT, "id_ext=%s" },
2940 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2941 { SRP_OPT_DGID, "dgid=%s" },
2942 { SRP_OPT_PKEY, "pkey=%x" },
2943 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2944 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2945 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 2946 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 2947 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 2948 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
2949 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2950 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 2951 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 2952 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 2953 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 2954 { SRP_OPT_ERR, NULL }
aef9ec39
RD
2955};
2956
2957static int srp_parse_options(const char *buf, struct srp_target_port *target)
2958{
2959 char *options, *sep_opt;
2960 char *p;
2961 char dgid[3];
2962 substring_t args[MAX_OPT_ARGS];
2963 int opt_mask = 0;
2964 int token;
2965 int ret = -EINVAL;
2966 int i;
2967
2968 options = kstrdup(buf, GFP_KERNEL);
2969 if (!options)
2970 return -ENOMEM;
2971
2972 sep_opt = options;
7dcf9c19 2973 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
2974 if (!*p)
2975 continue;
2976
2977 token = match_token(p, srp_opt_tokens, args);
2978 opt_mask |= token;
2979
2980 switch (token) {
2981 case SRP_OPT_ID_EXT:
2982 p = match_strdup(args);
a20f3a6d
IR
2983 if (!p) {
2984 ret = -ENOMEM;
2985 goto out;
2986 }
aef9ec39
RD
2987 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2988 kfree(p);
2989 break;
2990
2991 case SRP_OPT_IOC_GUID:
2992 p = match_strdup(args);
a20f3a6d
IR
2993 if (!p) {
2994 ret = -ENOMEM;
2995 goto out;
2996 }
aef9ec39
RD
2997 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2998 kfree(p);
2999 break;
3000
3001 case SRP_OPT_DGID:
3002 p = match_strdup(args);
a20f3a6d
IR
3003 if (!p) {
3004 ret = -ENOMEM;
3005 goto out;
3006 }
aef9ec39 3007 if (strlen(p) != 32) {
e0bda7d8 3008 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 3009 kfree(p);
aef9ec39
RD
3010 goto out;
3011 }
3012
3013 for (i = 0; i < 16; ++i) {
747fe000
BVA
3014 strlcpy(dgid, p + i * 2, sizeof(dgid));
3015 if (sscanf(dgid, "%hhx",
3016 &target->orig_dgid.raw[i]) < 1) {
3017 ret = -EINVAL;
3018 kfree(p);
3019 goto out;
3020 }
aef9ec39 3021 }
bf17c1c7 3022 kfree(p);
aef9ec39
RD
3023 break;
3024
3025 case SRP_OPT_PKEY:
3026 if (match_hex(args, &token)) {
e0bda7d8 3027 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
3028 goto out;
3029 }
747fe000 3030 target->pkey = cpu_to_be16(token);
aef9ec39
RD
3031 break;
3032
3033 case SRP_OPT_SERVICE_ID:
3034 p = match_strdup(args);
a20f3a6d
IR
3035 if (!p) {
3036 ret = -ENOMEM;
3037 goto out;
3038 }
aef9ec39
RD
3039 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3040 kfree(p);
3041 break;
3042
3043 case SRP_OPT_MAX_SECT:
3044 if (match_int(args, &token)) {
e0bda7d8 3045 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3046 goto out;
3047 }
3048 target->scsi_host->max_sectors = token;
3049 break;
3050
4d73f95f
BVA
3051 case SRP_OPT_QUEUE_SIZE:
3052 if (match_int(args, &token) || token < 1) {
3053 pr_warn("bad queue_size parameter '%s'\n", p);
3054 goto out;
3055 }
3056 target->scsi_host->can_queue = token;
3057 target->queue_size = token + SRP_RSP_SQ_SIZE +
3058 SRP_TSK_MGMT_SQ_SIZE;
3059 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3060 target->scsi_host->cmd_per_lun = token;
3061 break;
3062
52fb2b50 3063 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3064 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3065 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3066 p);
52fb2b50
VP
3067 goto out;
3068 }
4d73f95f 3069 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3070 break;
3071
0c0450db
R
3072 case SRP_OPT_IO_CLASS:
3073 if (match_hex(args, &token)) {
e0bda7d8 3074 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3075 goto out;
3076 }
3077 if (token != SRP_REV10_IB_IO_CLASS &&
3078 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3079 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3080 token, SRP_REV10_IB_IO_CLASS,
3081 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3082 goto out;
3083 }
3084 target->io_class = token;
3085 break;
3086
01cb9bcb
IR
3087 case SRP_OPT_INITIATOR_EXT:
3088 p = match_strdup(args);
a20f3a6d
IR
3089 if (!p) {
3090 ret = -ENOMEM;
3091 goto out;
3092 }
01cb9bcb
IR
3093 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3094 kfree(p);
3095 break;
3096
49248644
DD
3097 case SRP_OPT_CMD_SG_ENTRIES:
3098 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3099 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3100 p);
49248644
DD
3101 goto out;
3102 }
3103 target->cmd_sg_cnt = token;
3104 break;
3105
c07d424d
DD
3106 case SRP_OPT_ALLOW_EXT_SG:
3107 if (match_int(args, &token)) {
e0bda7d8 3108 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3109 goto out;
3110 }
3111 target->allow_ext_sg = !!token;
3112 break;
3113
3114 case SRP_OPT_SG_TABLESIZE:
3115 if (match_int(args, &token) || token < 1 ||
3116 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
e0bda7d8
BVA
3117 pr_warn("bad max sg_tablesize parameter '%s'\n",
3118 p);
c07d424d
DD
3119 goto out;
3120 }
3121 target->sg_tablesize = token;
3122 break;
3123
4b5e5f41
BVA
3124 case SRP_OPT_COMP_VECTOR:
3125 if (match_int(args, &token) || token < 0) {
3126 pr_warn("bad comp_vector parameter '%s'\n", p);
3127 goto out;
3128 }
3129 target->comp_vector = token;
3130 break;
3131
7bb312e4
VP
3132 case SRP_OPT_TL_RETRY_COUNT:
3133 if (match_int(args, &token) || token < 2 || token > 7) {
3134 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3135 p);
3136 goto out;
3137 }
3138 target->tl_retry_count = token;
3139 break;
3140
aef9ec39 3141 default:
e0bda7d8
BVA
3142 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3143 p);
aef9ec39
RD
3144 goto out;
3145 }
3146 }
3147
3148 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3149 ret = 0;
3150 else
3151 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3152 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3153 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
3154 pr_warn("target creation request is missing parameter '%s'\n",
3155 srp_opt_tokens[i].pattern);
aef9ec39 3156
4d73f95f
BVA
3157 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3158 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3159 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3160 target->scsi_host->cmd_per_lun,
3161 target->scsi_host->can_queue);
3162
aef9ec39
RD
3163out:
3164 kfree(options);
3165 return ret;
3166}
3167
ee959b00
TJ
3168static ssize_t srp_create_target(struct device *dev,
3169 struct device_attribute *attr,
aef9ec39
RD
3170 const char *buf, size_t count)
3171{
3172 struct srp_host *host =
ee959b00 3173 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3174 struct Scsi_Host *target_host;
3175 struct srp_target_port *target;
509c07bc 3176 struct srp_rdma_ch *ch;
d1b4289e
BVA
3177 struct srp_device *srp_dev = host->srp_dev;
3178 struct ib_device *ibdev = srp_dev->dev;
d92c0da7
BVA
3179 int ret, node_idx, node, cpu, i;
3180 bool multich = false;
aef9ec39
RD
3181
3182 target_host = scsi_host_alloc(&srp_template,
3183 sizeof (struct srp_target_port));
3184 if (!target_host)
3185 return -ENOMEM;
3186
49248644 3187 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3188 target_host->max_channel = 0;
3189 target_host->max_id = 1;
985aa495 3190 target_host->max_lun = -1LL;
3c8edf0e 3191 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 3192
aef9ec39 3193 target = host_to_target(target_host);
aef9ec39 3194
49248644
DD
3195 target->io_class = SRP_REV16A_IB_IO_CLASS;
3196 target->scsi_host = target_host;
3197 target->srp_host = host;
e6bf5f48 3198 target->lkey = host->srp_dev->pd->local_dma_lkey;
03f6fb93 3199 target->global_mr = host->srp_dev->global_mr;
49248644 3200 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3201 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3202 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3203 target->tl_retry_count = 7;
4d73f95f 3204 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3205
34aa654e
BVA
3206 /*
 3207	 * Prevent the SCSI host from being removed by srp_remove_target()
3208 * before this function returns.
3209 */
3210 scsi_host_get(target->scsi_host);
3211
2d7091bc
BVA
3212 mutex_lock(&host->add_target_mutex);
3213
aef9ec39
RD
3214 ret = srp_parse_options(buf, target);
3215 if (ret)
fb49c8bb 3216 goto out;
aef9ec39 3217
4d73f95f
BVA
3218 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3219
96fc248a
BVA
3220 if (!srp_conn_unique(target->srp_host, target)) {
3221 shost_printk(KERN_INFO, target->scsi_host,
3222 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3223 be64_to_cpu(target->id_ext),
3224 be64_to_cpu(target->ioc_guid),
3225 be64_to_cpu(target->initiator_ext));
3226 ret = -EEXIST;
fb49c8bb 3227 goto out;
96fc248a
BVA
3228 }
3229
5cfb1782 3230 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3231 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3232 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3233 target->sg_tablesize = target->cmd_sg_cnt;
3234 }
3235
3236 target_host->sg_tablesize = target->sg_tablesize;
fa9863f8 3237 target->mr_pool_size = target->scsi_host->can_queue;
c07d424d
DD
3238 target->indirect_size = target->sg_tablesize *
3239 sizeof (struct srp_direct_buf);
49248644
DD
3240 target->max_iu_len = sizeof (struct srp_cmd) +
3241 sizeof (struct srp_indirect_buf) +
3242 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
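	/*
	 * Rough size check (assuming sizeof(struct srp_cmd) == 48,
	 * sizeof(struct srp_indirect_buf) == 20 and
	 * sizeof(struct srp_direct_buf) == 16): with the default
	 * cmd_sg_entries of 12 this yields a max_iu_len of
	 * 48 + 20 + 12 * 16 = 260 bytes.
	 */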
3243
c1120f89 3244 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3245 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3246 spin_lock_init(&target->lock);
55ee3ab2 3247 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
2088ca66 3248 if (ret)
fb49c8bb 3249 goto out;
aef9ec39 3250
d92c0da7
BVA
3251 ret = -ENOMEM;
3252 target->ch_count = max_t(unsigned, num_online_nodes(),
3253 min(ch_count ? :
3254 min(4 * num_online_nodes(),
3255 ibdev->num_comp_vectors),
3256 num_online_cpus()));
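	/*
	 * Example (illustrative topology): on a system with 2 online NUMA
	 * nodes, 16 online CPUs and an HCA exposing 8 completion vectors,
	 * and with the ch_count module parameter left at 0, this evaluates
	 * to max(2, min(min(4 * 2, 8), 16)) = 8 RDMA channels.
	 */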
3257 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3258 GFP_KERNEL);
3259 if (!target->ch)
fb49c8bb 3260 goto out;
aef9ec39 3261
d92c0da7
BVA
3262 node_idx = 0;
3263 for_each_online_node(node) {
3264 const int ch_start = (node_idx * target->ch_count /
3265 num_online_nodes());
3266 const int ch_end = ((node_idx + 1) * target->ch_count /
3267 num_online_nodes());
3268 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3269 num_online_nodes() + target->comp_vector)
3270 % ibdev->num_comp_vectors;
3271 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3272 num_online_nodes() + target->comp_vector)
3273 % ibdev->num_comp_vectors;
3274 int cpu_idx = 0;
3275
3276 for_each_online_cpu(cpu) {
3277 if (cpu_to_node(cpu) != node)
3278 continue;
3279 if (ch_start + cpu_idx >= ch_end)
3280 continue;
3281 ch = &target->ch[ch_start + cpu_idx];
3282 ch->target = target;
3283 ch->comp_vector = cv_start == cv_end ? cv_start :
3284 cv_start + cpu_idx % (cv_end - cv_start);
3285 spin_lock_init(&ch->lock);
3286 INIT_LIST_HEAD(&ch->free_tx);
3287 ret = srp_new_cm_id(ch);
3288 if (ret)
3289 goto err_disconnect;
aef9ec39 3290
d92c0da7
BVA
3291 ret = srp_create_ch_ib(ch);
3292 if (ret)
3293 goto err_disconnect;
3294
3295 ret = srp_alloc_req_data(ch);
3296 if (ret)
3297 goto err_disconnect;
3298
3299 ret = srp_connect_ch(ch, multich);
3300 if (ret) {
3301 shost_printk(KERN_ERR, target->scsi_host,
3302 PFX "Connection %d/%d failed\n",
3303 ch_start + cpu_idx,
3304 target->ch_count);
3305 if (node_idx == 0 && cpu_idx == 0) {
3306 goto err_disconnect;
3307 } else {
3308 srp_free_ch_ib(target, ch);
3309 srp_free_req_data(target, ch);
3310 target->ch_count = ch - target->ch;
c257ea6f 3311 goto connected;
d92c0da7
BVA
3312 }
3313 }
3314
3315 multich = true;
3316 cpu_idx++;
3317 }
3318 node_idx++;
aef9ec39
RD
3319 }
3320
c257ea6f 3321connected:
d92c0da7
BVA
3322 target->scsi_host->nr_hw_queues = target->ch_count;
3323
aef9ec39
RD
3324 ret = srp_add_target(host, target);
3325 if (ret)
3326 goto err_disconnect;
3327
34aa654e
BVA
3328 if (target->state != SRP_TARGET_REMOVED) {
3329 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3330 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3331 be64_to_cpu(target->id_ext),
3332 be64_to_cpu(target->ioc_guid),
747fe000 3333 be16_to_cpu(target->pkey),
34aa654e 3334 be64_to_cpu(target->service_id),
747fe000 3335 target->sgid.raw, target->orig_dgid.raw);
34aa654e 3336 }
e7ffde01 3337
2d7091bc
BVA
3338 ret = count;
3339
3340out:
3341 mutex_unlock(&host->add_target_mutex);
34aa654e
BVA
3342
3343 scsi_host_put(target->scsi_host);
bc44bd1d
BVA
3344 if (ret < 0)
3345 scsi_host_put(target->scsi_host);
34aa654e 3346
2d7091bc 3347 return ret;
aef9ec39
RD
3348
3349err_disconnect:
3350 srp_disconnect_target(target);
3351
d92c0da7
BVA
3352 for (i = 0; i < target->ch_count; i++) {
3353 ch = &target->ch[i];
3354 srp_free_ch_ib(target, ch);
3355 srp_free_req_data(target, ch);
3356 }
aef9ec39 3357
d92c0da7 3358 kfree(target->ch);
2d7091bc 3359 goto out;
aef9ec39
RD
3360}
3361
ee959b00 3362static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 3363
ee959b00
TJ
3364static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3365 char *buf)
aef9ec39 3366{
ee959b00 3367 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3368
05321937 3369 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
3370}
3371
ee959b00 3372static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3373
ee959b00
TJ
3374static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3375 char *buf)
aef9ec39 3376{
ee959b00 3377 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
3378
3379 return sprintf(buf, "%d\n", host->port);
3380}
3381
ee959b00 3382static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3383
f5358a17 3384static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3385{
3386 struct srp_host *host;
3387
3388 host = kzalloc(sizeof *host, GFP_KERNEL);
3389 if (!host)
3390 return NULL;
3391
3392 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3393 spin_lock_init(&host->target_lock);
aef9ec39 3394 init_completion(&host->released);
2d7091bc 3395 mutex_init(&host->add_target_mutex);
05321937 3396 host->srp_dev = device;
aef9ec39
RD
3397 host->port = port;
3398
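 /*
  * Register a per-port device ("srp-<ibdev>-<port>") in the srp class
  * and hang the add_target, ibdev and port attributes off it.
  */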
ee959b00
TJ
3399 host->dev.class = &srp_class;
3400 host->dev.parent = device->dev->dma_device;
d927e38c 3401 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3402
ee959b00 3403 if (device_register(&host->dev))
f5358a17 3404 goto free_host;
ee959b00 3405 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3406 goto err_class;
ee959b00 3407 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3408 goto err_class;
ee959b00 3409 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3410 goto err_class;
3411
3412 return host;
3413
3414err_class:
ee959b00 3415 device_unregister(&host->dev);
aef9ec39 3416
f5358a17 3417free_host:
aef9ec39
RD
3418 kfree(host);
3419
3420 return NULL;
3421}
3422
3423static void srp_add_one(struct ib_device *device)
3424{
f5358a17 3425 struct srp_device *srp_dev;
aef9ec39 3426 struct srp_host *host;
4139032b 3427 int mr_page_shift, p;
52ede08f 3428 u64 max_pages_per_mr;
aef9ec39 3429
f5358a17
RD
3430 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3431 if (!srp_dev)
4a061b28 3432 return;
f5358a17 3433
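 /*
  * Figure out which memory registration methods the HCA supports:
  * FMR (alloc/map/unmap_fmr verbs) and/or fast registration
  * (IB_DEVICE_MEM_MGT_EXTENSIONS). Prefer fast registration over FMR
  * when both are available and the prefer_fr module parameter is set.
  */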
d1b4289e
BVA
3434 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3435 device->map_phys_fmr && device->unmap_fmr);
4a061b28 3436 srp_dev->has_fr = (device->attrs.device_cap_flags &
5cfb1782
BVA
3437 IB_DEVICE_MEM_MGT_EXTENSIONS);
3438 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3439 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3440
3441 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3442 (!srp_dev->has_fmr || prefer_fr));
002f1567 3443 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
d1b4289e 3444
f5358a17
RD
3445 /*
3446 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3447 * minimum of 4096 bytes. We're unlikely to build large sglists
3448 * out of smaller entries.
f5358a17 3449 */
4a061b28 3450 mr_page_shift = max(12, ffs(device->attrs.page_size_cap) - 1);
52ede08f
BVA
3451 srp_dev->mr_page_size = 1 << mr_page_shift;
3452 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
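 /*
  * Limit the number of pages per MR to SRP_MAX_PAGES_PER_MR, to what
  * fits in the device's maximum MR size and, for fast registration,
  * to the maximum fast registration page list length.
  */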
4a061b28 3453 max_pages_per_mr = device->attrs.max_mr_size;
52ede08f
BVA
3454 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3455 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3456 max_pages_per_mr);
5cfb1782
BVA
3457 if (srp_dev->use_fast_reg) {
3458 srp_dev->max_pages_per_mr =
3459 min_t(u32, srp_dev->max_pages_per_mr,
4a061b28 3460 device->attrs.max_fast_reg_page_list_len);
5cfb1782 3461 }
52ede08f
BVA
3462 srp_dev->mr_max_size = srp_dev->mr_page_size *
3463 srp_dev->max_pages_per_mr;
4a061b28
OG
3464 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3465 device->name, mr_page_shift, device->attrs.max_mr_size,
3466 device->attrs.max_fast_reg_page_list_len,
52ede08f 3467 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
3468
3469 INIT_LIST_HEAD(&srp_dev->dev_list);
3470
3471 srp_dev->dev = device;
3472 srp_dev->pd = ib_alloc_pd(device);
3473 if (IS_ERR(srp_dev->pd))
3474 goto free_dev;
3475
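 /*
  * A global DMA MR is only created when memory registration is not
  * enforced (register_always=N) or when the HCA supports neither FMR
  * nor fast registration; otherwise per-command registration is used.
  */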
03f6fb93
BVA
3476 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3477 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3478 IB_ACCESS_LOCAL_WRITE |
3479 IB_ACCESS_REMOTE_READ |
3480 IB_ACCESS_REMOTE_WRITE);
3481 if (IS_ERR(srp_dev->global_mr))
3482 goto err_pd;
3483 } else {
3484 srp_dev->global_mr = NULL;
3485 }
f5358a17 3486
4139032b 3487 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
f5358a17 3488 host = srp_add_port(srp_dev, p);
aef9ec39 3489 if (host)
f5358a17 3490 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3491 }
3492
f5358a17 3493 ib_set_client_data(device, &srp_client, srp_dev);
4a061b28 3494 return;
f5358a17
RD
3495
3496err_pd:
3497 ib_dealloc_pd(srp_dev->pd);
3498
3499free_dev:
3500 kfree(srp_dev);
aef9ec39
RD
3501}
3502
7c1eb45a 3503static void srp_remove_one(struct ib_device *device, void *client_data)
aef9ec39 3504{
f5358a17 3505 struct srp_device *srp_dev;
aef9ec39 3506 struct srp_host *host, *tmp_host;
ef6c49d8 3507 struct srp_target_port *target;
aef9ec39 3508
7c1eb45a 3509 srp_dev = client_data;
1fe0cb84
DB
3510 if (!srp_dev)
3511 return;
aef9ec39 3512
f5358a17 3513 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3514 device_unregister(&host->dev);
aef9ec39
RD
3515 /*
3516 * Wait for the sysfs entry to go away, so that no new
3517 * target ports can be created.
3518 */
3519 wait_for_completion(&host->released);
3520
3521 /*
ef6c49d8 3522 * Remove all target ports.
aef9ec39 3523 */
b3589fd4 3524 spin_lock(&host->target_lock);
ef6c49d8
BVA
3525 list_for_each_entry(target, &host->target_list, list)
3526 srp_queue_remove_work(target);
b3589fd4 3527 spin_unlock(&host->target_lock);
aef9ec39
RD
3528
3529 /*
bcc05910 3530 * Wait for tl_err and target port removal tasks.
aef9ec39 3531 */
ef6c49d8 3532 flush_workqueue(system_long_wq);
bcc05910 3533 flush_workqueue(srp_remove_wq);
aef9ec39 3534
aef9ec39
RD
3535 kfree(host);
3536 }
3537
03f6fb93
BVA
3538 if (srp_dev->global_mr)
3539 ib_dereg_mr(srp_dev->global_mr);
f5358a17
RD
3540 ib_dealloc_pd(srp_dev->pd);
3541
3542 kfree(srp_dev);
aef9ec39
RD
3543}
3544
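/*
 * Hooks handed to the SRP transport class: rport state handling, the
 * reconnect/fast_io_fail/dev_loss timeouts, reconnect and rport
 * deletion, and termination of outstanding I/O.
 */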
3236822b 3545static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3546 .has_rport_state = true,
3547 .reset_timer_if_blocked = true,
a95cadb9 3548 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3549 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3550 .dev_loss_tmo = &srp_dev_loss_tmo,
3551 .reconnect = srp_rport_reconnect,
dc1bdbd9 3552 .rport_delete = srp_rport_delete,
ed9b2264 3553 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3554};
3555
aef9ec39
RD
3556static int __init srp_init_module(void)
3557{
3558 int ret;
3559
49248644 3560 if (srp_sg_tablesize) {
e0bda7d8 3561 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3562 if (!cmd_sg_entries)
3563 cmd_sg_entries = srp_sg_tablesize;
3564 }
3565
3566 if (!cmd_sg_entries)
3567 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3568
3569 if (cmd_sg_entries > 255) {
e0bda7d8 3570 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3571 cmd_sg_entries = 255;
1e89a194
DD
3572 }
3573
c07d424d
DD
3574 if (!indirect_sg_entries)
3575 indirect_sg_entries = cmd_sg_entries;
3576 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3577 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3578 cmd_sg_entries);
c07d424d
DD
3579 indirect_sg_entries = cmd_sg_entries;
3580 }
3581
bcc05910 3582 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3583 if (!srp_remove_wq) {
3584 ret = -ENOMEM;
bcc05910
BVA
3585 goto out;
3586 }
3587
3588 ret = -ENOMEM;
3236822b
FT
3589 ib_srp_transport_template =
3590 srp_attach_transport(&ib_srp_transport_functions);
3591 if (!ib_srp_transport_template)
bcc05910 3592 goto destroy_wq;
3236822b 3593
aef9ec39
RD
3594 ret = class_register(&srp_class);
3595 if (ret) {
e0bda7d8 3596 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3597 goto release_tr;
aef9ec39
RD
3598 }
3599
c1a0b23b
MT
3600 ib_sa_register_client(&srp_sa_client);
3601
aef9ec39
RD
3602 ret = ib_register_client(&srp_client);
3603 if (ret) {
e0bda7d8 3604 pr_err("couldn't register IB client\n");
bcc05910 3605 goto unreg_sa;
aef9ec39
RD
3606 }
3607
bcc05910
BVA
3608out:
3609 return ret;
3610
3611unreg_sa:
3612 ib_sa_unregister_client(&srp_sa_client);
3613 class_unregister(&srp_class);
3614
3615release_tr:
3616 srp_release_transport(ib_srp_transport_template);
3617
3618destroy_wq:
3619 destroy_workqueue(srp_remove_wq);
3620 goto out;
aef9ec39
RD
3621}
3622
3623static void __exit srp_cleanup_module(void)
3624{
3625 ib_unregister_client(&srp_client);
c1a0b23b 3626 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3627 class_unregister(&srp_class);
3236822b 3628 srp_release_transport(ib_srp_transport_template);
bcc05910 3629 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3630}
3631
3632module_init(srp_init_module);
3633module_exit(srp_cleanup_module);