IB/srp: Introduce target->mr_pool_size
drivers/infiniband/ulp/srp/ib_srp.c
/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

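/*
 * srp_tmo_get() and srp_tmo_set() are the get/set handlers for the
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo module parameters.
 * A negative stored value is reported as "off"; srp_tmo_set() validates a
 * new value against the other two timeouts before accepting it.
 */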
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

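/*
 * Allocate an SRP information unit (IU): the srp_iu structure, a data
 * buffer of @size bytes and a DMA mapping of that buffer for @direction.
 * Returns NULL on failure.
 */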
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

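/*
 * Allocate a new CM ID for @ch, destroying any previous one, and
 * (re)initialize the channel's path record fields from the target port
 * parameters.
 */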
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

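/*
 * Create the FMR pool for a target port. The pool size comes from
 * target->mr_pool_size, the member introduced by this patch.
 */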
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->mr_pool_size;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the QP before destroying it. This prevents the receive
 * completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	ib_drain_rq(ch->qp);
	ib_destroy_qp(ch->qp);
}

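/*
 * Allocate the receive and send completion queues, the RC queue pair and,
 * depending on the device capabilities, an FR or FMR pool for @ch. Any
 * previously allocated resources of the channel are freed and replaced.
 */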
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = dev->use_fast_reg ? 3 : 1;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	srp_destroy_qp(ch);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may keep trying to perform
	 * recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

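/*
 * Build an SRP_LOGIN_REQ and send it to the target through the IB CM,
 * applying the port identifier ordering required by the reported I/O class
 * and the Topspin/Cisco initiator port ID workaround where needed.
 */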
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data       = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control       = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn      &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

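/*
 * Establish a connection for one RDMA channel: look up the path record,
 * send the login request and handle SRP_PORT_REDIRECT, SRP_DLID_REDIRECT
 * and SRP_STALE_CONN responses until the login either succeeds or fails.
 */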
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

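/*
 * Map the pages accumulated in @state through the channel's FMR pool and
 * append a memory descriptor for the mapping. A single page is described
 * directly through the global memory region when one is available.
 */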
8f26c9ff 1256static int srp_map_finish_fmr(struct srp_map_state *state,
509c07bc 1257 struct srp_rdma_ch *ch)
8f26c9ff 1258{
186fbc66
BVA
1259 struct srp_target_port *target = ch->target;
1260 struct srp_device *dev = target->srp_host->srp_dev;
8f26c9ff
DD
1261 struct ib_pool_fmr *fmr;
1262 u64 io_addr = 0;
85507bcc 1263
f731ed62
BVA
1264 if (state->fmr.next >= state->fmr.end)
1265 return -ENOMEM;
1266
26630e8a
SG
1267 WARN_ON_ONCE(!dev->use_fmr);
1268
1269 if (state->npages == 0)
1270 return 0;
1271
1272 if (state->npages == 1 && target->global_mr) {
1273 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1274 target->global_mr->rkey);
1275 goto reset_state;
1276 }
1277
509c07bc 1278 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
8f26c9ff
DD
1279 state->npages, io_addr);
1280 if (IS_ERR(fmr))
1281 return PTR_ERR(fmr);
f5358a17 1282
f731ed62 1283 *state->fmr.next++ = fmr;
52ede08f 1284 state->nmdesc++;
f5358a17 1285
186fbc66
BVA
1286 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1287 state->dma_len, fmr->fmr->rkey);
539dde6f 1288
26630e8a
SG
1289reset_state:
1290 state->npages = 0;
1291 state->dma_len = 0;
1292
8f26c9ff
DD
1293 return 0;
1294}
1295
1dc7b1f1
CH
1296static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1297{
1298 srp_handle_qp_err(cq, wc, "FAST REG");
1299}
1300
5cfb1782 1301static int srp_map_finish_fr(struct srp_map_state *state,
1dc7b1f1 1302 struct srp_request *req,
57b0be9c 1303 struct srp_rdma_ch *ch, int sg_nents)
5cfb1782 1304{
509c07bc 1305 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1306 struct srp_device *dev = target->srp_host->srp_dev;
1307 struct ib_send_wr *bad_wr;
f7f7aab1 1308 struct ib_reg_wr wr;
5cfb1782
BVA
1309 struct srp_fr_desc *desc;
1310 u32 rkey;
f7f7aab1 1311 int n, err;
5cfb1782 1312
f731ed62
BVA
1313 if (state->fr.next >= state->fr.end)
1314 return -ENOMEM;
1315
26630e8a
SG
1316 WARN_ON_ONCE(!dev->use_fast_reg);
1317
57b0be9c 1318 if (sg_nents == 0)
26630e8a
SG
1319 return 0;
1320
57b0be9c 1321 if (sg_nents == 1 && target->global_mr) {
f7f7aab1
SG
1322 srp_map_desc(state, sg_dma_address(state->sg),
1323 sg_dma_len(state->sg),
26630e8a 1324 target->global_mr->rkey);
f7f7aab1 1325 return 1;
26630e8a
SG
1326 }
1327
509c07bc 1328 desc = srp_fr_pool_get(ch->fr_pool);
5cfb1782
BVA
1329 if (!desc)
1330 return -ENOMEM;
1331
1332 rkey = ib_inc_rkey(desc->mr->rkey);
1333 ib_update_fast_reg_key(desc->mr, rkey);
1334
57b0be9c 1335 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
f7f7aab1
SG
1336 if (unlikely(n < 0))
1337 return n;
5cfb1782 1338
1dc7b1f1
CH
1339 req->reg_cqe.done = srp_reg_mr_err_done;
1340
f7f7aab1
SG
1341 wr.wr.next = NULL;
1342 wr.wr.opcode = IB_WR_REG_MR;
1dc7b1f1 1343 wr.wr.wr_cqe = &req->reg_cqe;
f7f7aab1
SG
1344 wr.wr.num_sge = 0;
1345 wr.wr.send_flags = 0;
1346 wr.mr = desc->mr;
1347 wr.key = desc->mr->rkey;
1348 wr.access = (IB_ACCESS_LOCAL_WRITE |
1349 IB_ACCESS_REMOTE_READ |
1350 IB_ACCESS_REMOTE_WRITE);
5cfb1782 1351
f731ed62 1352 *state->fr.next++ = desc;
5cfb1782
BVA
1353 state->nmdesc++;
1354
f7f7aab1
SG
1355 srp_map_desc(state, desc->mr->iova,
1356 desc->mr->length, desc->mr->rkey);
5cfb1782 1357
26630e8a 1358 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
f7f7aab1 1359 if (unlikely(err))
26630e8a
SG
1360 return err;
1361
f7f7aab1 1362 return n;
5cfb1782
BVA
1363}
1364
8f26c9ff 1365static int srp_map_sg_entry(struct srp_map_state *state,
509c07bc 1366 struct srp_rdma_ch *ch,
3ae95da8 1367 struct scatterlist *sg, int sg_index)
8f26c9ff 1368{
509c07bc 1369 struct srp_target_port *target = ch->target;
8f26c9ff
DD
1370 struct srp_device *dev = target->srp_host->srp_dev;
1371 struct ib_device *ibdev = dev->dev;
1372 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1373 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
3ae95da8 1374 unsigned int len = 0;
8f26c9ff
DD
1375 int ret;
1376
3ae95da8 1377 WARN_ON_ONCE(!dma_len);
f5358a17 1378
8f26c9ff 1379 while (dma_len) {
5cfb1782
BVA
1380 unsigned offset = dma_addr & ~dev->mr_page_mask;
1381 if (state->npages == dev->max_pages_per_mr || offset != 0) {
f7f7aab1 1382 ret = srp_map_finish_fmr(state, ch);
8f26c9ff
DD
1383 if (ret)
1384 return ret;
8f26c9ff
DD
1385 }
1386
5cfb1782 1387 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
f5358a17 1388
8f26c9ff
DD
1389 if (!state->npages)
1390 state->base_dma_addr = dma_addr;
5cfb1782 1391 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
52ede08f 1392 state->dma_len += len;
8f26c9ff
DD
1393 dma_addr += len;
1394 dma_len -= len;
1395 }
1396
5cfb1782
BVA
1397 /*
1398 * If the last entry of the MR wasn't a full page, then we need to
8f26c9ff 1399 * close it out and start a new one -- we can only merge at page
1d3d98c4 1400 * boundaries.
8f26c9ff
DD
1401 */
1402 ret = 0;
0e0d3a48 1403 if (len != dev->mr_page_size)
f7f7aab1 1404 ret = srp_map_finish_fmr(state, ch);
f5358a17
RD
1405 return ret;
1406}
1407
26630e8a
SG
1408static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1409 struct srp_request *req, struct scatterlist *scat,
1410 int count)
76bc1e1d 1411{
76bc1e1d 1412 struct scatterlist *sg;
0e0d3a48 1413 int i, ret;
76bc1e1d 1414
26630e8a
SG
1415 state->desc = req->indirect_desc;
1416 state->pages = req->map_page;
1417 state->fmr.next = req->fmr_list;
1418 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1419
1420 for_each_sg(scat, sg, count, i) {
1421 ret = srp_map_sg_entry(state, ch, sg, i);
1422 if (ret)
1423 return ret;
5cfb1782 1424 }
76bc1e1d 1425
f7f7aab1 1426 ret = srp_map_finish_fmr(state, ch);
26630e8a
SG
1427 if (ret)
1428 return ret;
1429
26630e8a
SG
1430 return 0;
1431}
1432
1433static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1434 struct srp_request *req, struct scatterlist *scat,
1435 int count)
1436{
26630e8a 1437 state->desc = req->indirect_desc;
f7f7aab1
SG
1438 state->fr.next = req->fr_list;
1439 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1440 state->sg = scat;
26630e8a 1441
57b0be9c 1442 while (count) {
f7f7aab1 1443 int i, n;
26630e8a 1444
c6333f9f 1445 n = srp_map_finish_fr(state, req, ch, count);
f7f7aab1
SG
1446 if (unlikely(n < 0))
1447 return n;
1448
57b0be9c 1449 count -= n;
f7f7aab1
SG
1450 for (i = 0; i < n; i++)
1451 state->sg = sg_next(state->sg);
1452 }
26630e8a 1453
26630e8a
SG
1454 return 0;
1455}
1456
1457static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1458 struct srp_request *req, struct scatterlist *scat,
1459 int count)
1460{
1461 struct srp_target_port *target = ch->target;
1462 struct srp_device *dev = target->srp_host->srp_dev;
1463 struct scatterlist *sg;
1464 int i;
1465
1466 state->desc = req->indirect_desc;
1467 for_each_sg(scat, sg, count, i) {
1468 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1469 ib_sg_dma_len(dev->dev, sg),
1470 target->global_mr->rkey);
0e0d3a48 1471 }
76bc1e1d 1472
26630e8a 1473 return 0;
76bc1e1d
BVA
1474}
1475
330179f2
BVA
1476/*
1477 * Register the indirect data buffer descriptor with the HCA.
1478 *
1479 * Note: since the indirect data buffer descriptor has been allocated with
1480 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1481 * memory buffer.
1482 */
1483static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1484 void **next_mr, void **end_mr, u32 idb_len,
1485 __be32 *idb_rkey)
1486{
1487 struct srp_target_port *target = ch->target;
1488 struct srp_device *dev = target->srp_host->srp_dev;
1489 struct srp_map_state state;
1490 struct srp_direct_buf idb_desc;
1491 u64 idb_pages[1];
f7f7aab1 1492 struct scatterlist idb_sg[1];
330179f2
BVA
1493 int ret;
1494
1495 memset(&state, 0, sizeof(state));
1496 memset(&idb_desc, 0, sizeof(idb_desc));
1497 state.gen.next = next_mr;
1498 state.gen.end = end_mr;
1499 state.desc = &idb_desc;
330179f2
BVA
1500 state.base_dma_addr = req->indirect_dma_addr;
1501 state.dma_len = idb_len;
f7f7aab1
SG
1502
1503 if (dev->use_fast_reg) {
1504 state.sg = idb_sg;
f7f7aab1
SG
1505 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1506 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
fc925518
CH
1507#ifdef CONFIG_NEED_SG_DMA_LENGTH
1508 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1509#endif
c6333f9f 1510 ret = srp_map_finish_fr(&state, req, ch, 1);
f7f7aab1
SG
1511 if (ret < 0)
1512 return ret;
1513 } else if (dev->use_fmr) {
1514 state.pages = idb_pages;
1515 state.pages[0] = (req->indirect_dma_addr &
1516 dev->mr_page_mask);
1517 state.npages = 1;
1518 ret = srp_map_finish_fmr(&state, ch);
1519 if (ret < 0)
1520 return ret;
1521 } else {
1522 return -EINVAL;
1523 }
330179f2
BVA
1524
1525 *idb_rkey = idb_desc.key;
1526
f7f7aab1 1527 return 0;
330179f2
BVA
1528}
1529
77269cdf
BVA
1530/**
1531 * srp_map_data() - map SCSI data buffer onto an SRP request
1532 * @scmnd: SCSI command to map
1533 * @ch: SRP RDMA channel
1534 * @req: SRP request
1535 *
1536 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1537 * mapping failed.
1538 */
509c07bc 1539static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1540 struct srp_request *req)
1541{
509c07bc 1542 struct srp_target_port *target = ch->target;
76bc1e1d 1543 struct scatterlist *scat;
aef9ec39 1544 struct srp_cmd *cmd = req->cmd->buf;
330179f2 1545 int len, nents, count, ret;
85507bcc
RC
1546 struct srp_device *dev;
1547 struct ib_device *ibdev;
8f26c9ff
DD
1548 struct srp_map_state state;
1549 struct srp_indirect_buf *indirect_hdr;
330179f2
BVA
1550 u32 idb_len, table_len;
1551 __be32 idb_rkey;
8f26c9ff 1552 u8 fmt;
aef9ec39 1553
bb350d1d 1554 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
aef9ec39
RD
1555 return sizeof (struct srp_cmd);
1556
1557 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1558 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1559 shost_printk(KERN_WARNING, target->scsi_host,
1560 PFX "Unhandled data direction %d\n",
1561 scmnd->sc_data_direction);
aef9ec39
RD
1562 return -EINVAL;
1563 }
1564
bb350d1d
FT
1565 nents = scsi_sg_count(scmnd);
1566 scat = scsi_sglist(scmnd);
aef9ec39 1567
05321937 1568 dev = target->srp_host->srp_dev;
85507bcc
RC
1569 ibdev = dev->dev;
1570
1571 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1572 if (unlikely(count == 0))
1573 return -EIO;
f5358a17
RD
1574
1575 fmt = SRP_DATA_DESC_DIRECT;
1576 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
aef9ec39 1577
03f6fb93 1578 if (count == 1 && target->global_mr) {
f5358a17
RD
1579 /*
1580 * The midlayer only generated a single gather/scatter
1581 * entry, or DMA mapping coalesced everything to a
1582 * single entry. So a direct descriptor along with
1583 * the DMA MR suffices.
1584 */
cf368713 1585 struct srp_direct_buf *buf = (void *) cmd->add_data;
aef9ec39 1586
85507bcc 1587 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
03f6fb93 1588 buf->key = cpu_to_be32(target->global_mr->rkey);
85507bcc 1589 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
8f26c9ff 1590
52ede08f 1591 req->nmdesc = 0;
8f26c9ff
DD
1592 goto map_complete;
1593 }
1594
5cfb1782
BVA
1595 /*
1596 * We have more than one scatter/gather entry, so build our indirect
1597 * descriptor table, trying to merge as many entries as we can.
8f26c9ff
DD
1598 */
1599 indirect_hdr = (void *) cmd->add_data;
1600
c07d424d
DD
1601 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1602 target->indirect_size, DMA_TO_DEVICE);
1603
8f26c9ff 1604 memset(&state, 0, sizeof(state));
26630e8a 1605 if (dev->use_fast_reg)
e012f363 1606 ret = srp_map_sg_fr(&state, ch, req, scat, count);
26630e8a 1607 else if (dev->use_fmr)
e012f363 1608 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
26630e8a 1609 else
e012f363
BVA
1610 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1611 req->nmdesc = state.nmdesc;
1612 if (ret < 0)
1613 goto unmap;
cf368713 1614
c07d424d
DD
1615 /* We've mapped the request, now pull as much of the indirect
1616 * descriptor table as we can into the command buffer. If this
1617 * target is not using an external indirect table, we are
1618 * guaranteed to fit into the command, as the SCSI layer won't
1619 * give us more S/G entries than we allow.
8f26c9ff 1620 */
8f26c9ff 1621 if (state.ndesc == 1) {
5cfb1782
BVA
1622 /*
1623 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1624 * so use a direct descriptor.
1625 */
1626 struct srp_direct_buf *buf = (void *) cmd->add_data;
cf368713 1627
c07d424d 1628 *buf = req->indirect_desc[0];
8f26c9ff 1629 goto map_complete;
aef9ec39
RD
1630 }
1631
c07d424d
DD
1632 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1633 !target->allow_ext_sg)) {
1634 shost_printk(KERN_ERR, target->scsi_host,
1635 "Could not fit S/G list into SRP_CMD\n");
e012f363
BVA
1636 ret = -EIO;
1637 goto unmap;
c07d424d
DD
1638 }
1639
1640 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff 1641 table_len = state.ndesc * sizeof (struct srp_direct_buf);
330179f2 1642 idb_len = sizeof(struct srp_indirect_buf) + table_len;
8f26c9ff
DD
1643
1644 fmt = SRP_DATA_DESC_INDIRECT;
1645 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1646 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1647
c07d424d
DD
1648 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1649 count * sizeof (struct srp_direct_buf));
8f26c9ff 1650
03f6fb93 1651 if (!target->global_mr) {
330179f2
BVA
1652 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1653 idb_len, &idb_rkey);
1654 if (ret < 0)
e012f363 1655 goto unmap;
330179f2
BVA
1656 req->nmdesc++;
1657 } else {
a745f4f4 1658 idb_rkey = cpu_to_be32(target->global_mr->rkey);
330179f2
BVA
1659 }
1660
c07d424d 1661 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
330179f2 1662 indirect_hdr->table_desc.key = idb_rkey;
8f26c9ff
DD
1663 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1664 indirect_hdr->len = cpu_to_be32(state.total_len);
1665
1666 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1667 cmd->data_out_desc_cnt = count;
8f26c9ff 1668 else
c07d424d
DD
1669 cmd->data_in_desc_cnt = count;
1670
1671 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1672 DMA_TO_DEVICE);
8f26c9ff
DD
1673
1674map_complete:
aef9ec39
RD
1675 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1676 cmd->buf_fmt = fmt << 4;
1677 else
1678 cmd->buf_fmt = fmt;
1679
aef9ec39 1680 return len;
e012f363
BVA
1681
1682unmap:
1683 srp_unmap_data(scmnd, ch, req);
1684 return ret;
aef9ec39
RD
1685}
1686
76c75b25
BVA
1687/*
1688 * Return an IU and possible credit to the free pool
1689 */
509c07bc 1690static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1691 enum srp_iu_type iu_type)
1692{
1693 unsigned long flags;
1694
509c07bc
BVA
1695 spin_lock_irqsave(&ch->lock, flags);
1696 list_add(&iu->list, &ch->free_tx);
76c75b25 1697 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1698 ++ch->req_lim;
1699 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1700}
1701
05a1d750 1702/*
509c07bc 1703 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1704 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1705 *
1706 * Note:
1707 * An upper limit for the number of allocated information units for each
1708 * request type is:
1709 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1710 * more than Scsi_Host.can_queue requests.
1711 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1712 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1713 * one unanswered SRP request to an initiator.
1714 */
509c07bc 1715static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1716 enum srp_iu_type iu_type)
1717{
509c07bc 1718 struct srp_target_port *target = ch->target;
1719 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1720 struct srp_iu *iu;
1721
1dc7b1f1 1722 ib_process_cq_direct(ch->send_cq, -1);
05a1d750 1723
509c07bc 1724 if (list_empty(&ch->free_tx))
1725 return NULL;
1726
1727 /* Initiator responses to target requests do not consume credits */
76c75b25 1728 if (iu_type != SRP_IU_RSP) {
509c07bc 1729 if (ch->req_lim <= rsv) {
1730 ++target->zero_req_lim;
1731 return NULL;
1732 }
1733
509c07bc 1734 --ch->req_lim;
1735 }
1736
509c07bc 1737 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1738 list_del(&iu->list);
1739 return iu;
1740}
1741
1742static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1743{
1744 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1745 struct srp_rdma_ch *ch = cq->cq_context;
1746
1747 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1748 srp_handle_qp_err(cq, wc, "SEND");
1749 return;
1750 }
1751
1752 list_add(&iu->list, &ch->free_tx);
1753}
1754
509c07bc 1755static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1756{
509c07bc 1757 struct srp_target_port *target = ch->target;
1758 struct ib_sge list;
1759 struct ib_send_wr wr, *bad_wr;
1760
1761 list.addr = iu->dma;
1762 list.length = len;
9af76271 1763 list.lkey = target->lkey;
05a1d750 1764
1765 iu->cqe.done = srp_send_done;
1766
05a1d750 1767 wr.next = NULL;
1dc7b1f1 1768 wr.wr_cqe = &iu->cqe;
1769 wr.sg_list = &list;
1770 wr.num_sge = 1;
1771 wr.opcode = IB_WR_SEND;
1772 wr.send_flags = IB_SEND_SIGNALED;
1773
509c07bc 1774 return ib_post_send(ch->qp, &wr, &bad_wr);
1775}
1776
509c07bc 1777static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1778{
509c07bc 1779 struct srp_target_port *target = ch->target;
c996bb47 1780 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1781 struct ib_sge list;
1782
1783 list.addr = iu->dma;
1784 list.length = iu->size;
9af76271 1785 list.lkey = target->lkey;
c996bb47 1786
1787 iu->cqe.done = srp_recv_done;
1788
c996bb47 1789 wr.next = NULL;
1dc7b1f1 1790 wr.wr_cqe = &iu->cqe;
1791 wr.sg_list = &list;
1792 wr.num_sge = 1;
1793
509c07bc 1794 return ib_post_recv(ch->qp, &wr, &bad_wr);
1795}
1796
509c07bc 1797static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1798{
509c07bc 1799 struct srp_target_port *target = ch->target;
1800 struct srp_request *req;
1801 struct scsi_cmnd *scmnd;
1802 unsigned long flags;
aef9ec39 1803
aef9ec39 1804 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1805 spin_lock_irqsave(&ch->lock, flags);
1806 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1807 spin_unlock_irqrestore(&ch->lock, flags);
94a9174c 1808
509c07bc 1809 ch->tsk_mgmt_status = -1;
f8b6e31e 1810 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1811 ch->tsk_mgmt_status = rsp->data[3];
1812 complete(&ch->tsk_mgmt_done);
aef9ec39 1813 } else {
1814 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1815 if (scmnd) {
1816 req = (void *)scmnd->host_scribble;
1817 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1818 }
22032991 1819 if (!scmnd) {
7aa54bd7 1820 shost_printk(KERN_ERR, target->scsi_host,
1821 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1822 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 1823
1824 spin_lock_irqsave(&ch->lock, flags);
1825 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1826 spin_unlock_irqrestore(&ch->lock, flags);
1827
1828 return;
1829 }
1830 scmnd->result = rsp->status;
1831
1832 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1833 memcpy(scmnd->sense_buffer, rsp->data +
1834 be32_to_cpu(rsp->resp_data_len),
1835 min_t(int, be32_to_cpu(rsp->sense_data_len),
1836 SCSI_SENSE_BUFFERSIZE));
1837 }
1838
e714531a 1839 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1840 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1841 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1842 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1843 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1844 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1845 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1846 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1847
509c07bc 1848 srp_free_req(ch, req, scmnd,
1849 be32_to_cpu(rsp->req_lim_delta));
1850
1851 scmnd->host_scribble = NULL;
1852 scmnd->scsi_done(scmnd);
aef9ec39 1853 }
1854}
1855
509c07bc 1856static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1857 void *rsp, int len)
1858{
509c07bc 1859 struct srp_target_port *target = ch->target;
76c75b25 1860 struct ib_device *dev = target->srp_host->srp_dev->dev;
1861 unsigned long flags;
1862 struct srp_iu *iu;
76c75b25 1863 int err;
bb12588a 1864
1865 spin_lock_irqsave(&ch->lock, flags);
1866 ch->req_lim += req_delta;
1867 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1868 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 1869
1870 if (!iu) {
1871 shost_printk(KERN_ERR, target->scsi_host, PFX
1872 "no IU available to send response\n");
76c75b25 1873 return 1;
1874 }
1875
1876 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1877 memcpy(iu->buf, rsp, len);
1878 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1879
509c07bc 1880 err = srp_post_send(ch, iu, len);
76c75b25 1881 if (err) {
1882 shost_printk(KERN_ERR, target->scsi_host, PFX
1883 "unable to post response: %d\n", err);
509c07bc 1884 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 1885 }
bb12588a 1886
1887 return err;
1888}
1889
509c07bc 1890static void srp_process_cred_req(struct srp_rdma_ch *ch,
1891 struct srp_cred_req *req)
1892{
1893 struct srp_cred_rsp rsp = {
1894 .opcode = SRP_CRED_RSP,
1895 .tag = req->tag,
1896 };
1897 s32 delta = be32_to_cpu(req->req_lim_delta);
1898
1899 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1900 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1901 "problems processing SRP_CRED_REQ\n");
1902}
1903
509c07bc 1904static void srp_process_aer_req(struct srp_rdma_ch *ch,
1905 struct srp_aer_req *req)
1906{
509c07bc 1907 struct srp_target_port *target = ch->target;
1908 struct srp_aer_rsp rsp = {
1909 .opcode = SRP_AER_RSP,
1910 .tag = req->tag,
1911 };
1912 s32 delta = be32_to_cpu(req->req_lim_delta);
1913
1914 shost_printk(KERN_ERR, target->scsi_host, PFX
985aa495 1915 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
bb12588a 1916
509c07bc 1917 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1918 shost_printk(KERN_ERR, target->scsi_host, PFX
1919 "problems processing SRP_AER_REQ\n");
1920}
1921
1dc7b1f1 1922static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
aef9ec39 1923{
1924 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1925 struct srp_rdma_ch *ch = cq->cq_context;
509c07bc 1926 struct srp_target_port *target = ch->target;
dcb4cb85 1927 struct ib_device *dev = target->srp_host->srp_dev->dev;
c996bb47 1928 int res;
1929 u8 opcode;
1930
1931 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1932 srp_handle_qp_err(cq, wc, "RECV");
1933 return;
1934 }
1935
509c07bc 1936 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1937 DMA_FROM_DEVICE);
1938
1939 opcode = *(u8 *) iu->buf;
1940
1941 if (0) {
1942 shost_printk(KERN_ERR, target->scsi_host,
1943 PFX "recv completion, opcode 0x%02x\n", opcode);
1944 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1945 iu->buf, wc->byte_len, true);
1946 }
1947
1948 switch (opcode) {
1949 case SRP_RSP:
509c07bc 1950 srp_process_rsp(ch, iu->buf);
1951 break;
1952
bb12588a 1953 case SRP_CRED_REQ:
509c07bc 1954 srp_process_cred_req(ch, iu->buf);
1955 break;
1956
1957 case SRP_AER_REQ:
509c07bc 1958 srp_process_aer_req(ch, iu->buf);
1959 break;
1960
1961 case SRP_T_LOGOUT:
1962 /* XXX Handle target logout */
1963 shost_printk(KERN_WARNING, target->scsi_host,
1964 PFX "Got target logout request\n");
1965 break;
1966
1967 default:
1968 shost_printk(KERN_WARNING, target->scsi_host,
1969 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1970 break;
1971 }
1972
509c07bc 1973 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1974 DMA_FROM_DEVICE);
c996bb47 1975
509c07bc 1976 res = srp_post_recv(ch, iu);
1977 if (res != 0)
1978 shost_printk(KERN_ERR, target->scsi_host,
1979 PFX "Recv failed with error code %d\n", res);
1980}
1981
1982/**
1983 * srp_tl_err_work() - handle a transport layer error
af24663b 1984 * @work: Work structure embedded in an SRP target port.
1985 *
1986 * Note: This function may get invoked before the rport has been created,
1987 * hence the target->rport test.
1988 */
1989static void srp_tl_err_work(struct work_struct *work)
1990{
1991 struct srp_target_port *target;
1992
1993 target = container_of(work, struct srp_target_port, tl_err_work);
1994 if (target->rport)
1995 srp_start_tl_fail_timers(target->rport);
1996}
1997
1998static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
1999 const char *opname)
948d1e88 2000{
1dc7b1f1 2001 struct srp_rdma_ch *ch = cq->cq_context;
2002 struct srp_target_port *target = ch->target;
2003
c014c8cd 2004 if (ch->connected && !target->qp_in_error) {
2005 shost_printk(KERN_ERR, target->scsi_host,
2006 PFX "failed %s status %s (%d) for CQE %p\n",
2007 opname, ib_wc_status_msg(wc->status), wc->status,
2008 wc->wr_cqe);
c1120f89 2009 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 2010 }
2011 target->qp_in_error = true;
2012}
2013
76c75b25 2014static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 2015{
76c75b25 2016 struct srp_target_port *target = host_to_target(shost);
a95cadb9 2017 struct srp_rport *rport = target->rport;
509c07bc 2018 struct srp_rdma_ch *ch;
2019 struct srp_request *req;
2020 struct srp_iu *iu;
2021 struct srp_cmd *cmd;
85507bcc 2022 struct ib_device *dev;
76c75b25 2023 unsigned long flags;
2024 u32 tag;
2025 u16 idx;
d1b4289e 2026 int len, ret;
2027 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2028
2029 /*
2030 * The SCSI EH thread is the only context from which srp_queuecommand()
2031 * can get invoked for blocked devices (SDEV_BLOCK /
2032 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2033 * locking the rport mutex if invoked from inside the SCSI EH.
2034 */
2035 if (in_scsi_eh)
2036 mutex_lock(&rport->mutex);
aef9ec39 2037
2038 scmnd->result = srp_chkready(target->rport);
2039 if (unlikely(scmnd->result))
2040 goto err;
2ce19e72 2041
2042 WARN_ON_ONCE(scmnd->request->tag < 0);
2043 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2044 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2045 idx = blk_mq_unique_tag_to_tag(tag);
2046 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2047 dev_name(&shost->shost_gendev), tag, idx,
2048 target->req_ring_size);
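	/*
	 * blk_mq_unique_tag() encodes the hardware queue index in the upper
	 * 16 bits and the per-queue tag in the lower 16 bits, so both the
	 * RDMA channel and the request ring slot can be recovered from the
	 * tag that comes back in the SRP response (see srp_process_rsp() and
	 * srp_abort()).
	 */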
2049
2050 spin_lock_irqsave(&ch->lock, flags);
2051 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2052 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2053
2054 if (!iu)
2055 goto err;
2056
2057 req = &ch->req_ring[idx];
05321937 2058 dev = target->srp_host->srp_dev->dev;
49248644 2059 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 2060 DMA_TO_DEVICE);
aef9ec39 2061
f8b6e31e 2062 scmnd->host_scribble = (void *) req;
2063
2064 cmd = iu->buf;
2065 memset(cmd, 0, sizeof *cmd);
2066
2067 cmd->opcode = SRP_CMD;
985aa495 2068 int_to_scsilun(scmnd->device->lun, &cmd->lun);
77f2c1a4 2069 cmd->tag = tag;
2070 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2071
2072 req->scmnd = scmnd;
2073 req->cmd = iu;
aef9ec39 2074
509c07bc 2075 len = srp_map_data(scmnd, ch, req);
aef9ec39 2076 if (len < 0) {
7aa54bd7 2077 shost_printk(KERN_ERR, target->scsi_host,
2078 PFX "Failed to map data (%d)\n", len);
2079 /*
2080 * If we ran out of memory descriptors (-ENOMEM) because an
2081 * application is queuing many requests with more than
52ede08f 2082 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2083 * to reduce queue depth temporarily.
2084 */
2085 scmnd->result = len == -ENOMEM ?
2086 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2087 goto err_iu;
2088 }
2089
49248644 2090 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 2091 DMA_TO_DEVICE);
aef9ec39 2092
509c07bc 2093 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2094 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2095 goto err_unmap;
2096 }
2097
2098 ret = 0;
2099
2100unlock_rport:
2101 if (in_scsi_eh)
2102 mutex_unlock(&rport->mutex);
2103
d1b4289e 2104 return ret;
2105
2106err_unmap:
509c07bc 2107 srp_unmap_data(scmnd, ch, req);
aef9ec39 2108
76c75b25 2109err_iu:
509c07bc 2110 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2111
2112 /*
2113	 * Make sure that the loops that iterate over the request ring do not
2114	 * encounter a dangling SCSI command pointer.
2115 */
2116 req->scmnd = NULL;
2117
2118err:
2119 if (scmnd->result) {
2120 scmnd->scsi_done(scmnd);
2121 ret = 0;
2122 } else {
2123 ret = SCSI_MLQUEUE_HOST_BUSY;
2124 }
a95cadb9 2125
d1b4289e 2126 goto unlock_rport;
2127}
2128
2129/*
2130 * Note: the resources allocated in this function are freed in
509c07bc 2131 * srp_free_ch_ib().
4d73f95f 2132 */
509c07bc 2133static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2134{
509c07bc 2135 struct srp_target_port *target = ch->target;
2136 int i;
2137
2138 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2139 GFP_KERNEL);
2140 if (!ch->rx_ring)
4d73f95f 2141 goto err_no_ring;
2142 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2143 GFP_KERNEL);
2144 if (!ch->tx_ring)
2145 goto err_no_ring;
2146
2147 for (i = 0; i < target->queue_size; ++i) {
2148 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2149 ch->max_ti_iu_len,
2150 GFP_KERNEL, DMA_FROM_DEVICE);
2151 if (!ch->rx_ring[i])
2152 goto err;
2153 }
2154
4d73f95f 2155 for (i = 0; i < target->queue_size; ++i) {
2156 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2157 target->max_iu_len,
2158 GFP_KERNEL, DMA_TO_DEVICE);
2159 if (!ch->tx_ring[i])
aef9ec39 2160 goto err;
dcb4cb85 2161
509c07bc 2162 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2163 }
2164
2165 return 0;
2166
2167err:
4d73f95f 2168 for (i = 0; i < target->queue_size; ++i) {
2169 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2170 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2171 }
2172
2173
2174err_no_ring:
2175 kfree(ch->tx_ring);
2176 ch->tx_ring = NULL;
2177 kfree(ch->rx_ring);
2178 ch->rx_ring = NULL;
4d73f95f 2179
2180 return -ENOMEM;
2181}
2182
2183static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2184{
2185 uint64_t T_tr_ns, max_compl_time_ms;
2186 uint32_t rq_tmo_jiffies;
2187
2188 /*
2189 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2190 * table 91), both the QP timeout and the retry count have to be set
2191 * for RC QP's during the RTR to RTS transition.
2192 */
2193 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2194 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2195
2196 /*
2197 * Set target->rq_tmo_jiffies to one second more than the largest time
2198 * it can take before an error completion is generated. See also
2199 * C9-140..142 in the IBTA spec for more information about how to
2200 * convert the QP Local ACK Timeout value to nanoseconds.
2201 */
2202 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2203 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2204 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2205 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2206
2207 return rq_tmo_jiffies;
2208}
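/*
 * Worked example (illustrative values only): with qp_attr->timeout == 14 and
 * qp_attr->retry_cnt == 7, T_tr_ns = 4096 * 2^14 ns, or about 67 ms, the
 * largest completion time is about 7 * 4 * 67 ms = 1.9 s, and rq_tmo_jiffies
 * therefore corresponds to roughly 2.9 seconds.
 */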
2209
961e0be8 2210static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
e6300cbd 2211 const struct srp_login_rsp *lrsp,
509c07bc 2212 struct srp_rdma_ch *ch)
961e0be8 2213{
509c07bc 2214 struct srp_target_port *target = ch->target;
2215 struct ib_qp_attr *qp_attr = NULL;
2216 int attr_mask = 0;
2217 int ret;
2218 int i;
2219
2220 if (lrsp->opcode == SRP_LOGIN_RSP) {
2221 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2222 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2223
2224 /*
2225 * Reserve credits for task management so we don't
2226 * bounce requests back to the SCSI mid-layer.
2227 */
2228 target->scsi_host->can_queue
509c07bc 2229 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2230 target->scsi_host->can_queue);
2231 target->scsi_host->cmd_per_lun
2232 = min_t(int, target->scsi_host->can_queue,
2233 target->scsi_host->cmd_per_lun);
2234 } else {
2235 shost_printk(KERN_WARNING, target->scsi_host,
2236 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2237 ret = -ECONNRESET;
2238 goto error;
2239 }
2240
2241 if (!ch->rx_ring) {
2242 ret = srp_alloc_iu_bufs(ch);
2243 if (ret)
2244 goto error;
2245 }
2246
2247 ret = -ENOMEM;
2248 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2249 if (!qp_attr)
2250 goto error;
2251
2252 qp_attr->qp_state = IB_QPS_RTR;
2253 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2254 if (ret)
2255 goto error_free;
2256
509c07bc 2257 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2258 if (ret)
2259 goto error_free;
2260
4d73f95f 2261 for (i = 0; i < target->queue_size; i++) {
2262 struct srp_iu *iu = ch->rx_ring[i];
2263
2264 ret = srp_post_recv(ch, iu);
2265 if (ret)
2266 goto error_free;
2267 }
2268
2269 qp_attr->qp_state = IB_QPS_RTS;
2270 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2271 if (ret)
2272 goto error_free;
2273
2274 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2275
509c07bc 2276 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2277 if (ret)
2278 goto error_free;
2279
2280 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2281
2282error_free:
2283 kfree(qp_attr);
2284
2285error:
509c07bc 2286 ch->status = ret;
2287}
2288
2289static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2290 struct ib_cm_event *event,
509c07bc 2291 struct srp_rdma_ch *ch)
aef9ec39 2292{
509c07bc 2293 struct srp_target_port *target = ch->target;
7aa54bd7 2294 struct Scsi_Host *shost = target->scsi_host;
2295 struct ib_class_port_info *cpi;
2296 int opcode;
2297
2298 switch (event->param.rej_rcvd.reason) {
2299 case IB_CM_REJ_PORT_CM_REDIRECT:
2300 cpi = event->param.rej_rcvd.ari;
2301 ch->path.dlid = cpi->redirect_lid;
2302 ch->path.pkey = cpi->redirect_pkey;
aef9ec39 2303 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
509c07bc 2304 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2305
509c07bc 2306 ch->status = ch->path.dlid ?
2307 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2308 break;
2309
2310 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2311 if (srp_target_is_topspin(target)) {
2312 /*
2313 * Topspin/Cisco SRP gateways incorrectly send
2314 * reject reason code 25 when they mean 24
2315 * (port redirect).
2316 */
509c07bc 2317 memcpy(ch->path.dgid.raw,
aef9ec39
RD
2318 event->param.rej_rcvd.ari, 16);
2319
2320 shost_printk(KERN_DEBUG, shost,
2321 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2322 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2323 be64_to_cpu(ch->path.dgid.global.interface_id));
aef9ec39 2324
509c07bc 2325 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2326 } else {
2327 shost_printk(KERN_WARNING, shost,
2328 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2329 ch->status = -ECONNRESET;
2330 }
2331 break;
2332
2333 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2334 shost_printk(KERN_WARNING, shost,
2335 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2336 ch->status = -ECONNRESET;
2337 break;
2338
2339 case IB_CM_REJ_CONSUMER_DEFINED:
2340 opcode = *(u8 *) event->private_data;
2341 if (opcode == SRP_LOGIN_REJ) {
2342 struct srp_login_rej *rej = event->private_data;
2343 u32 reason = be32_to_cpu(rej->reason);
2344
2345 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2346 shost_printk(KERN_WARNING, shost,
2347 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2348 else
2349 shost_printk(KERN_WARNING, shost, PFX
2350 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2351 target->sgid.raw,
2352 target->orig_dgid.raw, reason);
aef9ec39 2353 } else
2354 shost_printk(KERN_WARNING, shost,
2355 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2356 " opcode 0x%02x\n", opcode);
509c07bc 2357 ch->status = -ECONNRESET;
2358 break;
2359
2360 case IB_CM_REJ_STALE_CONN:
2361 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2362 ch->status = SRP_STALE_CONN;
2363 break;
2364
aef9ec39 2365 default:
2366 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2367 event->param.rej_rcvd.reason);
509c07bc 2368 ch->status = -ECONNRESET;
2369 }
2370}
2371
2372static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2373{
2374 struct srp_rdma_ch *ch = cm_id->context;
2375 struct srp_target_port *target = ch->target;
aef9ec39 2376 int comp = 0;
2377
2378 switch (event->event) {
2379 case IB_CM_REQ_ERROR:
2380 shost_printk(KERN_DEBUG, target->scsi_host,
2381 PFX "Sending CM REQ failed\n");
aef9ec39 2382 comp = 1;
509c07bc 2383 ch->status = -ECONNRESET;
2384 break;
2385
2386 case IB_CM_REP_RECEIVED:
2387 comp = 1;
509c07bc 2388 srp_cm_rep_handler(cm_id, event->private_data, ch);
2389 break;
2390
2391 case IB_CM_REJ_RECEIVED:
7aa54bd7 2392 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2393 comp = 1;
2394
509c07bc 2395 srp_cm_rej_handler(cm_id, event, ch);
2396 break;
2397
b7ac4ab4 2398 case IB_CM_DREQ_RECEIVED:
2399 shost_printk(KERN_WARNING, target->scsi_host,
2400 PFX "DREQ received - connection closed\n");
c014c8cd 2401 ch->connected = false;
b7ac4ab4 2402 if (ib_send_cm_drep(cm_id, NULL, 0))
2403 shost_printk(KERN_ERR, target->scsi_host,
2404 PFX "Sending CM DREP failed\n");
c1120f89 2405 queue_work(system_long_wq, &target->tl_err_work);
2406 break;
2407
2408 case IB_CM_TIMEWAIT_EXIT:
2409 shost_printk(KERN_ERR, target->scsi_host,
2410 PFX "connection closed\n");
ac72d766 2411 comp = 1;
aef9ec39 2412
509c07bc 2413 ch->status = 0;
2414 break;
2415
2416 case IB_CM_MRA_RECEIVED:
2417 case IB_CM_DREQ_ERROR:
2418 case IB_CM_DREP_RECEIVED:
2419 break;
2420
aef9ec39 2421 default:
2422 shost_printk(KERN_WARNING, target->scsi_host,
2423 PFX "Unhandled CM event %d\n", event->event);
2424 break;
2425 }
2426
2427 if (comp)
509c07bc 2428 complete(&ch->done);
aef9ec39 2429
2430 return 0;
2431}
2432
2433/**
2434 * srp_change_queue_depth - setting device queue depth
2435 * @sdev: scsi device struct
2436 * @qdepth: requested queue depth
2437 *
2438 * Returns queue depth.
2439 */
2440static int
db5ed4df 2441srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2442{
c40ecc12 2443 if (!sdev->tagged_supported)
1e6f2416 2444 qdepth = 1;
db5ed4df 2445 return scsi_change_queue_depth(sdev, qdepth);
2446}
2447
2448static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2449 u8 func)
aef9ec39 2450{
509c07bc 2451 struct srp_target_port *target = ch->target;
a95cadb9 2452 struct srp_rport *rport = target->rport;
19081f31 2453 struct ib_device *dev = target->srp_host->srp_dev->dev;
2454 struct srp_iu *iu;
2455 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2456
c014c8cd 2457 if (!ch->connected || target->qp_in_error)
2458 return -1;
2459
509c07bc 2460 init_completion(&ch->tsk_mgmt_done);
aef9ec39 2461
a95cadb9 2462 /*
2463	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2464	 * invoked while a task management function is being sent.
2465 */
2466 mutex_lock(&rport->mutex);
2467 spin_lock_irq(&ch->lock);
2468 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2469 spin_unlock_irq(&ch->lock);
76c75b25 2470
2471 if (!iu) {
2472 mutex_unlock(&rport->mutex);
2473
76c75b25 2474 return -1;
a95cadb9 2475 }
aef9ec39 2476
2477 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2478 DMA_TO_DEVICE);
2479 tsk_mgmt = iu->buf;
2480 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2481
2482 tsk_mgmt->opcode = SRP_TSK_MGMT;
985aa495 2483 int_to_scsilun(lun, &tsk_mgmt->lun);
f8b6e31e 2484 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2485 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2486 tsk_mgmt->task_tag = req_tag;
aef9ec39 2487
2488 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2489 DMA_TO_DEVICE);
2490 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2491 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2492 mutex_unlock(&rport->mutex);
2493
2494 return -1;
2495 }
a95cadb9 2496 mutex_unlock(&rport->mutex);
d945e1df 2497
509c07bc 2498 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
aef9ec39 2499 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2500 return -1;
aef9ec39 2501
d945e1df 2502 return 0;
2503}
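/*
 * Responses to task management requests are recognized in srp_process_rsp()
 * by the SRP_TAG_TSK_MGMT bit set in the tag above; they complete
 * ch->tsk_mgmt_done instead of a SCSI command.
 */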
2504
2505static int srp_abort(struct scsi_cmnd *scmnd)
2506{
d945e1df 2507 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2508 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2509 u32 tag;
d92c0da7 2510 u16 ch_idx;
509c07bc 2511 struct srp_rdma_ch *ch;
086f44f5 2512 int ret;
d945e1df 2513
7aa54bd7 2514 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2515
d92c0da7 2516 if (!req)
99b6697a 2517 return SUCCESS;
77f2c1a4 2518 tag = blk_mq_unique_tag(scmnd->request);
2519 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2520 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2521 return SUCCESS;
2522 ch = &target->ch[ch_idx];
2523 if (!srp_claim_req(ch, req, NULL, scmnd))
2524 return SUCCESS;
2525 shost_printk(KERN_ERR, target->scsi_host,
2526 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2527 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
80d5e8a2 2528 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2529 ret = SUCCESS;
ed9b2264 2530 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2531 ret = FAST_IO_FAIL;
2532 else
2533 ret = FAILED;
509c07bc 2534 srp_free_req(ch, req, scmnd, 0);
22032991 2535 scmnd->result = DID_ABORT << 16;
d8536670 2536 scmnd->scsi_done(scmnd);
d945e1df 2537
086f44f5 2538 return ret;
2539}
2540
2541static int srp_reset_device(struct scsi_cmnd *scmnd)
2542{
d945e1df 2543 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 2544 struct srp_rdma_ch *ch;
2545	int i, j;
d945e1df 2546
7aa54bd7 2547 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2548
d92c0da7 2549 ch = &target->ch[0];
509c07bc 2550 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
f8b6e31e 2551 SRP_TSK_LUN_RESET))
d945e1df 2552 return FAILED;
509c07bc 2553 if (ch->tsk_mgmt_status)
2554 return FAILED;
2555
2556 for (i = 0; i < target->ch_count; i++) {
2557 ch = &target->ch[i];
2558		for (j = 0; j < target->req_ring_size; ++j) {
2559			struct srp_request *req = &ch->req_ring[j];
509c07bc 2560
2561 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2562 }
536ae14e 2563 }
d945e1df 2564
d945e1df 2565 return SUCCESS;
2566}
2567
2568static int srp_reset_host(struct scsi_cmnd *scmnd)
2569{
2570 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2571
7aa54bd7 2572 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2573
ed9b2264 2574 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2575}
2576
2577static int srp_slave_configure(struct scsi_device *sdev)
2578{
2579 struct Scsi_Host *shost = sdev->host;
2580 struct srp_target_port *target = host_to_target(shost);
2581 struct request_queue *q = sdev->request_queue;
2582 unsigned long timeout;
2583
2584 if (sdev->type == TYPE_DISK) {
2585 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2586 blk_queue_rq_timeout(q, timeout);
2587 }
2588
2589 return 0;
2590}
2591
2592static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2593 char *buf)
6ecb0c84 2594{
ee959b00 2595 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2596
45c37cad 2597 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2598}
2599
2600static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2601 char *buf)
6ecb0c84 2602{
ee959b00 2603 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2604
45c37cad 2605 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2606}
2607
2608static ssize_t show_service_id(struct device *dev,
2609 struct device_attribute *attr, char *buf)
6ecb0c84 2610{
ee959b00 2611 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2612
45c37cad 2613 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2614}
2615
2616static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2617 char *buf)
6ecb0c84 2618{
ee959b00 2619 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2620
747fe000 2621 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2622}
2623
2624static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2625 char *buf)
2626{
2627 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2628
747fe000 2629 return sprintf(buf, "%pI6\n", target->sgid.raw);
2630}
2631
2632static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2633 char *buf)
6ecb0c84 2634{
ee959b00 2635 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 2636 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 2637
509c07bc 2638 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2639}
2640
2641static ssize_t show_orig_dgid(struct device *dev,
2642 struct device_attribute *attr, char *buf)
3633b3d0 2643{
ee959b00 2644 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2645
747fe000 2646 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2647}
2648
2649static ssize_t show_req_lim(struct device *dev,
2650 struct device_attribute *attr, char *buf)
2651{
2652 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2653 struct srp_rdma_ch *ch;
2654 int i, req_lim = INT_MAX;
89de7486 2655
2656 for (i = 0; i < target->ch_count; i++) {
2657 ch = &target->ch[i];
2658 req_lim = min(req_lim, ch->req_lim);
2659 }
2660 return sprintf(buf, "%d\n", req_lim);
2661}
2662
2663static ssize_t show_zero_req_lim(struct device *dev,
2664 struct device_attribute *attr, char *buf)
6bfa24fa 2665{
ee959b00 2666 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2667
2668 return sprintf(buf, "%d\n", target->zero_req_lim);
2669}
2670
2671static ssize_t show_local_ib_port(struct device *dev,
2672 struct device_attribute *attr, char *buf)
ded7f1a1 2673{
ee959b00 2674 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2675
2676 return sprintf(buf, "%d\n", target->srp_host->port);
2677}
2678
2679static ssize_t show_local_ib_device(struct device *dev,
2680 struct device_attribute *attr, char *buf)
ded7f1a1 2681{
ee959b00 2682 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2683
05321937 2684 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2685}
2686
2687static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2688 char *buf)
2689{
2690 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2691
2692 return sprintf(buf, "%d\n", target->ch_count);
2693}
2694
2695static ssize_t show_comp_vector(struct device *dev,
2696 struct device_attribute *attr, char *buf)
2697{
2698 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2699
2700 return sprintf(buf, "%d\n", target->comp_vector);
2701}
2702
2703static ssize_t show_tl_retry_count(struct device *dev,
2704 struct device_attribute *attr, char *buf)
2705{
2706 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2707
2708 return sprintf(buf, "%d\n", target->tl_retry_count);
2709}
2710
2711static ssize_t show_cmd_sg_entries(struct device *dev,
2712 struct device_attribute *attr, char *buf)
2713{
2714 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2715
2716 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2717}
2718
2719static ssize_t show_allow_ext_sg(struct device *dev,
2720 struct device_attribute *attr, char *buf)
2721{
2722 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2723
2724 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2725}
2726
2727static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2728static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2729static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2730static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2731static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2732static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2733static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2734static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2735static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2736static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2737static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 2738static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 2739static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2740static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2741static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2742static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2743
2744static struct device_attribute *srp_host_attrs[] = {
2745 &dev_attr_id_ext,
2746 &dev_attr_ioc_guid,
2747 &dev_attr_service_id,
2748 &dev_attr_pkey,
848b3082 2749 &dev_attr_sgid,
2750 &dev_attr_dgid,
2751 &dev_attr_orig_dgid,
89de7486 2752 &dev_attr_req_lim,
2753 &dev_attr_zero_req_lim,
2754 &dev_attr_local_ib_port,
2755 &dev_attr_local_ib_device,
d92c0da7 2756 &dev_attr_ch_count,
4b5e5f41 2757 &dev_attr_comp_vector,
7bb312e4 2758 &dev_attr_tl_retry_count,
49248644 2759 &dev_attr_cmd_sg_entries,
c07d424d 2760 &dev_attr_allow_ext_sg,
2761 NULL
2762};
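/*
 * These attributes show up under the SCSI host sysfs directory, e.g.
 * /sys/class/scsi_host/host<n>/req_lim (illustrative path; the host number
 * depends on the system).
 */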
2763
2764static struct scsi_host_template srp_template = {
2765 .module = THIS_MODULE,
2766 .name = "InfiniBand SRP initiator",
2767 .proc_name = DRV_NAME,
c9b03c1a 2768 .slave_configure = srp_slave_configure,
2769 .info = srp_target_info,
2770 .queuecommand = srp_queuecommand,
71444b97 2771 .change_queue_depth = srp_change_queue_depth,
2772 .eh_abort_handler = srp_abort,
2773 .eh_device_reset_handler = srp_reset_device,
2774 .eh_host_reset_handler = srp_reset_host,
2742c1da 2775 .skip_settle_delay = true,
49248644 2776 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2777 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2778 .this_id = -1,
4d73f95f 2779 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84 2780 .use_clustering = ENABLE_CLUSTERING,
77f2c1a4 2781 .shost_attrs = srp_host_attrs,
c40ecc12 2782 .track_queue_depth = 1,
2783};
2784
2785static int srp_sdev_count(struct Scsi_Host *host)
2786{
2787 struct scsi_device *sdev;
2788 int c = 0;
2789
2790 shost_for_each_device(sdev, host)
2791 c++;
2792
2793 return c;
2794}
2795
2796/*
2797 * Return values:
2798 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2799 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2800 * removal has been scheduled.
2801 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2802 */
2803static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2804{
2805 struct srp_rport_identifiers ids;
2806 struct srp_rport *rport;
2807
34aa654e 2808 target->state = SRP_TARGET_SCANNING;
aef9ec39 2809 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 2810 be64_to_cpu(target->id_ext));
aef9ec39 2811
05321937 2812 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2813 return -ENODEV;
2814
2815 memcpy(ids.port_id, &target->id_ext, 8);
2816 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2817 ids.roles = SRP_RPORT_ROLE_TARGET;
2818 rport = srp_rport_add(target->scsi_host, &ids);
2819 if (IS_ERR(rport)) {
2820 scsi_remove_host(target->scsi_host);
2821 return PTR_ERR(rport);
2822 }
2823
dc1bdbd9 2824 rport->lld_data = target;
9dd69a60 2825 target->rport = rport;
dc1bdbd9 2826
b3589fd4 2827 spin_lock(&host->target_lock);
aef9ec39 2828 list_add_tail(&target->list, &host->target_list);
b3589fd4 2829 spin_unlock(&host->target_lock);
aef9ec39 2830
aef9ec39 2831 scsi_scan_target(&target->scsi_host->shost_gendev,
1962a4a1 2832 0, target->scsi_id, SCAN_WILD_CARD, 0);
aef9ec39 2833
2834 if (srp_connected_ch(target) < target->ch_count ||
2835 target->qp_in_error) {
2836 shost_printk(KERN_INFO, target->scsi_host,
2837 PFX "SCSI scan failed - removing SCSI host\n");
2838 srp_queue_remove_work(target);
2839 goto out;
2840 }
2841
2842 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2843 dev_name(&target->scsi_host->shost_gendev),
2844 srp_sdev_count(target->scsi_host));
2845
2846 spin_lock_irq(&target->lock);
2847 if (target->state == SRP_TARGET_SCANNING)
2848 target->state = SRP_TARGET_LIVE;
2849 spin_unlock_irq(&target->lock);
2850
2851out:
2852 return 0;
2853}
2854
ee959b00 2855static void srp_release_dev(struct device *dev)
2856{
2857 struct srp_host *host =
ee959b00 2858 container_of(dev, struct srp_host, dev);
2859
2860 complete(&host->released);
2861}
2862
2863static struct class srp_class = {
2864 .name = "infiniband_srp",
ee959b00 2865 .dev_release = srp_release_dev
2866};
2867
2868/**
2869 * srp_conn_unique() - check whether the connection to a target is unique
2870 * @host: SRP host.
2871 * @target: SRP target port.
2872 */
2873static bool srp_conn_unique(struct srp_host *host,
2874 struct srp_target_port *target)
2875{
2876 struct srp_target_port *t;
2877 bool ret = false;
2878
2879 if (target->state == SRP_TARGET_REMOVED)
2880 goto out;
2881
2882 ret = true;
2883
2884 spin_lock(&host->target_lock);
2885 list_for_each_entry(t, &host->target_list, list) {
2886 if (t != target &&
2887 target->id_ext == t->id_ext &&
2888 target->ioc_guid == t->ioc_guid &&
2889 target->initiator_ext == t->initiator_ext) {
2890 ret = false;
2891 break;
2892 }
2893 }
2894 spin_unlock(&host->target_lock);
2895
2896out:
2897 return ret;
2898}
2899
2900/*
2901 * Target ports are added by writing
2902 *
2903 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2904 * pkey=<P_Key>,service_id=<service ID>
2905 *
2906 * to the add_target sysfs attribute.
2907 */
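/*
 * Example with placeholder identifiers (normally generated by srp_daemon):
 *
 * echo id_ext=200100a0b8123456,ioc_guid=00a0b8123456,\
 *      dgid=fe800000000000000002c903000e8acd,pkey=ffff,\
 *      service_id=200100a0b8123456 \
 *      > /sys/class/infiniband_srp/srp-<hca>-<port>/add_target
 */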
2908enum {
2909 SRP_OPT_ERR = 0,
2910 SRP_OPT_ID_EXT = 1 << 0,
2911 SRP_OPT_IOC_GUID = 1 << 1,
2912 SRP_OPT_DGID = 1 << 2,
2913 SRP_OPT_PKEY = 1 << 3,
2914 SRP_OPT_SERVICE_ID = 1 << 4,
2915 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2916 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2917 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2918 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2919 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
2920 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2921 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 2922 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 2923 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 2924 SRP_OPT_QUEUE_SIZE = 1 << 14,
2925 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2926 SRP_OPT_IOC_GUID |
2927 SRP_OPT_DGID |
2928 SRP_OPT_PKEY |
2929 SRP_OPT_SERVICE_ID),
2930};
2931
a447c093 2932static const match_table_t srp_opt_tokens = {
2933 { SRP_OPT_ID_EXT, "id_ext=%s" },
2934 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2935 { SRP_OPT_DGID, "dgid=%s" },
2936 { SRP_OPT_PKEY, "pkey=%x" },
2937 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2938 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2939 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 2940 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 2941 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 2942 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
2943 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2944 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 2945 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 2946 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 2947 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 2948 { SRP_OPT_ERR, NULL }
2949};
2950
2951static int srp_parse_options(const char *buf, struct srp_target_port *target)
2952{
2953 char *options, *sep_opt;
2954 char *p;
2955 char dgid[3];
2956 substring_t args[MAX_OPT_ARGS];
2957 int opt_mask = 0;
2958 int token;
2959 int ret = -EINVAL;
2960 int i;
2961
2962 options = kstrdup(buf, GFP_KERNEL);
2963 if (!options)
2964 return -ENOMEM;
2965
2966 sep_opt = options;
7dcf9c19 2967 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2968 if (!*p)
2969 continue;
2970
2971 token = match_token(p, srp_opt_tokens, args);
2972 opt_mask |= token;
2973
2974 switch (token) {
2975 case SRP_OPT_ID_EXT:
2976 p = match_strdup(args);
2977 if (!p) {
2978 ret = -ENOMEM;
2979 goto out;
2980 }
2981 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2982 kfree(p);
2983 break;
2984
2985 case SRP_OPT_IOC_GUID:
2986 p = match_strdup(args);
2987 if (!p) {
2988 ret = -ENOMEM;
2989 goto out;
2990 }
2991 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2992 kfree(p);
2993 break;
2994
2995 case SRP_OPT_DGID:
2996 p = match_strdup(args);
2997 if (!p) {
2998 ret = -ENOMEM;
2999 goto out;
3000 }
aef9ec39 3001 if (strlen(p) != 32) {
e0bda7d8 3002 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 3003 kfree(p);
3004 goto out;
3005 }
3006
3007 for (i = 0; i < 16; ++i) {
3008 strlcpy(dgid, p + i * 2, sizeof(dgid));
3009 if (sscanf(dgid, "%hhx",
3010 &target->orig_dgid.raw[i]) < 1) {
3011 ret = -EINVAL;
3012 kfree(p);
3013 goto out;
3014 }
aef9ec39 3015 }
bf17c1c7 3016 kfree(p);
3017 break;
3018
3019 case SRP_OPT_PKEY:
3020 if (match_hex(args, &token)) {
e0bda7d8 3021 pr_warn("bad P_Key parameter '%s'\n", p);
3022 goto out;
3023 }
747fe000 3024 target->pkey = cpu_to_be16(token);
3025 break;
3026
3027 case SRP_OPT_SERVICE_ID:
3028 p = match_strdup(args);
3029 if (!p) {
3030 ret = -ENOMEM;
3031 goto out;
3032 }
3033 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3034 kfree(p);
3035 break;
3036
3037 case SRP_OPT_MAX_SECT:
3038 if (match_int(args, &token)) {
e0bda7d8 3039 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3040 goto out;
3041 }
3042 target->scsi_host->max_sectors = token;
3043 break;
3044
3045 case SRP_OPT_QUEUE_SIZE:
3046 if (match_int(args, &token) || token < 1) {
3047 pr_warn("bad queue_size parameter '%s'\n", p);
3048 goto out;
3049 }
3050 target->scsi_host->can_queue = token;
3051 target->queue_size = token + SRP_RSP_SQ_SIZE +
3052 SRP_TSK_MGMT_SQ_SIZE;
3053 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3054 target->scsi_host->cmd_per_lun = token;
3055 break;
3056
52fb2b50 3057 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3058 if (match_int(args, &token) || token < 1) {
3059 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3060 p);
3061 goto out;
3062 }
4d73f95f 3063 target->scsi_host->cmd_per_lun = token;
3064 break;
3065
3066 case SRP_OPT_IO_CLASS:
3067 if (match_hex(args, &token)) {
e0bda7d8 3068 pr_warn("bad IO class parameter '%s'\n", p);
3069 goto out;
3070 }
3071 if (token != SRP_REV10_IB_IO_CLASS &&
3072 token != SRP_REV16A_IB_IO_CLASS) {
3073 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3074 token, SRP_REV10_IB_IO_CLASS,
3075 SRP_REV16A_IB_IO_CLASS);
3076 goto out;
3077 }
3078 target->io_class = token;
3079 break;
3080
3081 case SRP_OPT_INITIATOR_EXT:
3082 p = match_strdup(args);
3083 if (!p) {
3084 ret = -ENOMEM;
3085 goto out;
3086 }
3087 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3088 kfree(p);
3089 break;
3090
3091 case SRP_OPT_CMD_SG_ENTRIES:
3092 if (match_int(args, &token) || token < 1 || token > 255) {
3093 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3094 p);
3095 goto out;
3096 }
3097 target->cmd_sg_cnt = token;
3098 break;
3099
3100 case SRP_OPT_ALLOW_EXT_SG:
3101 if (match_int(args, &token)) {
e0bda7d8 3102 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3103 goto out;
3104 }
3105 target->allow_ext_sg = !!token;
3106 break;
3107
3108 case SRP_OPT_SG_TABLESIZE:
3109 if (match_int(args, &token) || token < 1 ||
3110 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3111 pr_warn("bad max sg_tablesize parameter '%s'\n",
3112 p);
3113 goto out;
3114 }
3115 target->sg_tablesize = token;
3116 break;
3117
3118 case SRP_OPT_COMP_VECTOR:
3119 if (match_int(args, &token) || token < 0) {
3120 pr_warn("bad comp_vector parameter '%s'\n", p);
3121 goto out;
3122 }
3123 target->comp_vector = token;
3124 break;
3125
3126 case SRP_OPT_TL_RETRY_COUNT:
3127 if (match_int(args, &token) || token < 2 || token > 7) {
3128 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3129 p);
3130 goto out;
3131 }
3132 target->tl_retry_count = token;
3133 break;
3134
aef9ec39 3135 default:
3136 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3137 p);
3138 goto out;
3139 }
3140 }
3141
3142 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3143 ret = 0;
3144 else
3145 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3146 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3147 !(srp_opt_tokens[i].token & opt_mask))
3148 pr_warn("target creation request is missing parameter '%s'\n",
3149 srp_opt_tokens[i].pattern);
aef9ec39 3150
3151 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3152 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3153 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3154 target->scsi_host->cmd_per_lun,
3155 target->scsi_host->can_queue);
3156
3157out:
3158 kfree(options);
3159 return ret;
3160}
3161
3162static ssize_t srp_create_target(struct device *dev,
3163 struct device_attribute *attr,
3164 const char *buf, size_t count)
3165{
3166 struct srp_host *host =
ee959b00 3167 container_of(dev, struct srp_host, dev);
3168 struct Scsi_Host *target_host;
3169 struct srp_target_port *target;
509c07bc 3170 struct srp_rdma_ch *ch;
3171 struct srp_device *srp_dev = host->srp_dev;
3172 struct ib_device *ibdev = srp_dev->dev;
3173 int ret, node_idx, node, cpu, i;
3174 bool multich = false;
3175
3176 target_host = scsi_host_alloc(&srp_template,
3177 sizeof (struct srp_target_port));
3178 if (!target_host)
3179 return -ENOMEM;
3180
49248644 3181 target_host->transportt = ib_srp_transport_template;
3182 target_host->max_channel = 0;
3183 target_host->max_id = 1;
985aa495 3184 target_host->max_lun = -1LL;
3c8edf0e 3185 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 3186
aef9ec39 3187 target = host_to_target(target_host);
aef9ec39 3188
3189 target->io_class = SRP_REV16A_IB_IO_CLASS;
3190 target->scsi_host = target_host;
3191 target->srp_host = host;
e6bf5f48 3192 target->lkey = host->srp_dev->pd->local_dma_lkey;
03f6fb93 3193 target->global_mr = host->srp_dev->global_mr;
49248644 3194 target->cmd_sg_cnt = cmd_sg_entries;
3195 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3196 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3197 target->tl_retry_count = 7;
4d73f95f 3198 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3199
3200 /*
3201	 * Make sure that the SCSI host cannot be removed by srp_remove_target()
3202	 * before this function returns.
3203 */
3204 scsi_host_get(target->scsi_host);
3205
3206 mutex_lock(&host->add_target_mutex);
3207
3208 ret = srp_parse_options(buf, target);
3209 if (ret)
fb49c8bb 3210 goto out;
aef9ec39 3211
3212 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3213
3214 if (!srp_conn_unique(target->srp_host, target)) {
3215 shost_printk(KERN_INFO, target->scsi_host,
3216 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3217 be64_to_cpu(target->id_ext),
3218 be64_to_cpu(target->ioc_guid),
3219 be64_to_cpu(target->initiator_ext));
3220 ret = -EEXIST;
fb49c8bb 3221 goto out;
3222 }
3223
5cfb1782 3224 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3225 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3226 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3227 target->sg_tablesize = target->cmd_sg_cnt;
3228 }
3229
3230 target_host->sg_tablesize = target->sg_tablesize;
fa9863f8 3231 target->mr_pool_size = target->scsi_host->can_queue;
3232 target->indirect_size = target->sg_tablesize *
3233 sizeof (struct srp_direct_buf);
3234 target->max_iu_len = sizeof (struct srp_cmd) +
3235 sizeof (struct srp_indirect_buf) +
3236 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
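	/*
	 * Illustration, assuming the usual SRP wire sizes (48-byte srp_cmd,
	 * 20-byte srp_indirect_buf, 16-byte srp_direct_buf): the default
	 * cmd_sg_cnt of 12 yields max_iu_len = 48 + 20 + 12 * 16 = 260 bytes
	 * per command IU.
	 */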
3237
c1120f89 3238 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3239 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3240 spin_lock_init(&target->lock);
55ee3ab2 3241 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
2088ca66 3242 if (ret)
fb49c8bb 3243 goto out;
aef9ec39 3244
3245 ret = -ENOMEM;
3246 target->ch_count = max_t(unsigned, num_online_nodes(),
3247 min(ch_count ? :
3248 min(4 * num_online_nodes(),
3249 ibdev->num_comp_vectors),
3250 num_online_cpus()));
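	/*
	 * Example (hypothetical topology): on a two-node system with sixteen
	 * online CPUs, eight completion vectors and no ch_count module
	 * parameter, this evaluates to max(2, min(min(8, 8), 16)) = 8 RDMA
	 * channels.
	 */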
3251 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3252 GFP_KERNEL);
3253 if (!target->ch)
fb49c8bb 3254 goto out;
aef9ec39 3255
3256 node_idx = 0;
3257 for_each_online_node(node) {
3258 const int ch_start = (node_idx * target->ch_count /
3259 num_online_nodes());
3260 const int ch_end = ((node_idx + 1) * target->ch_count /
3261 num_online_nodes());
3262 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3263 num_online_nodes() + target->comp_vector)
3264 % ibdev->num_comp_vectors;
3265 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3266 num_online_nodes() + target->comp_vector)
3267 % ibdev->num_comp_vectors;
3268 int cpu_idx = 0;
3269
3270 for_each_online_cpu(cpu) {
3271 if (cpu_to_node(cpu) != node)
3272 continue;
3273 if (ch_start + cpu_idx >= ch_end)
3274 continue;
3275 ch = &target->ch[ch_start + cpu_idx];
3276 ch->target = target;
3277 ch->comp_vector = cv_start == cv_end ? cv_start :
3278 cv_start + cpu_idx % (cv_end - cv_start);
3279 spin_lock_init(&ch->lock);
3280 INIT_LIST_HEAD(&ch->free_tx);
3281 ret = srp_new_cm_id(ch);
3282 if (ret)
3283 goto err_disconnect;
aef9ec39 3284
3285 ret = srp_create_ch_ib(ch);
3286 if (ret)
3287 goto err_disconnect;
3288
3289 ret = srp_alloc_req_data(ch);
3290 if (ret)
3291 goto err_disconnect;
3292
3293 ret = srp_connect_ch(ch, multich);
3294 if (ret) {
3295 shost_printk(KERN_ERR, target->scsi_host,
3296 PFX "Connection %d/%d failed\n",
3297 ch_start + cpu_idx,
3298 target->ch_count);
3299 if (node_idx == 0 && cpu_idx == 0) {
3300 goto err_disconnect;
3301 } else {
3302 srp_free_ch_ib(target, ch);
3303 srp_free_req_data(target, ch);
3304 target->ch_count = ch - target->ch;
c257ea6f 3305 goto connected;
3306 }
3307 }
3308
3309 multich = true;
3310 cpu_idx++;
3311 }
3312 node_idx++;
aef9ec39
RD
3313 }
3314
c257ea6f 3315connected:
3316 target->scsi_host->nr_hw_queues = target->ch_count;
3317
3318 ret = srp_add_target(host, target);
3319 if (ret)
3320 goto err_disconnect;
3321
3322 if (target->state != SRP_TARGET_REMOVED) {
3323 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3324 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3325 be64_to_cpu(target->id_ext),
3326 be64_to_cpu(target->ioc_guid),
747fe000 3327 be16_to_cpu(target->pkey),
34aa654e 3328 be64_to_cpu(target->service_id),
747fe000 3329 target->sgid.raw, target->orig_dgid.raw);
34aa654e 3330 }
e7ffde01 3331
3332 ret = count;
3333
3334out:
3335 mutex_unlock(&host->add_target_mutex);
3336
3337 scsi_host_put(target->scsi_host);
3338 if (ret < 0)
3339 scsi_host_put(target->scsi_host);
34aa654e 3340
2d7091bc 3341 return ret;
3342
3343err_disconnect:
3344 srp_disconnect_target(target);
3345
3346 for (i = 0; i < target->ch_count; i++) {
3347 ch = &target->ch[i];
3348 srp_free_ch_ib(target, ch);
3349 srp_free_req_data(target, ch);
3350 }
aef9ec39 3351
d92c0da7 3352 kfree(target->ch);
2d7091bc 3353 goto out;
3354}
3355
ee959b00 3356static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 3357
3358static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3359 char *buf)
aef9ec39 3360{
ee959b00 3361 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3362
05321937 3363 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3364}
3365
ee959b00 3366static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3367
3368static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3369 char *buf)
aef9ec39 3370{
ee959b00 3371 struct srp_host *host = container_of(dev, struct srp_host, dev);
3372
3373 return sprintf(buf, "%d\n", host->port);
3374}
3375
ee959b00 3376static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3377
f5358a17 3378static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3379{
3380 struct srp_host *host;
3381
3382 host = kzalloc(sizeof *host, GFP_KERNEL);
3383 if (!host)
3384 return NULL;
3385
3386 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3387 spin_lock_init(&host->target_lock);
aef9ec39 3388 init_completion(&host->released);
2d7091bc 3389 mutex_init(&host->add_target_mutex);
05321937 3390 host->srp_dev = device;
3391 host->port = port;
3392
3393 host->dev.class = &srp_class;
3394 host->dev.parent = device->dev->dma_device;
d927e38c 3395 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3396
ee959b00 3397 if (device_register(&host->dev))
f5358a17 3398 goto free_host;
ee959b00 3399 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3400 goto err_class;
ee959b00 3401 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3402 goto err_class;
ee959b00 3403 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3404 goto err_class;
3405
3406 return host;
3407
3408err_class:
ee959b00 3409 device_unregister(&host->dev);
aef9ec39 3410
f5358a17 3411free_host:
aef9ec39
RD
3412 kfree(host);
3413
3414 return NULL;
3415}
3416
3417static void srp_add_one(struct ib_device *device)
3418{
f5358a17 3419 struct srp_device *srp_dev;
aef9ec39 3420 struct srp_host *host;
4139032b 3421 int mr_page_shift, p;
52ede08f 3422 u64 max_pages_per_mr;
aef9ec39 3423
f5358a17
RD
3424 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3425 if (!srp_dev)
4a061b28 3426 return;
f5358a17 3427
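	/*
	 * Check which memory registration methods the HCA supports. Fast
	 * registration (FR) is used when it is the only method available or
	 * when the prefer_fr module parameter is set (the default).
	 */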
d1b4289e
BVA
3428 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3429 device->map_phys_fmr && device->unmap_fmr);
4a061b28 3430 srp_dev->has_fr = (device->attrs.device_cap_flags &
5cfb1782
BVA
3431 IB_DEVICE_MEM_MGT_EXTENSIONS);
3432 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3433 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3434
3435 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3436 (!srp_dev->has_fmr || prefer_fr));
002f1567 3437 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
d1b4289e 3438
f5358a17
RD
3439 /*
3440 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3441 * minimum of 4096 bytes. We're unlikely to build large sglists
3442 * out of smaller entries.
f5358a17 3443 */
4a061b28 3444 mr_page_shift = max(12, ffs(device->attrs.page_size_cap) - 1);
52ede08f
BVA
3445 srp_dev->mr_page_size = 1 << mr_page_shift;
3446 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
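	/*
	 * Limit the number of pages per memory region to what a single MR can
	 * cover, capped by SRP_MAX_PAGES_PER_MR and, when fast registration
	 * is used, by the device's fast registration page list length.
	 */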
4a061b28 3447 max_pages_per_mr = device->attrs.max_mr_size;
52ede08f
BVA
3448 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3449 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3450 max_pages_per_mr);
5cfb1782
BVA
3451 if (srp_dev->use_fast_reg) {
3452 srp_dev->max_pages_per_mr =
3453 min_t(u32, srp_dev->max_pages_per_mr,
4a061b28 3454 device->attrs.max_fast_reg_page_list_len);
5cfb1782 3455 }
52ede08f
BVA
3456 srp_dev->mr_max_size = srp_dev->mr_page_size *
3457 srp_dev->max_pages_per_mr;
4a061b28
OG
3458 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3459 device->name, mr_page_shift, device->attrs.max_mr_size,
3460 device->attrs.max_fast_reg_page_list_len,
52ede08f 3461 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
3462
3463 INIT_LIST_HEAD(&srp_dev->dev_list);
3464
3465 srp_dev->dev = device;
3466 srp_dev->pd = ib_alloc_pd(device);
3467 if (IS_ERR(srp_dev->pd))
3468 goto free_dev;
3469
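	/*
	 * A global DMA MR with remote access rights is only created when
	 * memory registration is not enforced for every request
	 * (register_always cleared) or when the device supports neither FMR
	 * nor FR.
	 */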
03f6fb93
BVA
3470 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3471 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3472 IB_ACCESS_LOCAL_WRITE |
3473 IB_ACCESS_REMOTE_READ |
3474 IB_ACCESS_REMOTE_WRITE);
3475 if (IS_ERR(srp_dev->global_mr))
3476 goto err_pd;
3477 } else {
3478 srp_dev->global_mr = NULL;
3479 }
f5358a17 3480
4139032b 3481 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
f5358a17 3482 host = srp_add_port(srp_dev, p);
aef9ec39 3483 if (host)
f5358a17 3484 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3485 }
3486
f5358a17 3487 ib_set_client_data(device, &srp_client, srp_dev);
4a061b28 3488 return;
f5358a17
RD
3489
3490err_pd:
3491 ib_dealloc_pd(srp_dev->pd);
3492
3493free_dev:
3494 kfree(srp_dev);
aef9ec39
RD
3495}
3496
7c1eb45a 3497static void srp_remove_one(struct ib_device *device, void *client_data)
aef9ec39 3498{
f5358a17 3499 struct srp_device *srp_dev;
aef9ec39 3500 struct srp_host *host, *tmp_host;
ef6c49d8 3501 struct srp_target_port *target;
aef9ec39 3502
7c1eb45a 3503 srp_dev = client_data;
1fe0cb84
DB
3504 if (!srp_dev)
3505 return;
aef9ec39 3506
f5358a17 3507 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3508 device_unregister(&host->dev);
aef9ec39
RD
3509 /*
3510 * Wait for the sysfs entry to go away, so that no new
3511 * target ports can be created.
3512 */
3513 wait_for_completion(&host->released);
3514
3515 /*
ef6c49d8 3516 * Remove all target ports.
aef9ec39 3517 */
b3589fd4 3518 spin_lock(&host->target_lock);
ef6c49d8
BVA
3519 list_for_each_entry(target, &host->target_list, list)
3520 srp_queue_remove_work(target);
b3589fd4 3521 spin_unlock(&host->target_lock);
aef9ec39
RD
3522
3523 /*
bcc05910 3524 * Wait for tl_err and target port removal tasks.
aef9ec39 3525 */
ef6c49d8 3526 flush_workqueue(system_long_wq);
bcc05910 3527 flush_workqueue(srp_remove_wq);
aef9ec39 3528
aef9ec39
RD
3529 kfree(host);
3530 }
3531
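	/*
	 * All ports have been removed; release the global MR (if one was
	 * created), the protection domain and the device context.
	 */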
03f6fb93
BVA
3532 if (srp_dev->global_mr)
3533 ib_dereg_mr(srp_dev->global_mr);
f5358a17
RD
3534 ib_dealloc_pd(srp_dev->pd);
3535
3536 kfree(srp_dev);
aef9ec39
RD
3537}
3538
3236822b 3539static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3540 .has_rport_state = true,
3541 .reset_timer_if_blocked = true,
a95cadb9 3542 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3543 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3544 .dev_loss_tmo = &srp_dev_loss_tmo,
3545 .reconnect = srp_rport_reconnect,
dc1bdbd9 3546 .rport_delete = srp_rport_delete,
ed9b2264 3547 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3548};
3549
aef9ec39
RD
3550static int __init srp_init_module(void)
3551{
3552 int ret;
3553
49248644 3554 if (srp_sg_tablesize) {
e0bda7d8 3555 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3556 if (!cmd_sg_entries)
3557 cmd_sg_entries = srp_sg_tablesize;
3558 }
3559
3560 if (!cmd_sg_entries)
3561 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3562
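	/*
	 * The descriptor count fields in the SRP_CMD information unit are one
	 * byte wide, hence the limit of 255 scatter/gather entries.
	 */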
3563 if (cmd_sg_entries > 255) {
e0bda7d8 3564 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3565 cmd_sg_entries = 255;
1e89a194
DD
3566 }
3567
c07d424d
DD
3568 if (!indirect_sg_entries)
3569 indirect_sg_entries = cmd_sg_entries;
3570 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3571 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3572 cmd_sg_entries);
c07d424d
DD
3573 indirect_sg_entries = cmd_sg_entries;
3574 }
3575
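	/*
	 * Set up, in order: the target-removal workqueue, the SRP transport
	 * template, the sysfs class, the SA client and the IB client. The
	 * labels below undo these steps in reverse order on failure.
	 */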
bcc05910 3576 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3577 if (!srp_remove_wq) {
3578 ret = -ENOMEM;
bcc05910
BVA
3579 goto out;
3580 }
3581
3582 ret = -ENOMEM;
3236822b
FT
3583 ib_srp_transport_template =
3584 srp_attach_transport(&ib_srp_transport_functions);
3585 if (!ib_srp_transport_template)
bcc05910 3586 goto destroy_wq;
3236822b 3587
aef9ec39
RD
3588 ret = class_register(&srp_class);
3589 if (ret) {
e0bda7d8 3590 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3591 goto release_tr;
aef9ec39
RD
3592 }
3593
c1a0b23b
MT
3594 ib_sa_register_client(&srp_sa_client);
3595
aef9ec39
RD
3596 ret = ib_register_client(&srp_client);
3597 if (ret) {
e0bda7d8 3598 pr_err("couldn't register IB client\n");
bcc05910 3599 goto unreg_sa;
aef9ec39
RD
3600 }
3601
bcc05910
BVA
3602out:
3603 return ret;
3604
3605unreg_sa:
3606 ib_sa_unregister_client(&srp_sa_client);
3607 class_unregister(&srp_class);
3608
3609release_tr:
3610 srp_release_transport(ib_srp_transport_template);
3611
3612destroy_wq:
3613 destroy_workqueue(srp_remove_wq);
3614 goto out;
aef9ec39
RD
3615}
3616
3617static void __exit srp_cleanup_module(void)
3618{
3619 ib_unregister_client(&srp_client);
c1a0b23b 3620 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3621 class_unregister(&srp_class);
3236822b 3622 srp_release_transport(ib_srp_transport_template);
bcc05910 3623 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3624}
3625
3626module_init(srp_init_module);
3627module_exit(srp_cleanup_module);