IB/srp: Convert to new registration API
drivers/infiniband/ulp/srp/ib_srp.c
/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

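/*
 * Usage note (illustrative; the values below are arbitrary examples): the
 * parameters declared with mode 0444 above can only be set at module load
 * time, e.g. "modprobe ib_srp cmd_sg_entries=64 ch_count=2", while
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo are registered with
 * S_IRUGO | S_IWUSR and srp_tmo_ops, so they can also be changed later
 * through /sys/module/ib_srp/parameters/, e.g.
 * "echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo".
 */
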
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
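
/*
 * Illustrative sketch (not taken from this file): how the two helpers above
 * are typically paired to build and tear down a ring of receive IUs. The
 * real ring setup lives in srp_alloc_iu_bufs() further down in ib_srp.c;
 * srp_example_alloc_rx_ring() is a made-up name used only for this example.
 */
static int __maybe_unused srp_example_alloc_rx_ring(struct srp_host *host,
						    struct srp_iu **ring,
						    int n, size_t len)
{
	int i;

	for (i = 0; i < n; i++) {
		/* Receive buffers are mapped DMA_FROM_DEVICE. */
		ring[i] = srp_alloc_iu(host, len, GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ring[i])
			goto err;
	}
	return 0;

err:
	while (--i >= 0)
		srp_free_iu(host, ring[i]);
	return -ENOMEM;
}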

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
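
/*
 * srp_init_qp() only brings a freshly created QP into the INIT state. The
 * remaining INIT -> RTR -> RTS transitions are driven from the IB CM
 * callback later in this file once the login response arrives; roughly as
 * sketched below (assumption: error unwinding and attribute allocation
 * omitted, srp_example_qp_to_rts() is a made-up name):
 */
static int __maybe_unused srp_example_qp_to_rts(struct ib_cm_id *cm_id,
						struct ib_qp *qp)
{
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &attr, &attr_mask);
	if (!ret)
		ret = ib_modify_qp(qp, &attr, attr_mask);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &attr, &attr_mask);
	if (!ret)
		ret = ib_modify_qp(qp, &attr, attr_mask);
	return ret;
}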

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}
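
/*
 * Sketch of the calling pattern the pool above is designed for. The real
 * users are srp_map_finish_fr() and srp_unmap_data() further down;
 * srp_example_use_fr_desc() is a made-up name used only for illustration.
 */
static int __maybe_unused srp_example_use_fr_desc(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d;

	d = srp_fr_pool_get(pool);
	if (!d)
		return -ENOMEM;	/* every descriptor is currently in flight */

	/*
	 * ... post an IB_WR_REG_MR work request through d->mr, advertise
	 * d->mr->rkey in the SRP data descriptor, and once the command
	 * completes queue a local invalidation for d->mr->rkey ...
	 */

	srp_fr_pool_put(pool, &d, 1);
	return 0;
}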

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

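/*
 * The marker receive work request posted above completes with a flush error
 * once the QP has been drained. The receive completion handler (outside this
 * excerpt) recognizes it by its wr_id and wakes up srp_destroy_qp();
 * conceptually it behaves like this sketch (not the actual handler):
 */
static void __maybe_unused srp_example_handle_drain(struct srp_rdma_ch *ch,
						    u64 wr_id)
{
	if (wr_id == SRP_LAST_WR_ID)
		complete(&ch->done);	/* lets srp_destroy_qp() continue */
}
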
509c07bc 482static int srp_create_ch_ib(struct srp_rdma_ch *ch)
aef9ec39 483{
509c07bc 484 struct srp_target_port *target = ch->target;
62154b2e 485 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39 486 struct ib_qp_init_attr *init_attr;
73aa89ed
IR
487 struct ib_cq *recv_cq, *send_cq;
488 struct ib_qp *qp;
d1b4289e 489 struct ib_fmr_pool *fmr_pool = NULL;
5cfb1782
BVA
490 struct srp_fr_pool *fr_pool = NULL;
491 const int m = 1 + dev->use_fast_reg;
8e37210b 492 struct ib_cq_init_attr cq_attr = {};
aef9ec39
RD
493 int ret;
494
495 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
496 if (!init_attr)
497 return -ENOMEM;
498
7dad6b2e 499 /* + 1 for SRP_LAST_WR_ID */
8e37210b
MB
500 cq_attr.cqe = target->queue_size + 1;
501 cq_attr.comp_vector = ch->comp_vector;
509c07bc 502 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
8e37210b 503 &cq_attr);
73aa89ed
IR
504 if (IS_ERR(recv_cq)) {
505 ret = PTR_ERR(recv_cq);
da9d2f07 506 goto err;
aef9ec39
RD
507 }
508
8e37210b
MB
509 cq_attr.cqe = m * target->queue_size;
510 cq_attr.comp_vector = ch->comp_vector;
509c07bc 511 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
8e37210b 512 &cq_attr);
73aa89ed
IR
513 if (IS_ERR(send_cq)) {
514 ret = PTR_ERR(send_cq);
da9d2f07 515 goto err_recv_cq;
9c03dc9f
BVA
516 }
517
73aa89ed 518 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
aef9ec39
RD
519
520 init_attr->event_handler = srp_qp_event;
5cfb1782 521 init_attr->cap.max_send_wr = m * target->queue_size;
7dad6b2e 522 init_attr->cap.max_recv_wr = target->queue_size + 1;
aef9ec39
RD
523 init_attr->cap.max_recv_sge = 1;
524 init_attr->cap.max_send_sge = 1;
5cfb1782 525 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
aef9ec39 526 init_attr->qp_type = IB_QPT_RC;
73aa89ed
IR
527 init_attr->send_cq = send_cq;
528 init_attr->recv_cq = recv_cq;
aef9ec39 529
62154b2e 530 qp = ib_create_qp(dev->pd, init_attr);
73aa89ed
IR
531 if (IS_ERR(qp)) {
532 ret = PTR_ERR(qp);
da9d2f07 533 goto err_send_cq;
aef9ec39
RD
534 }
535
73aa89ed 536 ret = srp_init_qp(target, qp);
da9d2f07
RD
537 if (ret)
538 goto err_qp;
aef9ec39 539
002f1567 540 if (dev->use_fast_reg) {
5cfb1782
BVA
541 fr_pool = srp_alloc_fr_pool(target);
542 if (IS_ERR(fr_pool)) {
543 ret = PTR_ERR(fr_pool);
544 shost_printk(KERN_WARNING, target->scsi_host, PFX
545 "FR pool allocation failed (%d)\n", ret);
546 goto err_qp;
547 }
002f1567 548 } else if (dev->use_fmr) {
d1b4289e
BVA
549 fmr_pool = srp_alloc_fmr_pool(target);
550 if (IS_ERR(fmr_pool)) {
551 ret = PTR_ERR(fmr_pool);
552 shost_printk(KERN_WARNING, target->scsi_host, PFX
553 "FMR pool allocation failed (%d)\n", ret);
554 goto err_qp;
555 }
d1b4289e
BVA
556 }
557
509c07bc 558 if (ch->qp)
7dad6b2e 559 srp_destroy_qp(ch);
509c07bc
BVA
560 if (ch->recv_cq)
561 ib_destroy_cq(ch->recv_cq);
562 if (ch->send_cq)
563 ib_destroy_cq(ch->send_cq);
73aa89ed 564
509c07bc
BVA
565 ch->qp = qp;
566 ch->recv_cq = recv_cq;
567 ch->send_cq = send_cq;
73aa89ed 568
7fbc67df
SG
569 if (dev->use_fast_reg) {
570 if (ch->fr_pool)
571 srp_destroy_fr_pool(ch->fr_pool);
572 ch->fr_pool = fr_pool;
573 } else if (dev->use_fmr) {
574 if (ch->fmr_pool)
575 ib_destroy_fmr_pool(ch->fmr_pool);
576 ch->fmr_pool = fmr_pool;
577 }
578
da9d2f07
RD
579 kfree(init_attr);
580 return 0;
581
582err_qp:
73aa89ed 583 ib_destroy_qp(qp);
da9d2f07
RD
584
585err_send_cq:
73aa89ed 586 ib_destroy_cq(send_cq);
da9d2f07
RD
587
588err_recv_cq:
73aa89ed 589 ib_destroy_cq(recv_cq);
da9d2f07
RD
590
591err:
aef9ec39
RD
592 kfree(init_attr);
593 return ret;
594}
595
4d73f95f
BVA
596/*
597 * Note: this function may be called without srp_alloc_iu_bufs() having been
509c07bc 598 * invoked. Hence the ch->[rt]x_ring checks.
4d73f95f 599 */
509c07bc
BVA
600static void srp_free_ch_ib(struct srp_target_port *target,
601 struct srp_rdma_ch *ch)
aef9ec39 602{
5cfb1782 603 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39
RD
604 int i;
605
d92c0da7
BVA
606 if (!ch->target)
607 return;
608
509c07bc
BVA
609 if (ch->cm_id) {
610 ib_destroy_cm_id(ch->cm_id);
611 ch->cm_id = NULL;
394c595e
BVA
612 }
613
d92c0da7
BVA
	/* Return if srp_new_cm_id() succeeded but srp_create_ch_ib() failed. */
615 if (!ch->qp)
616 return;
617
5cfb1782 618 if (dev->use_fast_reg) {
509c07bc
BVA
619 if (ch->fr_pool)
620 srp_destroy_fr_pool(ch->fr_pool);
002f1567 621 } else if (dev->use_fmr) {
509c07bc
BVA
622 if (ch->fmr_pool)
623 ib_destroy_fmr_pool(ch->fmr_pool);
5cfb1782 624 }
7dad6b2e 625 srp_destroy_qp(ch);
509c07bc
BVA
626 ib_destroy_cq(ch->send_cq);
627 ib_destroy_cq(ch->recv_cq);
aef9ec39 628
d92c0da7
BVA
629 /*
 * Prevent the SCSI error handler from using this channel after it has
 * been freed: the error handler may keep
632 * trying to perform recovery actions after scsi_remove_host()
633 * returned.
634 */
635 ch->target = NULL;
636
509c07bc
BVA
637 ch->qp = NULL;
638 ch->send_cq = ch->recv_cq = NULL;
73aa89ed 639
509c07bc 640 if (ch->rx_ring) {
4d73f95f 641 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
642 srp_free_iu(target->srp_host, ch->rx_ring[i]);
643 kfree(ch->rx_ring);
644 ch->rx_ring = NULL;
4d73f95f 645 }
509c07bc 646 if (ch->tx_ring) {
4d73f95f 647 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
648 srp_free_iu(target->srp_host, ch->tx_ring[i]);
649 kfree(ch->tx_ring);
650 ch->tx_ring = NULL;
4d73f95f 651 }
aef9ec39
RD
652}
653
654static void srp_path_rec_completion(int status,
655 struct ib_sa_path_rec *pathrec,
509c07bc 656 void *ch_ptr)
aef9ec39 657{
509c07bc
BVA
658 struct srp_rdma_ch *ch = ch_ptr;
659 struct srp_target_port *target = ch->target;
aef9ec39 660
509c07bc 661 ch->status = status;
aef9ec39 662 if (status)
7aa54bd7
DD
663 shost_printk(KERN_ERR, target->scsi_host,
664 PFX "Got failed path rec status %d\n", status);
aef9ec39 665 else
509c07bc
BVA
666 ch->path = *pathrec;
667 complete(&ch->done);
aef9ec39
RD
668}
669
509c07bc 670static int srp_lookup_path(struct srp_rdma_ch *ch)
aef9ec39 671{
509c07bc 672 struct srp_target_port *target = ch->target;
a702adce
BVA
673 int ret;
674
509c07bc
BVA
675 ch->path.numb_path = 1;
676
677 init_completion(&ch->done);
678
679 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
680 target->srp_host->srp_dev->dev,
681 target->srp_host->port,
682 &ch->path,
683 IB_SA_PATH_REC_SERVICE_ID |
684 IB_SA_PATH_REC_DGID |
685 IB_SA_PATH_REC_SGID |
686 IB_SA_PATH_REC_NUMB_PATH |
687 IB_SA_PATH_REC_PKEY,
688 SRP_PATH_REC_TIMEOUT_MS,
689 GFP_KERNEL,
690 srp_path_rec_completion,
691 ch, &ch->path_query);
692 if (ch->path_query_id < 0)
693 return ch->path_query_id;
694
695 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
696 if (ret < 0)
697 return ret;
aef9ec39 698
509c07bc 699 if (ch->status < 0)
7aa54bd7
DD
700 shost_printk(KERN_WARNING, target->scsi_host,
701 PFX "Path record query failed\n");
aef9ec39 702
509c07bc 703 return ch->status;
aef9ec39
RD
704}
705
d92c0da7 706static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
aef9ec39 707{
509c07bc 708 struct srp_target_port *target = ch->target;
aef9ec39
RD
709 struct {
710 struct ib_cm_req_param param;
711 struct srp_login_req priv;
712 } *req = NULL;
713 int status;
714
715 req = kzalloc(sizeof *req, GFP_KERNEL);
716 if (!req)
717 return -ENOMEM;
718
509c07bc 719 req->param.primary_path = &ch->path;
aef9ec39
RD
720 req->param.alternate_path = NULL;
721 req->param.service_id = target->service_id;
509c07bc
BVA
722 req->param.qp_num = ch->qp->qp_num;
723 req->param.qp_type = ch->qp->qp_type;
aef9ec39
RD
724 req->param.private_data = &req->priv;
725 req->param.private_data_len = sizeof req->priv;
726 req->param.flow_control = 1;
727
728 get_random_bytes(&req->param.starting_psn, 4);
729 req->param.starting_psn &= 0xffffff;
730
731 /*
732 * Pick some arbitrary defaults here; we could make these
733 * module parameters if anyone cared about setting them.
734 */
735 req->param.responder_resources = 4;
736 req->param.remote_cm_response_timeout = 20;
737 req->param.local_cm_response_timeout = 20;
7bb312e4 738 req->param.retry_count = target->tl_retry_count;
aef9ec39
RD
739 req->param.rnr_retry_count = 7;
740 req->param.max_cm_retries = 15;
741
742 req->priv.opcode = SRP_LOGIN_REQ;
743 req->priv.tag = 0;
49248644 744 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
aef9ec39
RD
745 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
746 SRP_BUF_FORMAT_INDIRECT);
d92c0da7
BVA
747 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
748 SRP_MULTICHAN_SINGLE);
0c0450db 749 /*
3cd96564 750 * In the published SRP specification (draft rev. 16a), the
0c0450db
R
751 * port identifier format is 8 bytes of ID extension followed
752 * by 8 bytes of GUID. Older drafts put the two halves in the
753 * opposite order, so that the GUID comes first.
754 *
755 * Targets conforming to these obsolete drafts can be
756 * recognized by the I/O Class they report.
757 */
758 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
759 memcpy(req->priv.initiator_port_id,
747fe000 760 &target->sgid.global.interface_id, 8);
0c0450db 761 memcpy(req->priv.initiator_port_id + 8,
01cb9bcb 762 &target->initiator_ext, 8);
0c0450db
R
763 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
764 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
765 } else {
766 memcpy(req->priv.initiator_port_id,
01cb9bcb
IR
767 &target->initiator_ext, 8);
768 memcpy(req->priv.initiator_port_id + 8,
747fe000 769 &target->sgid.global.interface_id, 8);
0c0450db
R
770 memcpy(req->priv.target_port_id, &target->id_ext, 8);
771 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
772 }
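	/*
	 * Resulting 16-byte port identifier layouts (restating the two
	 * branches above):
	 *
	 *   rev. 16a targets: initiator port ID = initiator_ext | port GUID
	 *                     target port ID    = id_ext        | ioc_guid
	 *
	 *   rev. 10 targets:  initiator port ID = port GUID     | initiator_ext
	 *                     target port ID    = ioc_guid      | id_ext
	 */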
773
aef9ec39
RD
774 /*
775 * Topspin/Cisco SRP targets will reject our login unless we
01cb9bcb
IR
776 * zero out the first 8 bytes of our initiator port ID and set
777 * the second 8 bytes to the local node GUID.
aef9ec39 778 */
5d7cbfd6 779 if (srp_target_is_topspin(target)) {
7aa54bd7
DD
780 shost_printk(KERN_DEBUG, target->scsi_host,
781 PFX "Topspin/Cisco initiator port ID workaround "
782 "activated for target GUID %016llx\n",
45c37cad 783 be64_to_cpu(target->ioc_guid));
aef9ec39 784 memset(req->priv.initiator_port_id, 0, 8);
01cb9bcb 785 memcpy(req->priv.initiator_port_id + 8,
05321937 786 &target->srp_host->srp_dev->dev->node_guid, 8);
aef9ec39 787 }
aef9ec39 788
509c07bc 789 status = ib_send_cm_req(ch->cm_id, &req->param);
aef9ec39
RD
790
791 kfree(req);
792
793 return status;
794}
795
ef6c49d8
BVA
796static bool srp_queue_remove_work(struct srp_target_port *target)
797{
798 bool changed = false;
799
800 spin_lock_irq(&target->lock);
801 if (target->state != SRP_TARGET_REMOVED) {
802 target->state = SRP_TARGET_REMOVED;
803 changed = true;
804 }
805 spin_unlock_irq(&target->lock);
806
807 if (changed)
bcc05910 808 queue_work(srp_remove_wq, &target->remove_work);
ef6c49d8
BVA
809
810 return changed;
811}
812
aef9ec39
RD
813static void srp_disconnect_target(struct srp_target_port *target)
814{
d92c0da7
BVA
815 struct srp_rdma_ch *ch;
816 int i;
509c07bc 817
c014c8cd 818 /* XXX should send SRP_I_LOGOUT request */
aef9ec39 819
c014c8cd
BVA
820 for (i = 0; i < target->ch_count; i++) {
821 ch = &target->ch[i];
822 ch->connected = false;
823 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
824 shost_printk(KERN_DEBUG, target->scsi_host,
825 PFX "Sending CM DREQ failed\n");
294c875a 826 }
e6581056 827 }
aef9ec39
RD
828}
829
509c07bc
BVA
830static void srp_free_req_data(struct srp_target_port *target,
831 struct srp_rdma_ch *ch)
8f26c9ff 832{
5cfb1782
BVA
833 struct srp_device *dev = target->srp_host->srp_dev;
834 struct ib_device *ibdev = dev->dev;
8f26c9ff
DD
835 struct srp_request *req;
836 int i;
837
47513cf4 838 if (!ch->req_ring)
4d73f95f
BVA
839 return;
840
841 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 842 req = &ch->req_ring[i];
5cfb1782
BVA
843 if (dev->use_fast_reg)
844 kfree(req->fr_list);
845 else
846 kfree(req->fmr_list);
8f26c9ff 847 kfree(req->map_page);
c07d424d
DD
848 if (req->indirect_dma_addr) {
849 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
850 target->indirect_size,
851 DMA_TO_DEVICE);
852 }
853 kfree(req->indirect_desc);
8f26c9ff 854 }
4d73f95f 855
509c07bc
BVA
856 kfree(ch->req_ring);
857 ch->req_ring = NULL;
8f26c9ff
DD
858}
859
509c07bc 860static int srp_alloc_req_data(struct srp_rdma_ch *ch)
b81d00bd 861{
509c07bc 862 struct srp_target_port *target = ch->target;
b81d00bd
BVA
863 struct srp_device *srp_dev = target->srp_host->srp_dev;
864 struct ib_device *ibdev = srp_dev->dev;
865 struct srp_request *req;
5cfb1782 866 void *mr_list;
b81d00bd
BVA
867 dma_addr_t dma_addr;
868 int i, ret = -ENOMEM;
869
509c07bc
BVA
870 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
871 GFP_KERNEL);
872 if (!ch->req_ring)
4d73f95f
BVA
873 goto out;
874
875 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 876 req = &ch->req_ring[i];
5cfb1782
BVA
877 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
878 GFP_KERNEL);
879 if (!mr_list)
880 goto out;
881 if (srp_dev->use_fast_reg)
882 req->fr_list = mr_list;
883 else
884 req->fmr_list = mr_list;
52ede08f 885 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
d1b4289e 886 sizeof(void *), GFP_KERNEL);
5cfb1782
BVA
887 if (!req->map_page)
888 goto out;
b81d00bd 889 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
5cfb1782 890 if (!req->indirect_desc)
b81d00bd
BVA
891 goto out;
892
893 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
894 target->indirect_size,
895 DMA_TO_DEVICE);
896 if (ib_dma_mapping_error(ibdev, dma_addr))
897 goto out;
898
899 req->indirect_dma_addr = dma_addr;
b81d00bd
BVA
900 }
901 ret = 0;
902
903out:
904 return ret;
905}
906
683b159a
BVA
907/**
908 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
909 * @shost: SCSI host whose attributes to remove from sysfs.
910 *
911 * Note: Any attributes defined in the host template and that did not exist
912 * before invocation of this function will be ignored.
913 */
914static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
915{
916 struct device_attribute **attr;
917
918 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
919 device_remove_file(&shost->shost_dev, *attr);
920}
921
ee12d6a8
BVA
922static void srp_remove_target(struct srp_target_port *target)
923{
d92c0da7
BVA
924 struct srp_rdma_ch *ch;
925 int i;
509c07bc 926
ef6c49d8
BVA
927 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
928
ee12d6a8 929 srp_del_scsi_host_attr(target->scsi_host);
9dd69a60 930 srp_rport_get(target->rport);
ee12d6a8
BVA
931 srp_remove_host(target->scsi_host);
932 scsi_remove_host(target->scsi_host);
93079162 933 srp_stop_rport_timers(target->rport);
ef6c49d8 934 srp_disconnect_target(target);
d92c0da7
BVA
935 for (i = 0; i < target->ch_count; i++) {
936 ch = &target->ch[i];
937 srp_free_ch_ib(target, ch);
938 }
c1120f89 939 cancel_work_sync(&target->tl_err_work);
9dd69a60 940 srp_rport_put(target->rport);
d92c0da7
BVA
941 for (i = 0; i < target->ch_count; i++) {
942 ch = &target->ch[i];
943 srp_free_req_data(target, ch);
944 }
945 kfree(target->ch);
946 target->ch = NULL;
65d7dd2f
VP
947
948 spin_lock(&target->srp_host->target_lock);
949 list_del(&target->list);
950 spin_unlock(&target->srp_host->target_lock);
951
ee12d6a8
BVA
952 scsi_host_put(target->scsi_host);
953}
954
c4028958 955static void srp_remove_work(struct work_struct *work)
aef9ec39 956{
c4028958 957 struct srp_target_port *target =
ef6c49d8 958 container_of(work, struct srp_target_port, remove_work);
aef9ec39 959
ef6c49d8 960 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
aef9ec39 961
96fc248a 962 srp_remove_target(target);
aef9ec39
RD
963}
964
dc1bdbd9
BVA
965static void srp_rport_delete(struct srp_rport *rport)
966{
967 struct srp_target_port *target = rport->lld_data;
968
969 srp_queue_remove_work(target);
970}
971
c014c8cd
BVA
972/**
973 * srp_connected_ch() - number of connected channels
974 * @target: SRP target port.
975 */
976static int srp_connected_ch(struct srp_target_port *target)
977{
978 int i, c = 0;
979
980 for (i = 0; i < target->ch_count; i++)
981 c += target->ch[i].connected;
982
983 return c;
984}
985
d92c0da7 986static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
aef9ec39 987{
509c07bc 988 struct srp_target_port *target = ch->target;
aef9ec39
RD
989 int ret;
990
c014c8cd 991 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
294c875a 992
509c07bc 993 ret = srp_lookup_path(ch);
aef9ec39
RD
994 if (ret)
995 return ret;
996
997 while (1) {
509c07bc 998 init_completion(&ch->done);
d92c0da7 999 ret = srp_send_req(ch, multich);
aef9ec39
RD
1000 if (ret)
1001 return ret;
509c07bc 1002 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
1003 if (ret < 0)
1004 return ret;
aef9ec39
RD
1005
1006 /*
1007 * The CM event handling code will set status to
1008 * SRP_PORT_REDIRECT if we get a port redirect REJ
1009 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1010 * redirect REJ back.
1011 */
509c07bc 1012 switch (ch->status) {
aef9ec39 1013 case 0:
c014c8cd 1014 ch->connected = true;
aef9ec39
RD
1015 return 0;
1016
1017 case SRP_PORT_REDIRECT:
509c07bc 1018 ret = srp_lookup_path(ch);
aef9ec39
RD
1019 if (ret)
1020 return ret;
1021 break;
1022
1023 case SRP_DLID_REDIRECT:
1024 break;
1025
9fe4bcf4 1026 case SRP_STALE_CONN:
9fe4bcf4 1027 shost_printk(KERN_ERR, target->scsi_host, PFX
205619f2 1028 "giving up on stale connection\n");
509c07bc
BVA
1029 ch->status = -ECONNRESET;
1030 return ch->status;
9fe4bcf4 1031
aef9ec39 1032 default:
509c07bc 1033 return ch->status;
aef9ec39
RD
1034 }
1035 }
1036}
1037
509c07bc 1038static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
5cfb1782
BVA
1039{
1040 struct ib_send_wr *bad_wr;
1041 struct ib_send_wr wr = {
1042 .opcode = IB_WR_LOCAL_INV,
1043 .wr_id = LOCAL_INV_WR_ID_MASK,
1044 .next = NULL,
1045 .num_sge = 0,
1046 .send_flags = 0,
1047 .ex.invalidate_rkey = rkey,
1048 };
1049
509c07bc 1050 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
1051}
1052
d945e1df 1053static void srp_unmap_data(struct scsi_cmnd *scmnd,
509c07bc 1054 struct srp_rdma_ch *ch,
d945e1df
RD
1055 struct srp_request *req)
1056{
509c07bc 1057 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1058 struct srp_device *dev = target->srp_host->srp_dev;
1059 struct ib_device *ibdev = dev->dev;
1060 int i, res;
8f26c9ff 1061
bb350d1d 1062 if (!scsi_sglist(scmnd) ||
d945e1df
RD
1063 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1064 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1065 return;
1066
5cfb1782
BVA
1067 if (dev->use_fast_reg) {
1068 struct srp_fr_desc **pfr;
1069
1070 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
509c07bc 1071 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
5cfb1782
BVA
1072 if (res < 0) {
1073 shost_printk(KERN_ERR, target->scsi_host, PFX
1074 "Queueing INV WR for rkey %#x failed (%d)\n",
1075 (*pfr)->mr->rkey, res);
1076 queue_work(system_long_wq,
1077 &target->tl_err_work);
1078 }
1079 }
1080 if (req->nmdesc)
509c07bc 1081 srp_fr_pool_put(ch->fr_pool, req->fr_list,
5cfb1782 1082 req->nmdesc);
002f1567 1083 } else if (dev->use_fmr) {
5cfb1782
BVA
1084 struct ib_pool_fmr **pfmr;
1085
1086 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1087 ib_fmr_pool_unmap(*pfmr);
1088 }
f5358a17 1089
8f26c9ff
DD
1090 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1091 scmnd->sc_data_direction);
d945e1df
RD
1092}
1093
22032991
BVA
1094/**
1095 * srp_claim_req - Take ownership of the scmnd associated with a request.
509c07bc 1096 * @ch: SRP RDMA channel.
22032991 1097 * @req: SRP request.
b3fe628d 1098 * @sdev: If not NULL, only take ownership for this SCSI device.
22032991
BVA
1099 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1100 * ownership of @req->scmnd if it equals @scmnd.
1101 *
1102 * Return value:
1103 * Either NULL or a pointer to the SCSI command the caller became owner of.
1104 */
509c07bc 1105static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
22032991 1106 struct srp_request *req,
b3fe628d 1107 struct scsi_device *sdev,
22032991
BVA
1108 struct scsi_cmnd *scmnd)
1109{
1110 unsigned long flags;
1111
509c07bc 1112 spin_lock_irqsave(&ch->lock, flags);
b3fe628d
BVA
1113 if (req->scmnd &&
1114 (!sdev || req->scmnd->device == sdev) &&
1115 (!scmnd || req->scmnd == scmnd)) {
22032991
BVA
1116 scmnd = req->scmnd;
1117 req->scmnd = NULL;
22032991
BVA
1118 } else {
1119 scmnd = NULL;
1120 }
509c07bc 1121 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1122
1123 return scmnd;
1124}
1125
1126/**
1127 * srp_free_req() - Unmap data and add request to the free request list.
509c07bc 1128 * @ch: SRP RDMA channel.
af24663b
BVA
1129 * @req: Request to be freed.
1130 * @scmnd: SCSI command associated with @req.
1131 * @req_lim_delta: Amount to be added to @target->req_lim.
22032991 1132 */
509c07bc
BVA
1133static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1134 struct scsi_cmnd *scmnd, s32 req_lim_delta)
526b4caa 1135{
94a9174c
BVA
1136 unsigned long flags;
1137
509c07bc 1138 srp_unmap_data(scmnd, ch, req);
22032991 1139
509c07bc
BVA
1140 spin_lock_irqsave(&ch->lock, flags);
1141 ch->req_lim += req_lim_delta;
509c07bc 1142 spin_unlock_irqrestore(&ch->lock, flags);
526b4caa
IR
1143}
1144
509c07bc
BVA
1145static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1146 struct scsi_device *sdev, int result)
526b4caa 1147{
509c07bc 1148 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
22032991
BVA
1149
1150 if (scmnd) {
509c07bc 1151 srp_free_req(ch, req, scmnd, 0);
ed9b2264 1152 scmnd->result = result;
22032991 1153 scmnd->scsi_done(scmnd);
22032991 1154 }
526b4caa
IR
1155}
1156
ed9b2264 1157static void srp_terminate_io(struct srp_rport *rport)
aef9ec39 1158{
ed9b2264 1159 struct srp_target_port *target = rport->lld_data;
d92c0da7 1160 struct srp_rdma_ch *ch;
b3fe628d
BVA
1161 struct Scsi_Host *shost = target->scsi_host;
1162 struct scsi_device *sdev;
d92c0da7 1163 int i, j;
ed9b2264 1164
b3fe628d
BVA
1165 /*
1166 * Invoking srp_terminate_io() while srp_queuecommand() is running
1167 * is not safe. Hence the warning statement below.
1168 */
1169 shost_for_each_device(sdev, shost)
1170 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1171
d92c0da7
BVA
1172 for (i = 0; i < target->ch_count; i++) {
1173 ch = &target->ch[i];
509c07bc 1174
d92c0da7
BVA
1175 for (j = 0; j < target->req_ring_size; ++j) {
1176 struct srp_request *req = &ch->req_ring[j];
1177
1178 srp_finish_req(ch, req, NULL,
1179 DID_TRANSPORT_FAILFAST << 16);
1180 }
ed9b2264
BVA
1181 }
1182}
aef9ec39 1183
ed9b2264
BVA
1184/*
1185 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1186 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1187 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to achieve that is not to call this function
 * directly, but to call srp_reconnect_rport() instead, since that function
1190 * serializes calls of this function via rport->mutex and also blocks
1191 * srp_queuecommand() calls before invoking this function.
1192 */
1193static int srp_rport_reconnect(struct srp_rport *rport)
1194{
1195 struct srp_target_port *target = rport->lld_data;
d92c0da7
BVA
1196 struct srp_rdma_ch *ch;
1197 int i, j, ret = 0;
1198 bool multich = false;
09be70a2 1199
aef9ec39 1200 srp_disconnect_target(target);
34aa654e
BVA
1201
1202 if (target->state == SRP_TARGET_SCANNING)
1203 return -ENODEV;
1204
aef9ec39 1205 /*
c7c4e7ff
BVA
1206 * Now get a new local CM ID so that we avoid confusing the target in
1207 * case things are really fouled up. Doing so also ensures that all CM
1208 * callbacks will have finished before a new QP is allocated.
aef9ec39 1209 */
d92c0da7
BVA
1210 for (i = 0; i < target->ch_count; i++) {
1211 ch = &target->ch[i];
d92c0da7 1212 ret += srp_new_cm_id(ch);
536ae14e 1213 }
d92c0da7
BVA
1214 for (i = 0; i < target->ch_count; i++) {
1215 ch = &target->ch[i];
d92c0da7
BVA
1216 for (j = 0; j < target->req_ring_size; ++j) {
1217 struct srp_request *req = &ch->req_ring[j];
aef9ec39 1218
d92c0da7
BVA
1219 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1220 }
1221 }
1222 for (i = 0; i < target->ch_count; i++) {
1223 ch = &target->ch[i];
d92c0da7
BVA
1224 /*
1225 * Whether or not creating a new CM ID succeeded, create a new
1226 * QP. This guarantees that all completion callback function
1227 * invocations have finished before request resetting starts.
1228 */
1229 ret += srp_create_ch_ib(ch);
aef9ec39 1230
d92c0da7
BVA
1231 INIT_LIST_HEAD(&ch->free_tx);
1232 for (j = 0; j < target->queue_size; ++j)
1233 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1234 }
8de9fe3a
BVA
1235
1236 target->qp_in_error = false;
1237
d92c0da7
BVA
1238 for (i = 0; i < target->ch_count; i++) {
1239 ch = &target->ch[i];
bbac5ccf 1240 if (ret)
d92c0da7 1241 break;
d92c0da7
BVA
1242 ret = srp_connect_ch(ch, multich);
1243 multich = true;
1244 }
09be70a2 1245
ed9b2264
BVA
1246 if (ret == 0)
1247 shost_printk(KERN_INFO, target->scsi_host,
1248 PFX "reconnect succeeded\n");
aef9ec39
RD
1249
1250 return ret;
1251}
1252
8f26c9ff
DD
1253static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1254 unsigned int dma_len, u32 rkey)
f5358a17 1255{
8f26c9ff 1256 struct srp_direct_buf *desc = state->desc;
f5358a17 1257
3ae95da8
BVA
1258 WARN_ON_ONCE(!dma_len);
1259
8f26c9ff
DD
1260 desc->va = cpu_to_be64(dma_addr);
1261 desc->key = cpu_to_be32(rkey);
1262 desc->len = cpu_to_be32(dma_len);
f5358a17 1263
8f26c9ff
DD
1264 state->total_len += dma_len;
1265 state->desc++;
1266 state->ndesc++;
1267}
559ce8f1 1268
8f26c9ff 1269static int srp_map_finish_fmr(struct srp_map_state *state,
509c07bc 1270 struct srp_rdma_ch *ch)
8f26c9ff 1271{
186fbc66
BVA
1272 struct srp_target_port *target = ch->target;
1273 struct srp_device *dev = target->srp_host->srp_dev;
8f26c9ff
DD
1274 struct ib_pool_fmr *fmr;
1275 u64 io_addr = 0;
85507bcc 1276
f731ed62
BVA
1277 if (state->fmr.next >= state->fmr.end)
1278 return -ENOMEM;
1279
26630e8a
SG
1280 WARN_ON_ONCE(!dev->use_fmr);
1281
1282 if (state->npages == 0)
1283 return 0;
1284
1285 if (state->npages == 1 && target->global_mr) {
1286 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1287 target->global_mr->rkey);
1288 goto reset_state;
1289 }
1290
509c07bc 1291 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
8f26c9ff
DD
1292 state->npages, io_addr);
1293 if (IS_ERR(fmr))
1294 return PTR_ERR(fmr);
f5358a17 1295
f731ed62 1296 *state->fmr.next++ = fmr;
52ede08f 1297 state->nmdesc++;
f5358a17 1298
186fbc66
BVA
1299 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1300 state->dma_len, fmr->fmr->rkey);
539dde6f 1301
26630e8a
SG
1302reset_state:
1303 state->npages = 0;
1304 state->dma_len = 0;
1305
8f26c9ff
DD
1306 return 0;
1307}
1308
5cfb1782 1309static int srp_map_finish_fr(struct srp_map_state *state,
509c07bc 1310 struct srp_rdma_ch *ch)
5cfb1782 1311{
509c07bc 1312 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1313 struct srp_device *dev = target->srp_host->srp_dev;
1314 struct ib_send_wr *bad_wr;
f7f7aab1 1315 struct ib_reg_wr wr;
5cfb1782
BVA
1316 struct srp_fr_desc *desc;
1317 u32 rkey;
f7f7aab1 1318 int n, err;
5cfb1782 1319
f731ed62
BVA
1320 if (state->fr.next >= state->fr.end)
1321 return -ENOMEM;
1322
26630e8a
SG
1323 WARN_ON_ONCE(!dev->use_fast_reg);
1324
f7f7aab1 1325 if (state->sg_nents == 0)
26630e8a
SG
1326 return 0;
1327
f7f7aab1
SG
1328 if (state->sg_nents == 1 && target->global_mr) {
1329 srp_map_desc(state, sg_dma_address(state->sg),
1330 sg_dma_len(state->sg),
26630e8a 1331 target->global_mr->rkey);
f7f7aab1 1332 return 1;
26630e8a
SG
1333 }
1334
509c07bc 1335 desc = srp_fr_pool_get(ch->fr_pool);
5cfb1782
BVA
1336 if (!desc)
1337 return -ENOMEM;
1338
1339 rkey = ib_inc_rkey(desc->mr->rkey);
1340 ib_update_fast_reg_key(desc->mr, rkey);
1341
f7f7aab1
SG
1342 n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
1343 dev->mr_page_size);
1344 if (unlikely(n < 0))
1345 return n;
5cfb1782 1346
f7f7aab1
SG
1347 wr.wr.next = NULL;
1348 wr.wr.opcode = IB_WR_REG_MR;
e622f2f4 1349 wr.wr.wr_id = FAST_REG_WR_ID_MASK;
f7f7aab1
SG
1350 wr.wr.num_sge = 0;
1351 wr.wr.send_flags = 0;
1352 wr.mr = desc->mr;
1353 wr.key = desc->mr->rkey;
1354 wr.access = (IB_ACCESS_LOCAL_WRITE |
1355 IB_ACCESS_REMOTE_READ |
1356 IB_ACCESS_REMOTE_WRITE);
5cfb1782 1357
f731ed62 1358 *state->fr.next++ = desc;
5cfb1782
BVA
1359 state->nmdesc++;
1360
f7f7aab1
SG
1361 srp_map_desc(state, desc->mr->iova,
1362 desc->mr->length, desc->mr->rkey);
5cfb1782 1363
26630e8a 1364 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
f7f7aab1 1365 if (unlikely(err))
26630e8a
SG
1366 return err;
1367
f7f7aab1 1368 return n;
5cfb1782
BVA
1369}
1370
539dde6f 1371static int srp_finish_mapping(struct srp_map_state *state,
509c07bc 1372 struct srp_rdma_ch *ch)
539dde6f 1373{
509c07bc 1374 struct srp_target_port *target = ch->target;
002f1567 1375 struct srp_device *dev = target->srp_host->srp_dev;
539dde6f 1376
26630e8a
SG
1377 return dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
1378 srp_map_finish_fmr(state, ch);
539dde6f
BVA
1379}
1380
8f26c9ff 1381static int srp_map_sg_entry(struct srp_map_state *state,
509c07bc 1382 struct srp_rdma_ch *ch,
3ae95da8 1383 struct scatterlist *sg, int sg_index)
8f26c9ff 1384{
509c07bc 1385 struct srp_target_port *target = ch->target;
8f26c9ff
DD
1386 struct srp_device *dev = target->srp_host->srp_dev;
1387 struct ib_device *ibdev = dev->dev;
1388 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1389 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
3ae95da8 1390 unsigned int len = 0;
8f26c9ff
DD
1391 int ret;
1392
3ae95da8 1393 WARN_ON_ONCE(!dma_len);
f5358a17 1394
8f26c9ff 1395 while (dma_len) {
5cfb1782
BVA
1396 unsigned offset = dma_addr & ~dev->mr_page_mask;
1397 if (state->npages == dev->max_pages_per_mr || offset != 0) {
f7f7aab1 1398 ret = srp_map_finish_fmr(state, ch);
8f26c9ff
DD
1399 if (ret)
1400 return ret;
8f26c9ff
DD
1401 }
1402
5cfb1782 1403 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
f5358a17 1404
8f26c9ff
DD
1405 if (!state->npages)
1406 state->base_dma_addr = dma_addr;
5cfb1782 1407 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
52ede08f 1408 state->dma_len += len;
8f26c9ff
DD
1409 dma_addr += len;
1410 dma_len -= len;
1411 }
1412
5cfb1782
BVA
1413 /*
1414 * If the last entry of the MR wasn't a full page, then we need to
8f26c9ff
DD
1415 * close it out and start a new one -- we can only merge at page
1416 * boundries.
1417 */
1418 ret = 0;
0e0d3a48 1419 if (len != dev->mr_page_size)
f7f7aab1 1420 ret = srp_map_finish_fmr(state, ch);
f5358a17
RD
1421 return ret;
1422}
1423
26630e8a
SG
1424static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1425 struct srp_request *req, struct scatterlist *scat,
1426 int count)
76bc1e1d 1427{
76bc1e1d 1428 struct scatterlist *sg;
0e0d3a48 1429 int i, ret;
76bc1e1d 1430
26630e8a
SG
1431 state->desc = req->indirect_desc;
1432 state->pages = req->map_page;
1433 state->fmr.next = req->fmr_list;
1434 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1435
1436 for_each_sg(scat, sg, count, i) {
1437 ret = srp_map_sg_entry(state, ch, sg, i);
1438 if (ret)
1439 return ret;
5cfb1782 1440 }
76bc1e1d 1441
f7f7aab1 1442 ret = srp_map_finish_fmr(state, ch);
26630e8a
SG
1443 if (ret)
1444 return ret;
1445
1446 req->nmdesc = state->nmdesc;
1447
1448 return 0;
1449}
1450
1451static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1452 struct srp_request *req, struct scatterlist *scat,
1453 int count)
1454{
26630e8a 1455 state->desc = req->indirect_desc;
f7f7aab1
SG
1456 state->fr.next = req->fr_list;
1457 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1458 state->sg = scat;
1459 state->sg_nents = scsi_sg_count(req->scmnd);
26630e8a 1460
f7f7aab1
SG
1461 while (state->sg_nents) {
1462 int i, n;
26630e8a 1463
f7f7aab1
SG
1464 n = srp_map_finish_fr(state, ch);
1465 if (unlikely(n < 0))
1466 return n;
1467
1468 state->sg_nents -= n;
1469 for (i = 0; i < n; i++)
1470 state->sg = sg_next(state->sg);
1471 }
26630e8a
SG
1472
1473 req->nmdesc = state->nmdesc;
1474
1475 return 0;
1476}
1477
1478static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1479 struct srp_request *req, struct scatterlist *scat,
1480 int count)
1481{
1482 struct srp_target_port *target = ch->target;
1483 struct srp_device *dev = target->srp_host->srp_dev;
1484 struct scatterlist *sg;
1485 int i;
1486
1487 state->desc = req->indirect_desc;
1488 for_each_sg(scat, sg, count, i) {
1489 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1490 ib_sg_dma_len(dev->dev, sg),
1491 target->global_mr->rkey);
0e0d3a48 1492 }
76bc1e1d 1493
52ede08f 1494 req->nmdesc = state->nmdesc;
5cfb1782 1495
26630e8a 1496 return 0;
76bc1e1d
BVA
1497}
1498
330179f2
BVA
1499/*
1500 * Register the indirect data buffer descriptor with the HCA.
1501 *
1502 * Note: since the indirect data buffer descriptor has been allocated with
1503 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1504 * memory buffer.
1505 */
1506static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1507 void **next_mr, void **end_mr, u32 idb_len,
1508 __be32 *idb_rkey)
1509{
1510 struct srp_target_port *target = ch->target;
1511 struct srp_device *dev = target->srp_host->srp_dev;
1512 struct srp_map_state state;
1513 struct srp_direct_buf idb_desc;
1514 u64 idb_pages[1];
f7f7aab1 1515 struct scatterlist idb_sg[1];
330179f2
BVA
1516 int ret;
1517
1518 memset(&state, 0, sizeof(state));
1519 memset(&idb_desc, 0, sizeof(idb_desc));
1520 state.gen.next = next_mr;
1521 state.gen.end = end_mr;
1522 state.desc = &idb_desc;
330179f2
BVA
1523 state.base_dma_addr = req->indirect_dma_addr;
1524 state.dma_len = idb_len;
f7f7aab1
SG
1525
1526 if (dev->use_fast_reg) {
1527 state.sg = idb_sg;
1528 state.sg_nents = 1;
1529 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1530 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1531 ret = srp_map_finish_fr(&state, ch);
1532 if (ret < 0)
1533 return ret;
1534 } else if (dev->use_fmr) {
1535 state.pages = idb_pages;
1536 state.pages[0] = (req->indirect_dma_addr &
1537 dev->mr_page_mask);
1538 state.npages = 1;
1539 ret = srp_map_finish_fmr(&state, ch);
1540 if (ret < 0)
1541 return ret;
1542 } else {
1543 return -EINVAL;
1544 }
330179f2
BVA
1545
1546 *idb_rkey = idb_desc.key;
1547
f7f7aab1 1548 return 0;
330179f2
BVA
1549}
1550
509c07bc 1551static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1552 struct srp_request *req)
1553{
509c07bc 1554 struct srp_target_port *target = ch->target;
76bc1e1d 1555 struct scatterlist *scat;
aef9ec39 1556 struct srp_cmd *cmd = req->cmd->buf;
330179f2 1557 int len, nents, count, ret;
85507bcc
RC
1558 struct srp_device *dev;
1559 struct ib_device *ibdev;
8f26c9ff
DD
1560 struct srp_map_state state;
1561 struct srp_indirect_buf *indirect_hdr;
330179f2
BVA
1562 u32 idb_len, table_len;
1563 __be32 idb_rkey;
8f26c9ff 1564 u8 fmt;
aef9ec39 1565
bb350d1d 1566 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
aef9ec39
RD
1567 return sizeof (struct srp_cmd);
1568
1569 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1570 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1571 shost_printk(KERN_WARNING, target->scsi_host,
1572 PFX "Unhandled data direction %d\n",
1573 scmnd->sc_data_direction);
aef9ec39
RD
1574 return -EINVAL;
1575 }
1576
bb350d1d
FT
1577 nents = scsi_sg_count(scmnd);
1578 scat = scsi_sglist(scmnd);
aef9ec39 1579
05321937 1580 dev = target->srp_host->srp_dev;
85507bcc
RC
1581 ibdev = dev->dev;
1582
1583 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1584 if (unlikely(count == 0))
1585 return -EIO;
f5358a17
RD
1586
1587 fmt = SRP_DATA_DESC_DIRECT;
1588 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
aef9ec39 1589
03f6fb93 1590 if (count == 1 && target->global_mr) {
f5358a17
RD
1591 /*
1592 * The midlayer only generated a single gather/scatter
1593 * entry, or DMA mapping coalesced everything to a
1594 * single entry. So a direct descriptor along with
1595 * the DMA MR suffices.
1596 */
cf368713 1597 struct srp_direct_buf *buf = (void *) cmd->add_data;
aef9ec39 1598
85507bcc 1599 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
03f6fb93 1600 buf->key = cpu_to_be32(target->global_mr->rkey);
85507bcc 1601 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
8f26c9ff 1602
52ede08f 1603 req->nmdesc = 0;
8f26c9ff
DD
1604 goto map_complete;
1605 }
1606
5cfb1782
BVA
1607 /*
1608 * We have more than one scatter/gather entry, so build our indirect
1609 * descriptor table, trying to merge as many entries as we can.
8f26c9ff
DD
1610 */
1611 indirect_hdr = (void *) cmd->add_data;
1612
c07d424d
DD
1613 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1614 target->indirect_size, DMA_TO_DEVICE);
1615
8f26c9ff 1616 memset(&state, 0, sizeof(state));
26630e8a
SG
1617 if (dev->use_fast_reg)
1618 srp_map_sg_fr(&state, ch, req, scat, count);
1619 else if (dev->use_fmr)
1620 srp_map_sg_fmr(&state, ch, req, scat, count);
1621 else
1622 srp_map_sg_dma(&state, ch, req, scat, count);
cf368713 1623
c07d424d
DD
1624 /* We've mapped the request, now pull as much of the indirect
1625 * descriptor table as we can into the command buffer. If this
1626 * target is not using an external indirect table, we are
1627 * guaranteed to fit into the command, as the SCSI layer won't
1628 * give us more S/G entries than we allow.
8f26c9ff 1629 */
8f26c9ff 1630 if (state.ndesc == 1) {
5cfb1782
BVA
1631 /*
1632 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1633 * so use a direct descriptor.
1634 */
1635 struct srp_direct_buf *buf = (void *) cmd->add_data;
cf368713 1636
c07d424d 1637 *buf = req->indirect_desc[0];
8f26c9ff 1638 goto map_complete;
aef9ec39
RD
1639 }
1640
c07d424d
DD
1641 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1642 !target->allow_ext_sg)) {
1643 shost_printk(KERN_ERR, target->scsi_host,
1644 "Could not fit S/G list into SRP_CMD\n");
1645 return -EIO;
1646 }
1647
1648 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff 1649 table_len = state.ndesc * sizeof (struct srp_direct_buf);
330179f2 1650 idb_len = sizeof(struct srp_indirect_buf) + table_len;
8f26c9ff
DD
1651
1652 fmt = SRP_DATA_DESC_INDIRECT;
1653 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1654 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1655
c07d424d
DD
1656 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1657 count * sizeof (struct srp_direct_buf));
8f26c9ff 1658
03f6fb93 1659 if (!target->global_mr) {
330179f2
BVA
1660 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1661 idb_len, &idb_rkey);
1662 if (ret < 0)
1663 return ret;
1664 req->nmdesc++;
1665 } else {
03f6fb93 1666 idb_rkey = target->global_mr->rkey;
330179f2
BVA
1667 }
1668
c07d424d 1669 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
330179f2 1670 indirect_hdr->table_desc.key = idb_rkey;
8f26c9ff
DD
1671 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1672 indirect_hdr->len = cpu_to_be32(state.total_len);
1673
1674 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1675 cmd->data_out_desc_cnt = count;
8f26c9ff 1676 else
c07d424d
DD
1677 cmd->data_in_desc_cnt = count;
1678
1679 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1680 DMA_TO_DEVICE);
8f26c9ff
DD
1681
1682map_complete:
aef9ec39
RD
1683 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1684 cmd->buf_fmt = fmt << 4;
1685 else
1686 cmd->buf_fmt = fmt;
1687
aef9ec39
RD
1688 return len;
1689}
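
/*
 * Summary of what srp_map_data() leaves behind in cmd->add_data (restating
 * the paths above):
 *
 *  - SRP_DATA_DESC_DIRECT: a single struct srp_direct_buf { va, key, len },
 *    used when the S/G list collapsed to one entry or one registered MR.
 *
 *  - SRP_DATA_DESC_INDIRECT: a struct srp_indirect_buf whose table_desc
 *    points at req->indirect_desc (registered via srp_map_idb() or exposed
 *    through the global rkey), followed by up to cmd_sg_cnt
 *    struct srp_direct_buf entries copied inline into the command.
 */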
1690
76c75b25
BVA
1691/*
1692 * Return an IU and possible credit to the free pool
1693 */
509c07bc 1694static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1695 enum srp_iu_type iu_type)
1696{
1697 unsigned long flags;
1698
509c07bc
BVA
1699 spin_lock_irqsave(&ch->lock, flags);
1700 list_add(&iu->list, &ch->free_tx);
76c75b25 1701 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1702 ++ch->req_lim;
1703 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1704}
1705
05a1d750 1706/*
509c07bc 1707 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1708 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1709 *
1710 * Note:
1711 * An upper limit for the number of allocated information units for each
1712 * request type is:
1713 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1714 * more than Scsi_Host.can_queue requests.
1715 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1716 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1717 * one unanswered SRP request to an initiator.
1718 */
509c07bc 1719static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
1720 enum srp_iu_type iu_type)
1721{
509c07bc 1722 struct srp_target_port *target = ch->target;
05a1d750
DD
1723 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1724 struct srp_iu *iu;
1725
509c07bc 1726 srp_send_completion(ch->send_cq, ch);
05a1d750 1727
509c07bc 1728 if (list_empty(&ch->free_tx))
05a1d750
DD
1729 return NULL;
1730
1731 /* Initiator responses to target requests do not consume credits */
76c75b25 1732 if (iu_type != SRP_IU_RSP) {
509c07bc 1733 if (ch->req_lim <= rsv) {
76c75b25
BVA
1734 ++target->zero_req_lim;
1735 return NULL;
1736 }
1737
509c07bc 1738 --ch->req_lim;
05a1d750
DD
1739 }
1740
509c07bc 1741 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1742 list_del(&iu->list);
05a1d750
DD
1743 return iu;
1744}
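/*
 * Usage sketch (not driver code): callers hold ch->lock around the
 * allocation and apply back-pressure when no IU or credit is available,
 * as srp_queuecommand() does further down:
 *
 *	spin_lock_irqsave(&ch->lock, flags);
 *	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
 *	spin_unlock_irqrestore(&ch->lock, flags);
 *	if (!iu)
 *		goto err;	(ends up as SCSI_MLQUEUE_HOST_BUSY)
 *
 * Credit example: once req_lim has dropped to SRP_TSK_MGMT_SQ_SIZE, a
 * SRP_IU_CMD allocation fails and zero_req_lim is incremented, whereas a
 * SRP_IU_TSK_MGMT allocation may still consume the reserved credits and
 * a SRP_IU_RSP never consumes credits at all.
 */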
1745
509c07bc 1746static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1747{
509c07bc 1748 struct srp_target_port *target = ch->target;
05a1d750
DD
1749 struct ib_sge list;
1750 struct ib_send_wr wr, *bad_wr;
05a1d750
DD
1751
1752 list.addr = iu->dma;
1753 list.length = len;
9af76271 1754 list.lkey = target->lkey;
05a1d750
DD
1755
1756 wr.next = NULL;
dcb4cb85 1757 wr.wr_id = (uintptr_t) iu;
05a1d750
DD
1758 wr.sg_list = &list;
1759 wr.num_sge = 1;
1760 wr.opcode = IB_WR_SEND;
1761 wr.send_flags = IB_SEND_SIGNALED;
1762
509c07bc 1763 return ib_post_send(ch->qp, &wr, &bad_wr);
05a1d750
DD
1764}
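/*
 * Note that every send is posted IB_SEND_SIGNALED with the IU pointer
 * stashed in wr_id, so each transmitted IU generates a send completion
 * from which srp_send_completion() below recovers the IU and returns it
 * to ch->free_tx without an explicit free call on the fast path.
 */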
1765
509c07bc 1766static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1767{
509c07bc 1768 struct srp_target_port *target = ch->target;
c996bb47 1769 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1770 struct ib_sge list;
c996bb47
BVA
1771
1772 list.addr = iu->dma;
1773 list.length = iu->size;
9af76271 1774 list.lkey = target->lkey;
c996bb47
BVA
1775
1776 wr.next = NULL;
dcb4cb85 1777 wr.wr_id = (uintptr_t) iu;
c996bb47
BVA
1778 wr.sg_list = &list;
1779 wr.num_sge = 1;
1780
509c07bc 1781 return ib_post_recv(ch->qp, &wr, &bad_wr);
c996bb47
BVA
1782}
1783
509c07bc 1784static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1785{
509c07bc 1786 struct srp_target_port *target = ch->target;
aef9ec39
RD
1787 struct srp_request *req;
1788 struct scsi_cmnd *scmnd;
1789 unsigned long flags;
aef9ec39 1790
aef9ec39 1791 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
1792 spin_lock_irqsave(&ch->lock, flags);
1793 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1794 spin_unlock_irqrestore(&ch->lock, flags);
94a9174c 1795
509c07bc 1796 ch->tsk_mgmt_status = -1;
f8b6e31e 1797 if (be32_to_cpu(rsp->resp_data_len) >= 4)
509c07bc
BVA
1798 ch->tsk_mgmt_status = rsp->data[3];
1799 complete(&ch->tsk_mgmt_done);
aef9ec39 1800 } else {
77f2c1a4
BVA
1801 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1802 if (scmnd) {
1803 req = (void *)scmnd->host_scribble;
1804 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1805 }
22032991 1806 if (!scmnd) {
7aa54bd7 1807 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
1808 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1809 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 1810
509c07bc
BVA
1811 spin_lock_irqsave(&ch->lock, flags);
1812 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1813 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1814
1815 return;
1816 }
aef9ec39
RD
1817 scmnd->result = rsp->status;
1818
1819 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1820 memcpy(scmnd->sense_buffer, rsp->data +
1821 be32_to_cpu(rsp->resp_data_len),
1822 min_t(int, be32_to_cpu(rsp->sense_data_len),
1823 SCSI_SENSE_BUFFERSIZE));
1824 }
1825
e714531a 1826 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1827 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
1828 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1829 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1830 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1831 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1832 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1833 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1834
509c07bc 1835 srp_free_req(ch, req, scmnd,
22032991
BVA
1836 be32_to_cpu(rsp->req_lim_delta));
1837
f8b6e31e
DD
1838 scmnd->host_scribble = NULL;
1839 scmnd->scsi_done(scmnd);
aef9ec39 1840 }
aef9ec39
RD
1841}
1842
509c07bc 1843static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
1844 void *rsp, int len)
1845{
509c07bc 1846 struct srp_target_port *target = ch->target;
76c75b25 1847 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
1848 unsigned long flags;
1849 struct srp_iu *iu;
76c75b25 1850 int err;
bb12588a 1851
509c07bc
BVA
1852 spin_lock_irqsave(&ch->lock, flags);
1853 ch->req_lim += req_delta;
1854 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1855 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 1856
bb12588a
DD
1857 if (!iu) {
1858 shost_printk(KERN_ERR, target->scsi_host, PFX
1859 "no IU available to send response\n");
76c75b25 1860 return 1;
bb12588a
DD
1861 }
1862
1863 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1864 memcpy(iu->buf, rsp, len);
1865 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1866
509c07bc 1867 err = srp_post_send(ch, iu, len);
76c75b25 1868 if (err) {
bb12588a
DD
1869 shost_printk(KERN_ERR, target->scsi_host, PFX
1870 "unable to post response: %d\n", err);
509c07bc 1871 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 1872 }
bb12588a 1873
bb12588a
DD
1874 return err;
1875}
1876
509c07bc 1877static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
1878 struct srp_cred_req *req)
1879{
1880 struct srp_cred_rsp rsp = {
1881 .opcode = SRP_CRED_RSP,
1882 .tag = req->tag,
1883 };
1884 s32 delta = be32_to_cpu(req->req_lim_delta);
1885
509c07bc
BVA
1886 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1887 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
1888 "problems processing SRP_CRED_REQ\n");
1889}
1890
509c07bc 1891static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
1892 struct srp_aer_req *req)
1893{
509c07bc 1894 struct srp_target_port *target = ch->target;
bb12588a
DD
1895 struct srp_aer_rsp rsp = {
1896 .opcode = SRP_AER_RSP,
1897 .tag = req->tag,
1898 };
1899 s32 delta = be32_to_cpu(req->req_lim_delta);
1900
1901 shost_printk(KERN_ERR, target->scsi_host, PFX
985aa495 1902 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
bb12588a 1903
509c07bc 1904 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
1905 shost_printk(KERN_ERR, target->scsi_host, PFX
1906 "problems processing SRP_AER_REQ\n");
1907}
1908
509c07bc 1909static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
aef9ec39 1910{
509c07bc 1911 struct srp_target_port *target = ch->target;
dcb4cb85 1912 struct ib_device *dev = target->srp_host->srp_dev->dev;
737b94eb 1913 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
c996bb47 1914 int res;
aef9ec39
RD
1915 u8 opcode;
1916
509c07bc 1917 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1918 DMA_FROM_DEVICE);
aef9ec39
RD
1919
1920 opcode = *(u8 *) iu->buf;
1921
1922 if (0) {
7aa54bd7
DD
1923 shost_printk(KERN_ERR, target->scsi_host,
1924 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
1925 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1926 iu->buf, wc->byte_len, true);
aef9ec39
RD
1927 }
1928
1929 switch (opcode) {
1930 case SRP_RSP:
509c07bc 1931 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
1932 break;
1933
bb12588a 1934 case SRP_CRED_REQ:
509c07bc 1935 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
1936 break;
1937
1938 case SRP_AER_REQ:
509c07bc 1939 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
1940 break;
1941
aef9ec39
RD
1942 case SRP_T_LOGOUT:
1943 /* XXX Handle target logout */
7aa54bd7
DD
1944 shost_printk(KERN_WARNING, target->scsi_host,
1945 PFX "Got target logout request\n");
aef9ec39
RD
1946 break;
1947
1948 default:
7aa54bd7
DD
1949 shost_printk(KERN_WARNING, target->scsi_host,
1950 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
1951 break;
1952 }
1953
509c07bc 1954 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1955 DMA_FROM_DEVICE);
c996bb47 1956
509c07bc 1957 res = srp_post_recv(ch, iu);
c996bb47
BVA
1958 if (res != 0)
1959 shost_printk(KERN_ERR, target->scsi_host,
1960 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
1961}
1962
c1120f89
BVA
1963/**
1964 * srp_tl_err_work() - handle a transport layer error
af24663b 1965 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
1966 *
1967 * Note: This function may get invoked before the rport has been created,
1968 * hence the target->rport test.
1969 */
1970static void srp_tl_err_work(struct work_struct *work)
1971{
1972 struct srp_target_port *target;
1973
1974 target = container_of(work, struct srp_target_port, tl_err_work);
1975 if (target->rport)
1976 srp_start_tl_fail_timers(target->rport);
1977}
1978
5cfb1782 1979static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
7dad6b2e 1980 bool send_err, struct srp_rdma_ch *ch)
948d1e88 1981{
7dad6b2e
BVA
1982 struct srp_target_port *target = ch->target;
1983
1984 if (wr_id == SRP_LAST_WR_ID) {
1985 complete(&ch->done);
1986 return;
1987 }
1988
c014c8cd 1989 if (ch->connected && !target->qp_in_error) {
5cfb1782
BVA
1990 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1991 shost_printk(KERN_ERR, target->scsi_host, PFX
57363d98
SG
1992 "LOCAL_INV failed with status %s (%d)\n",
1993 ib_wc_status_msg(wc_status), wc_status);
5cfb1782
BVA
1994 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1995 shost_printk(KERN_ERR, target->scsi_host, PFX
57363d98
SG
1996 "FAST_REG_MR failed status %s (%d)\n",
1997 ib_wc_status_msg(wc_status), wc_status);
5cfb1782
BVA
1998 } else {
1999 shost_printk(KERN_ERR, target->scsi_host,
57363d98 2000 PFX "failed %s status %s (%d) for iu %p\n",
5cfb1782 2001 send_err ? "send" : "receive",
57363d98
SG
2002 ib_wc_status_msg(wc_status), wc_status,
2003 (void *)(uintptr_t)wr_id);
5cfb1782 2004 }
c1120f89 2005 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 2006 }
948d1e88
BVA
2007 target->qp_in_error = true;
2008}
2009
509c07bc 2010static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
aef9ec39 2011{
509c07bc 2012 struct srp_rdma_ch *ch = ch_ptr;
aef9ec39 2013 struct ib_wc wc;
aef9ec39
RD
2014
2015 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2016 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88 2017 if (likely(wc.status == IB_WC_SUCCESS)) {
509c07bc 2018 srp_handle_recv(ch, &wc);
948d1e88 2019 } else {
7dad6b2e 2020 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
aef9ec39 2021 }
9c03dc9f
BVA
2022 }
2023}
2024
509c07bc 2025static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
9c03dc9f 2026{
509c07bc 2027 struct srp_rdma_ch *ch = ch_ptr;
9c03dc9f 2028 struct ib_wc wc;
dcb4cb85 2029 struct srp_iu *iu;
9c03dc9f
BVA
2030
2031 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88
BVA
2032 if (likely(wc.status == IB_WC_SUCCESS)) {
2033 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
509c07bc 2034 list_add(&iu->list, &ch->free_tx);
948d1e88 2035 } else {
7dad6b2e 2036 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
9c03dc9f 2037 }
aef9ec39
RD
2038 }
2039}
2040
76c75b25 2041static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 2042{
76c75b25 2043 struct srp_target_port *target = host_to_target(shost);
a95cadb9 2044 struct srp_rport *rport = target->rport;
509c07bc 2045 struct srp_rdma_ch *ch;
aef9ec39
RD
2046 struct srp_request *req;
2047 struct srp_iu *iu;
2048 struct srp_cmd *cmd;
85507bcc 2049 struct ib_device *dev;
76c75b25 2050 unsigned long flags;
77f2c1a4
BVA
2051 u32 tag;
2052 u16 idx;
d1b4289e 2053 int len, ret;
a95cadb9
BVA
2054 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2055
2056 /*
2057 * The SCSI EH thread is the only context from which srp_queuecommand()
2058 * can get invoked for blocked devices (SDEV_BLOCK /
2059 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2060 * locking the rport mutex if invoked from inside the SCSI EH.
2061 */
2062 if (in_scsi_eh)
2063 mutex_lock(&rport->mutex);
aef9ec39 2064
d1b4289e
BVA
2065 scmnd->result = srp_chkready(target->rport);
2066 if (unlikely(scmnd->result))
2067 goto err;
2ce19e72 2068
77f2c1a4
BVA
2069 WARN_ON_ONCE(scmnd->request->tag < 0);
2070 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2071 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
77f2c1a4
BVA
2072 idx = blk_mq_unique_tag_to_tag(tag);
2073 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2074 dev_name(&shost->shost_gendev), tag, idx,
2075 target->req_ring_size);
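	/*
	 * Sketch of the tag decomposition above (the blk-mq helpers pack the
	 * hardware queue index in the upper bits of the unique tag and the
	 * per-queue tag in the lower bits):
	 *
	 *	tag = blk_mq_unique_tag(scmnd->request);
	 *	hwq = blk_mq_unique_tag_to_hwq(tag);	selects target->ch[hwq]
	 *	idx = blk_mq_unique_tag_to_tag(tag);	selects ch->req_ring[idx]
	 *
	 * The same unique tag is placed in cmd->tag below and echoed back by
	 * the target in srp_rsp.tag, where srp_process_rsp() resolves it via
	 * scsi_host_find_tag().
	 */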
509c07bc
BVA
2076
2077 spin_lock_irqsave(&ch->lock, flags);
2078 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2079 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2080
77f2c1a4
BVA
2081 if (!iu)
2082 goto err;
2083
2084 req = &ch->req_ring[idx];
05321937 2085 dev = target->srp_host->srp_dev->dev;
49248644 2086 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 2087 DMA_TO_DEVICE);
aef9ec39 2088
f8b6e31e 2089 scmnd->host_scribble = (void *) req;
aef9ec39
RD
2090
2091 cmd = iu->buf;
2092 memset(cmd, 0, sizeof *cmd);
2093
2094 cmd->opcode = SRP_CMD;
985aa495 2095 int_to_scsilun(scmnd->device->lun, &cmd->lun);
77f2c1a4 2096 cmd->tag = tag;
aef9ec39
RD
2097 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2098
aef9ec39
RD
2099 req->scmnd = scmnd;
2100 req->cmd = iu;
aef9ec39 2101
509c07bc 2102 len = srp_map_data(scmnd, ch, req);
aef9ec39 2103 if (len < 0) {
7aa54bd7 2104 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2105 PFX "Failed to map data (%d)\n", len);
2106 /*
2107 * If we ran out of memory descriptors (-ENOMEM) because an
2108 * application is queuing many requests with more than
52ede08f 2109 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2110 * to reduce queue depth temporarily.
2111 */
2112 scmnd->result = len == -ENOMEM ?
2113 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2114 goto err_iu;
aef9ec39
RD
2115 }
2116
49248644 2117 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 2118 DMA_TO_DEVICE);
aef9ec39 2119
509c07bc 2120 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2121 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
2122 goto err_unmap;
2123 }
2124
d1b4289e
BVA
2125 ret = 0;
2126
a95cadb9
BVA
2127unlock_rport:
2128 if (in_scsi_eh)
2129 mutex_unlock(&rport->mutex);
2130
d1b4289e 2131 return ret;
aef9ec39
RD
2132
2133err_unmap:
509c07bc 2134 srp_unmap_data(scmnd, ch, req);
aef9ec39 2135
76c75b25 2136err_iu:
509c07bc 2137 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2138
024ca901
BVA
2139 /*
2140 * Prevent the loops that iterate over the request ring from
2141 * encountering a dangling SCSI command pointer.
2142 */
2143 req->scmnd = NULL;
2144
d1b4289e
BVA
2145err:
2146 if (scmnd->result) {
2147 scmnd->scsi_done(scmnd);
2148 ret = 0;
2149 } else {
2150 ret = SCSI_MLQUEUE_HOST_BUSY;
2151 }
a95cadb9 2152
d1b4289e 2153 goto unlock_rport;
aef9ec39
RD
2154}
2155
4d73f95f
BVA
2156/*
2157 * Note: the resources allocated in this function are freed in
509c07bc 2158 * srp_free_ch_ib().
4d73f95f 2159 */
509c07bc 2160static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2161{
509c07bc 2162 struct srp_target_port *target = ch->target;
aef9ec39
RD
2163 int i;
2164
509c07bc
BVA
2165 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2166 GFP_KERNEL);
2167 if (!ch->rx_ring)
4d73f95f 2168 goto err_no_ring;
509c07bc
BVA
2169 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2170 GFP_KERNEL);
2171 if (!ch->tx_ring)
4d73f95f
BVA
2172 goto err_no_ring;
2173
2174 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2175 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2176 ch->max_ti_iu_len,
2177 GFP_KERNEL, DMA_FROM_DEVICE);
2178 if (!ch->rx_ring[i])
aef9ec39
RD
2179 goto err;
2180 }
2181
4d73f95f 2182 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2183 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2184 target->max_iu_len,
2185 GFP_KERNEL, DMA_TO_DEVICE);
2186 if (!ch->tx_ring[i])
aef9ec39 2187 goto err;
dcb4cb85 2188
509c07bc 2189 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2190 }
2191
2192 return 0;
2193
2194err:
4d73f95f 2195 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2196 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2197 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2198 }
2199
4d73f95f
BVA
2200
2201err_no_ring:
509c07bc
BVA
2202 kfree(ch->tx_ring);
2203 ch->tx_ring = NULL;
2204 kfree(ch->rx_ring);
2205 ch->rx_ring = NULL;
4d73f95f 2206
aef9ec39
RD
2207 return -ENOMEM;
2208}
2209
c9b03c1a
BVA
2210static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2211{
2212 uint64_t T_tr_ns, max_compl_time_ms;
2213 uint32_t rq_tmo_jiffies;
2214
2215 /*
2216 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2217 * table 91), both the QP timeout and the retry count have to be set
2218 * for RC QP's during the RTR to RTS transition.
2219 */
2220 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2221 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2222
2223 /*
2224 * Set target->rq_tmo_jiffies to one second more than the largest time
2225 * it can take before an error completion is generated. See also
2226 * C9-140..142 in the IBTA spec for more information about how to
2227 * convert the QP Local ACK Timeout value to nanoseconds.
2228 */
2229 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2230 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2231 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2232 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2233
2234 return rq_tmo_jiffies;
2235}
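/*
 * Worked example (illustrative values): with qp_attr->timeout == 14 and
 * qp_attr->retry_cnt == 7,
 *
 *	T_tr_ns        = 4096 * 2^14 ns            ~= 67.1 ms
 *	max_compl_time = 7 * 4 * 67.1 ms           ~= 1879 ms
 *	rq_tmo_jiffies = msecs_to_jiffies(1879 + 1000), i.e. ~2.9 s
 *
 * so the block-layer request timeout ends up roughly one second above the
 * worst-case time the HCA needs to report an error completion.
 */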
2236
961e0be8 2237static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
e6300cbd 2238 const struct srp_login_rsp *lrsp,
509c07bc 2239 struct srp_rdma_ch *ch)
961e0be8 2240{
509c07bc 2241 struct srp_target_port *target = ch->target;
961e0be8
DD
2242 struct ib_qp_attr *qp_attr = NULL;
2243 int attr_mask = 0;
2244 int ret;
2245 int i;
2246
2247 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2248 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2249 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
961e0be8
DD
2250
2251 /*
2252 * Reserve credits for task management so we don't
2253 * bounce requests back to the SCSI mid-layer.
2254 */
2255 target->scsi_host->can_queue
509c07bc 2256 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2257 target->scsi_host->can_queue);
4d73f95f
BVA
2258 target->scsi_host->cmd_per_lun
2259 = min_t(int, target->scsi_host->can_queue,
2260 target->scsi_host->cmd_per_lun);
961e0be8
DD
2261 } else {
2262 shost_printk(KERN_WARNING, target->scsi_host,
2263 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2264 ret = -ECONNRESET;
2265 goto error;
2266 }
2267
509c07bc
BVA
2268 if (!ch->rx_ring) {
2269 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2270 if (ret)
2271 goto error;
2272 }
2273
2274 ret = -ENOMEM;
2275 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2276 if (!qp_attr)
2277 goto error;
2278
2279 qp_attr->qp_state = IB_QPS_RTR;
2280 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2281 if (ret)
2282 goto error_free;
2283
509c07bc 2284 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2285 if (ret)
2286 goto error_free;
2287
4d73f95f 2288 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2289 struct srp_iu *iu = ch->rx_ring[i];
2290
2291 ret = srp_post_recv(ch, iu);
961e0be8
DD
2292 if (ret)
2293 goto error_free;
2294 }
2295
2296 qp_attr->qp_state = IB_QPS_RTS;
2297 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2298 if (ret)
2299 goto error_free;
2300
c9b03c1a
BVA
2301 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2302
509c07bc 2303 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2304 if (ret)
2305 goto error_free;
2306
2307 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2308
2309error_free:
2310 kfree(qp_attr);
2311
2312error:
509c07bc 2313 ch->status = ret;
961e0be8
DD
2314}
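/*
 * Summary of the REP handling above (sketch): on a valid SRP_LOGIN_RSP the
 * channel
 *
 *	1. adopts the target's max_ti_iu_len and initial req_lim, reserving
 *	   SRP_TSK_MGMT_SQ_SIZE credits when sizing can_queue,
 *	2. allocates the rx/tx IU rings on the first connect,
 *	3. moves the QP to RTR and posts one receive per queue entry,
 *	4. moves the QP to RTS (also deriving rq_tmo_jiffies), and
 *	5. acknowledges the connection with ib_send_cm_rtu().
 *
 * Any failure is reported to the caller through ch->status.
 */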
2315
aef9ec39
RD
2316static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2317 struct ib_cm_event *event,
509c07bc 2318 struct srp_rdma_ch *ch)
aef9ec39 2319{
509c07bc 2320 struct srp_target_port *target = ch->target;
7aa54bd7 2321 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2322 struct ib_class_port_info *cpi;
2323 int opcode;
2324
2325 switch (event->param.rej_rcvd.reason) {
2326 case IB_CM_REJ_PORT_CM_REDIRECT:
2327 cpi = event->param.rej_rcvd.ari;
509c07bc
BVA
2328 ch->path.dlid = cpi->redirect_lid;
2329 ch->path.pkey = cpi->redirect_pkey;
aef9ec39 2330 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
509c07bc 2331 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2332
509c07bc 2333 ch->status = ch->path.dlid ?
aef9ec39
RD
2334 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2335 break;
2336
2337 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2338 if (srp_target_is_topspin(target)) {
aef9ec39
RD
2339 /*
2340 * Topspin/Cisco SRP gateways incorrectly send
2341 * reject reason code 25 when they mean 24
2342 * (port redirect).
2343 */
509c07bc 2344 memcpy(ch->path.dgid.raw,
aef9ec39
RD
2345 event->param.rej_rcvd.ari, 16);
2346
7aa54bd7
DD
2347 shost_printk(KERN_DEBUG, shost,
2348 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
509c07bc
BVA
2349 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2350 be64_to_cpu(ch->path.dgid.global.interface_id));
aef9ec39 2351
509c07bc 2352 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2353 } else {
7aa54bd7
DD
2354 shost_printk(KERN_WARNING, shost,
2355 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2356 ch->status = -ECONNRESET;
aef9ec39
RD
2357 }
2358 break;
2359
2360 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2361 shost_printk(KERN_WARNING, shost,
2362 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2363 ch->status = -ECONNRESET;
aef9ec39
RD
2364 break;
2365
2366 case IB_CM_REJ_CONSUMER_DEFINED:
2367 opcode = *(u8 *) event->private_data;
2368 if (opcode == SRP_LOGIN_REJ) {
2369 struct srp_login_rej *rej = event->private_data;
2370 u32 reason = be32_to_cpu(rej->reason);
2371
2372 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2373 shost_printk(KERN_WARNING, shost,
2374 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2375 else
e7ffde01
BVA
2376 shost_printk(KERN_WARNING, shost, PFX
2377 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000
BVA
2378 target->sgid.raw,
2379 target->orig_dgid.raw, reason);
aef9ec39 2380 } else
7aa54bd7
DD
2381 shost_printk(KERN_WARNING, shost,
2382 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2383 " opcode 0x%02x\n", opcode);
509c07bc 2384 ch->status = -ECONNRESET;
aef9ec39
RD
2385 break;
2386
9fe4bcf4
DD
2387 case IB_CM_REJ_STALE_CONN:
2388 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2389 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2390 break;
2391
aef9ec39 2392 default:
7aa54bd7
DD
2393 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2394 event->param.rej_rcvd.reason);
509c07bc 2395 ch->status = -ECONNRESET;
aef9ec39
RD
2396 }
2397}
2398
2399static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2400{
509c07bc
BVA
2401 struct srp_rdma_ch *ch = cm_id->context;
2402 struct srp_target_port *target = ch->target;
aef9ec39 2403 int comp = 0;
aef9ec39
RD
2404
2405 switch (event->event) {
2406 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2407 shost_printk(KERN_DEBUG, target->scsi_host,
2408 PFX "Sending CM REQ failed\n");
aef9ec39 2409 comp = 1;
509c07bc 2410 ch->status = -ECONNRESET;
aef9ec39
RD
2411 break;
2412
2413 case IB_CM_REP_RECEIVED:
2414 comp = 1;
509c07bc 2415 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2416 break;
2417
2418 case IB_CM_REJ_RECEIVED:
7aa54bd7 2419 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2420 comp = 1;
2421
509c07bc 2422 srp_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2423 break;
2424
b7ac4ab4 2425 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2426 shost_printk(KERN_WARNING, target->scsi_host,
2427 PFX "DREQ received - connection closed\n");
c014c8cd 2428 ch->connected = false;
b7ac4ab4 2429 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2430 shost_printk(KERN_ERR, target->scsi_host,
2431 PFX "Sending CM DREP failed\n");
c1120f89 2432 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2433 break;
2434
2435 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2436 shost_printk(KERN_ERR, target->scsi_host,
2437 PFX "connection closed\n");
ac72d766 2438 comp = 1;
aef9ec39 2439
509c07bc 2440 ch->status = 0;
aef9ec39
RD
2441 break;
2442
b7ac4ab4
IR
2443 case IB_CM_MRA_RECEIVED:
2444 case IB_CM_DREQ_ERROR:
2445 case IB_CM_DREP_RECEIVED:
2446 break;
2447
aef9ec39 2448 default:
7aa54bd7
DD
2449 shost_printk(KERN_WARNING, target->scsi_host,
2450 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2451 break;
2452 }
2453
2454 if (comp)
509c07bc 2455 complete(&ch->done);
aef9ec39 2456
aef9ec39
RD
2457 return 0;
2458}
2459
71444b97
JW
2460/**
2461 * srp_change_queue_depth - setting device queue depth
2462 * @sdev: scsi device struct
2463 * @qdepth: requested queue depth
71444b97
JW
2464 *
2465 * Returns queue depth.
2466 */
2467static int
db5ed4df 2468srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2469{
c40ecc12 2470 if (!sdev->tagged_supported)
1e6f2416 2471 qdepth = 1;
db5ed4df 2472 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2473}
2474
985aa495
BVA
2475static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2476 u8 func)
aef9ec39 2477{
509c07bc 2478 struct srp_target_port *target = ch->target;
a95cadb9 2479 struct srp_rport *rport = target->rport;
19081f31 2480 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2481 struct srp_iu *iu;
2482 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2483
c014c8cd 2484 if (!ch->connected || target->qp_in_error)
3780d1f0
BVA
2485 return -1;
2486
509c07bc 2487 init_completion(&ch->tsk_mgmt_done);
aef9ec39 2488
a95cadb9 2489 /*
509c07bc 2490 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
2491 * invoked while a task management function is being sent.
2492 */
2493 mutex_lock(&rport->mutex);
509c07bc
BVA
2494 spin_lock_irq(&ch->lock);
2495 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2496 spin_unlock_irq(&ch->lock);
76c75b25 2497
a95cadb9
BVA
2498 if (!iu) {
2499 mutex_unlock(&rport->mutex);
2500
76c75b25 2501 return -1;
a95cadb9 2502 }
aef9ec39 2503
19081f31
DD
2504 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2505 DMA_TO_DEVICE);
aef9ec39
RD
2506 tsk_mgmt = iu->buf;
2507 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2508
2509 tsk_mgmt->opcode = SRP_TSK_MGMT;
985aa495 2510 int_to_scsilun(lun, &tsk_mgmt->lun);
f8b6e31e 2511 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2512 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2513 tsk_mgmt->task_tag = req_tag;
aef9ec39 2514
19081f31
DD
2515 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2516 DMA_TO_DEVICE);
509c07bc
BVA
2517 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2518 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2519 mutex_unlock(&rport->mutex);
2520
76c75b25
BVA
2521 return -1;
2522 }
a95cadb9 2523 mutex_unlock(&rport->mutex);
d945e1df 2524
509c07bc 2525 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
aef9ec39 2526 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2527 return -1;
aef9ec39 2528
d945e1df 2529 return 0;
d945e1df
RD
2530}
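/*
 * Tag handling note: task-management IUs are tagged req_tag |
 * SRP_TAG_TSK_MGMT, which is how srp_process_rsp() above distinguishes a
 * task-management response (it completes ch->tsk_mgmt_done) from a normal
 * SRP_RSP that is matched to a SCSI command by its tag.
 */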
2531
aef9ec39
RD
2532static int srp_abort(struct scsi_cmnd *scmnd)
2533{
d945e1df 2534 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2535 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2536 u32 tag;
d92c0da7 2537 u16 ch_idx;
509c07bc 2538 struct srp_rdma_ch *ch;
086f44f5 2539 int ret;
d945e1df 2540
7aa54bd7 2541 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2542
d92c0da7 2543 if (!req)
99b6697a 2544 return SUCCESS;
77f2c1a4 2545 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7
BVA
2546 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2547 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2548 return SUCCESS;
2549 ch = &target->ch[ch_idx];
2550 if (!srp_claim_req(ch, req, NULL, scmnd))
2551 return SUCCESS;
2552 shost_printk(KERN_ERR, target->scsi_host,
2553 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2554 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
80d5e8a2 2555 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2556 ret = SUCCESS;
ed9b2264 2557 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2558 ret = FAST_IO_FAIL;
086f44f5
BVA
2559 else
2560 ret = FAILED;
509c07bc 2561 srp_free_req(ch, req, scmnd, 0);
22032991 2562 scmnd->result = DID_ABORT << 16;
d8536670 2563 scmnd->scsi_done(scmnd);
d945e1df 2564
086f44f5 2565 return ret;
aef9ec39
RD
2566}
2567
2568static int srp_reset_device(struct scsi_cmnd *scmnd)
2569{
d945e1df 2570 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 2571 struct srp_rdma_ch *ch;
536ae14e 2572 int i, j;
d945e1df 2573
7aa54bd7 2574 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2575
d92c0da7 2576 ch = &target->ch[0];
509c07bc 2577 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
f8b6e31e 2578 SRP_TSK_LUN_RESET))
d945e1df 2579 return FAILED;
509c07bc 2580 if (ch->tsk_mgmt_status)
d945e1df
RD
2581 return FAILED;
2582
d92c0da7
BVA
2583 for (i = 0; i < target->ch_count; i++) {
2584 ch = &target->ch[i];
2585 for (j = 0; j < target->req_ring_size; ++j) {
2586 struct srp_request *req = &ch->req_ring[j];
509c07bc 2587
d92c0da7
BVA
2588 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2589 }
536ae14e 2590 }
d945e1df 2591
d945e1df 2592 return SUCCESS;
aef9ec39
RD
2593}
2594
2595static int srp_reset_host(struct scsi_cmnd *scmnd)
2596{
2597 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2598
7aa54bd7 2599 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2600
ed9b2264 2601 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2602}
2603
c9b03c1a
BVA
2604static int srp_slave_configure(struct scsi_device *sdev)
2605{
2606 struct Scsi_Host *shost = sdev->host;
2607 struct srp_target_port *target = host_to_target(shost);
2608 struct request_queue *q = sdev->request_queue;
2609 unsigned long timeout;
2610
2611 if (sdev->type == TYPE_DISK) {
2612 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2613 blk_queue_rq_timeout(q, timeout);
2614 }
2615
2616 return 0;
2617}
2618
ee959b00
TJ
2619static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2620 char *buf)
6ecb0c84 2621{
ee959b00 2622 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2623
45c37cad 2624 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
6ecb0c84
RD
2625}
2626
ee959b00
TJ
2627static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2628 char *buf)
6ecb0c84 2629{
ee959b00 2630 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2631
45c37cad 2632 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
6ecb0c84
RD
2633}
2634
ee959b00
TJ
2635static ssize_t show_service_id(struct device *dev,
2636 struct device_attribute *attr, char *buf)
6ecb0c84 2637{
ee959b00 2638 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2639
45c37cad 2640 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
6ecb0c84
RD
2641}
2642
ee959b00
TJ
2643static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2644 char *buf)
6ecb0c84 2645{
ee959b00 2646 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2647
747fe000 2648 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
6ecb0c84
RD
2649}
2650
848b3082
BVA
2651static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2652 char *buf)
2653{
2654 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2655
747fe000 2656 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
2657}
2658
ee959b00
TJ
2659static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2660 char *buf)
6ecb0c84 2661{
ee959b00 2662 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 2663 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 2664
509c07bc 2665 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
6ecb0c84
RD
2666}
2667
ee959b00
TJ
2668static ssize_t show_orig_dgid(struct device *dev,
2669 struct device_attribute *attr, char *buf)
3633b3d0 2670{
ee959b00 2671 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2672
747fe000 2673 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
3633b3d0
IR
2674}
2675
89de7486
BVA
2676static ssize_t show_req_lim(struct device *dev,
2677 struct device_attribute *attr, char *buf)
2678{
2679 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
2680 struct srp_rdma_ch *ch;
2681 int i, req_lim = INT_MAX;
89de7486 2682
d92c0da7
BVA
2683 for (i = 0; i < target->ch_count; i++) {
2684 ch = &target->ch[i];
2685 req_lim = min(req_lim, ch->req_lim);
2686 }
2687 return sprintf(buf, "%d\n", req_lim);
89de7486
BVA
2688}
2689
ee959b00
TJ
2690static ssize_t show_zero_req_lim(struct device *dev,
2691 struct device_attribute *attr, char *buf)
6bfa24fa 2692{
ee959b00 2693 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2694
6bfa24fa
RD
2695 return sprintf(buf, "%d\n", target->zero_req_lim);
2696}
2697
ee959b00
TJ
2698static ssize_t show_local_ib_port(struct device *dev,
2699 struct device_attribute *attr, char *buf)
ded7f1a1 2700{
ee959b00 2701 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
2702
2703 return sprintf(buf, "%d\n", target->srp_host->port);
2704}
2705
ee959b00
TJ
2706static ssize_t show_local_ib_device(struct device *dev,
2707 struct device_attribute *attr, char *buf)
ded7f1a1 2708{
ee959b00 2709 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2710
05321937 2711 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
2712}
2713
d92c0da7
BVA
2714static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2715 char *buf)
2716{
2717 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2718
2719 return sprintf(buf, "%d\n", target->ch_count);
2720}
2721
4b5e5f41
BVA
2722static ssize_t show_comp_vector(struct device *dev,
2723 struct device_attribute *attr, char *buf)
2724{
2725 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2726
2727 return sprintf(buf, "%d\n", target->comp_vector);
2728}
2729
7bb312e4
VP
2730static ssize_t show_tl_retry_count(struct device *dev,
2731 struct device_attribute *attr, char *buf)
2732{
2733 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2734
2735 return sprintf(buf, "%d\n", target->tl_retry_count);
2736}
2737
49248644
DD
2738static ssize_t show_cmd_sg_entries(struct device *dev,
2739 struct device_attribute *attr, char *buf)
2740{
2741 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2742
2743 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2744}
2745
c07d424d
DD
2746static ssize_t show_allow_ext_sg(struct device *dev,
2747 struct device_attribute *attr, char *buf)
2748{
2749 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2750
2751 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2752}
2753
ee959b00
TJ
2754static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2755static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2756static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2757static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2758static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
2759static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2760static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2761static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
2762static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2763static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2764static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 2765static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 2766static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2767static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2768static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2769static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2770
2771static struct device_attribute *srp_host_attrs[] = {
2772 &dev_attr_id_ext,
2773 &dev_attr_ioc_guid,
2774 &dev_attr_service_id,
2775 &dev_attr_pkey,
848b3082 2776 &dev_attr_sgid,
ee959b00
TJ
2777 &dev_attr_dgid,
2778 &dev_attr_orig_dgid,
89de7486 2779 &dev_attr_req_lim,
ee959b00
TJ
2780 &dev_attr_zero_req_lim,
2781 &dev_attr_local_ib_port,
2782 &dev_attr_local_ib_device,
d92c0da7 2783 &dev_attr_ch_count,
4b5e5f41 2784 &dev_attr_comp_vector,
7bb312e4 2785 &dev_attr_tl_retry_count,
49248644 2786 &dev_attr_cmd_sg_entries,
c07d424d 2787 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2788 NULL
2789};
2790
aef9ec39
RD
2791static struct scsi_host_template srp_template = {
2792 .module = THIS_MODULE,
b7f008fd
RD
2793 .name = "InfiniBand SRP initiator",
2794 .proc_name = DRV_NAME,
c9b03c1a 2795 .slave_configure = srp_slave_configure,
aef9ec39
RD
2796 .info = srp_target_info,
2797 .queuecommand = srp_queuecommand,
71444b97 2798 .change_queue_depth = srp_change_queue_depth,
aef9ec39
RD
2799 .eh_abort_handler = srp_abort,
2800 .eh_device_reset_handler = srp_reset_device,
2801 .eh_host_reset_handler = srp_reset_host,
2742c1da 2802 .skip_settle_delay = true,
49248644 2803 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2804 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2805 .this_id = -1,
4d73f95f 2806 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84 2807 .use_clustering = ENABLE_CLUSTERING,
77f2c1a4
BVA
2808 .shost_attrs = srp_host_attrs,
2809 .use_blk_tags = 1,
c40ecc12 2810 .track_queue_depth = 1,
aef9ec39
RD
2811};
2812
34aa654e
BVA
2813static int srp_sdev_count(struct Scsi_Host *host)
2814{
2815 struct scsi_device *sdev;
2816 int c = 0;
2817
2818 shost_for_each_device(sdev, host)
2819 c++;
2820
2821 return c;
2822}
2823
bc44bd1d
BVA
2824/*
2825 * Return values:
2826 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2827 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2828 * removal has been scheduled.
2829 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2830 */
aef9ec39
RD
2831static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2832{
3236822b
FT
2833 struct srp_rport_identifiers ids;
2834 struct srp_rport *rport;
2835
34aa654e 2836 target->state = SRP_TARGET_SCANNING;
aef9ec39 2837 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 2838 be64_to_cpu(target->id_ext));
aef9ec39 2839
05321937 2840 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2841 return -ENODEV;
2842
3236822b
FT
2843 memcpy(ids.port_id, &target->id_ext, 8);
2844 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2845 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2846 rport = srp_rport_add(target->scsi_host, &ids);
2847 if (IS_ERR(rport)) {
2848 scsi_remove_host(target->scsi_host);
2849 return PTR_ERR(rport);
2850 }
2851
dc1bdbd9 2852 rport->lld_data = target;
9dd69a60 2853 target->rport = rport;
dc1bdbd9 2854
b3589fd4 2855 spin_lock(&host->target_lock);
aef9ec39 2856 list_add_tail(&target->list, &host->target_list);
b3589fd4 2857 spin_unlock(&host->target_lock);
aef9ec39 2858
aef9ec39 2859 scsi_scan_target(&target->scsi_host->shost_gendev,
1962a4a1 2860 0, target->scsi_id, SCAN_WILD_CARD, 0);
aef9ec39 2861
c014c8cd
BVA
2862 if (srp_connected_ch(target) < target->ch_count ||
2863 target->qp_in_error) {
34aa654e
BVA
2864 shost_printk(KERN_INFO, target->scsi_host,
2865 PFX "SCSI scan failed - removing SCSI host\n");
2866 srp_queue_remove_work(target);
2867 goto out;
2868 }
2869
2870 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2871 dev_name(&target->scsi_host->shost_gendev),
2872 srp_sdev_count(target->scsi_host));
2873
2874 spin_lock_irq(&target->lock);
2875 if (target->state == SRP_TARGET_SCANNING)
2876 target->state = SRP_TARGET_LIVE;
2877 spin_unlock_irq(&target->lock);
2878
2879out:
aef9ec39
RD
2880 return 0;
2881}
2882
ee959b00 2883static void srp_release_dev(struct device *dev)
aef9ec39
RD
2884{
2885 struct srp_host *host =
ee959b00 2886 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2887
2888 complete(&host->released);
2889}
2890
2891static struct class srp_class = {
2892 .name = "infiniband_srp",
ee959b00 2893 .dev_release = srp_release_dev
aef9ec39
RD
2894};
2895
96fc248a
BVA
2896/**
2897 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
2898 * @host: SRP host.
2899 * @target: SRP target port.
96fc248a
BVA
2900 */
2901static bool srp_conn_unique(struct srp_host *host,
2902 struct srp_target_port *target)
2903{
2904 struct srp_target_port *t;
2905 bool ret = false;
2906
2907 if (target->state == SRP_TARGET_REMOVED)
2908 goto out;
2909
2910 ret = true;
2911
2912 spin_lock(&host->target_lock);
2913 list_for_each_entry(t, &host->target_list, list) {
2914 if (t != target &&
2915 target->id_ext == t->id_ext &&
2916 target->ioc_guid == t->ioc_guid &&
2917 target->initiator_ext == t->initiator_ext) {
2918 ret = false;
2919 break;
2920 }
2921 }
2922 spin_unlock(&host->target_lock);
2923
2924out:
2925 return ret;
2926}
2927
aef9ec39
RD
2928/*
2929 * Target ports are added by writing
2930 *
2931 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2932 * pkey=<P_Key>,service_id=<service ID>
2933 *
2934 * to the add_target sysfs attribute.
2935 */
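/*
 * Illustrative example (all identifiers below are placeholders; the sysfs
 * directory follows the "srp-<device>-<port>" naming used by
 * srp_add_port()):
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,\
 *         dgid=fe800000000000000002c90200402bd5,pkey=ffff,\
 *         service_id=0002c90200402bd4" > \
 *        /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */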
2936enum {
2937 SRP_OPT_ERR = 0,
2938 SRP_OPT_ID_EXT = 1 << 0,
2939 SRP_OPT_IOC_GUID = 1 << 1,
2940 SRP_OPT_DGID = 1 << 2,
2941 SRP_OPT_PKEY = 1 << 3,
2942 SRP_OPT_SERVICE_ID = 1 << 4,
2943 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2944 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2945 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2946 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2947 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
2948 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2949 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 2950 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 2951 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 2952 SRP_OPT_QUEUE_SIZE = 1 << 14,
aef9ec39
RD
2953 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2954 SRP_OPT_IOC_GUID |
2955 SRP_OPT_DGID |
2956 SRP_OPT_PKEY |
2957 SRP_OPT_SERVICE_ID),
2958};
2959
a447c093 2960static const match_table_t srp_opt_tokens = {
52fb2b50
VP
2961 { SRP_OPT_ID_EXT, "id_ext=%s" },
2962 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2963 { SRP_OPT_DGID, "dgid=%s" },
2964 { SRP_OPT_PKEY, "pkey=%x" },
2965 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2966 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2967 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 2968 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 2969 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 2970 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
2971 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2972 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 2973 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 2974 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 2975 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 2976 { SRP_OPT_ERR, NULL }
aef9ec39
RD
2977};
2978
2979static int srp_parse_options(const char *buf, struct srp_target_port *target)
2980{
2981 char *options, *sep_opt;
2982 char *p;
2983 char dgid[3];
2984 substring_t args[MAX_OPT_ARGS];
2985 int opt_mask = 0;
2986 int token;
2987 int ret = -EINVAL;
2988 int i;
2989
2990 options = kstrdup(buf, GFP_KERNEL);
2991 if (!options)
2992 return -ENOMEM;
2993
2994 sep_opt = options;
7dcf9c19 2995 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
2996 if (!*p)
2997 continue;
2998
2999 token = match_token(p, srp_opt_tokens, args);
3000 opt_mask |= token;
3001
3002 switch (token) {
3003 case SRP_OPT_ID_EXT:
3004 p = match_strdup(args);
a20f3a6d
IR
3005 if (!p) {
3006 ret = -ENOMEM;
3007 goto out;
3008 }
aef9ec39
RD
3009 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3010 kfree(p);
3011 break;
3012
3013 case SRP_OPT_IOC_GUID:
3014 p = match_strdup(args);
a20f3a6d
IR
3015 if (!p) {
3016 ret = -ENOMEM;
3017 goto out;
3018 }
aef9ec39
RD
3019 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3020 kfree(p);
3021 break;
3022
3023 case SRP_OPT_DGID:
3024 p = match_strdup(args);
a20f3a6d
IR
3025 if (!p) {
3026 ret = -ENOMEM;
3027 goto out;
3028 }
aef9ec39 3029 if (strlen(p) != 32) {
e0bda7d8 3030 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 3031 kfree(p);
aef9ec39
RD
3032 goto out;
3033 }
3034
3035 for (i = 0; i < 16; ++i) {
747fe000
BVA
3036 strlcpy(dgid, p + i * 2, sizeof(dgid));
3037 if (sscanf(dgid, "%hhx",
3038 &target->orig_dgid.raw[i]) < 1) {
3039 ret = -EINVAL;
3040 kfree(p);
3041 goto out;
3042 }
aef9ec39 3043 }
bf17c1c7 3044 kfree(p);
aef9ec39
RD
3045 break;
3046
3047 case SRP_OPT_PKEY:
3048 if (match_hex(args, &token)) {
e0bda7d8 3049 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
3050 goto out;
3051 }
747fe000 3052 target->pkey = cpu_to_be16(token);
aef9ec39
RD
3053 break;
3054
3055 case SRP_OPT_SERVICE_ID:
3056 p = match_strdup(args);
a20f3a6d
IR
3057 if (!p) {
3058 ret = -ENOMEM;
3059 goto out;
3060 }
aef9ec39
RD
3061 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3062 kfree(p);
3063 break;
3064
3065 case SRP_OPT_MAX_SECT:
3066 if (match_int(args, &token)) {
e0bda7d8 3067 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3068 goto out;
3069 }
3070 target->scsi_host->max_sectors = token;
3071 break;
3072
4d73f95f
BVA
3073 case SRP_OPT_QUEUE_SIZE:
3074 if (match_int(args, &token) || token < 1) {
3075 pr_warn("bad queue_size parameter '%s'\n", p);
3076 goto out;
3077 }
3078 target->scsi_host->can_queue = token;
3079 target->queue_size = token + SRP_RSP_SQ_SIZE +
3080 SRP_TSK_MGMT_SQ_SIZE;
3081 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3082 target->scsi_host->cmd_per_lun = token;
3083 break;
3084
52fb2b50 3085 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3086 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3087 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3088 p);
52fb2b50
VP
3089 goto out;
3090 }
4d73f95f 3091 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3092 break;
3093
0c0450db
R
3094 case SRP_OPT_IO_CLASS:
3095 if (match_hex(args, &token)) {
e0bda7d8 3096 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3097 goto out;
3098 }
3099 if (token != SRP_REV10_IB_IO_CLASS &&
3100 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3101 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3102 token, SRP_REV10_IB_IO_CLASS,
3103 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3104 goto out;
3105 }
3106 target->io_class = token;
3107 break;
3108
01cb9bcb
IR
3109 case SRP_OPT_INITIATOR_EXT:
3110 p = match_strdup(args);
a20f3a6d
IR
3111 if (!p) {
3112 ret = -ENOMEM;
3113 goto out;
3114 }
01cb9bcb
IR
3115 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3116 kfree(p);
3117 break;
3118
49248644
DD
3119 case SRP_OPT_CMD_SG_ENTRIES:
3120 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3121 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3122 p);
49248644
DD
3123 goto out;
3124 }
3125 target->cmd_sg_cnt = token;
3126 break;
3127
c07d424d
DD
3128 case SRP_OPT_ALLOW_EXT_SG:
3129 if (match_int(args, &token)) {
e0bda7d8 3130 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3131 goto out;
3132 }
3133 target->allow_ext_sg = !!token;
3134 break;
3135
3136 case SRP_OPT_SG_TABLESIZE:
3137 if (match_int(args, &token) || token < 1 ||
3138 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
e0bda7d8
BVA
3139 pr_warn("bad max sg_tablesize parameter '%s'\n",
3140 p);
c07d424d
DD
3141 goto out;
3142 }
3143 target->sg_tablesize = token;
3144 break;
3145
4b5e5f41
BVA
3146 case SRP_OPT_COMP_VECTOR:
3147 if (match_int(args, &token) || token < 0) {
3148 pr_warn("bad comp_vector parameter '%s'\n", p);
3149 goto out;
3150 }
3151 target->comp_vector = token;
3152 break;
3153
7bb312e4
VP
3154 case SRP_OPT_TL_RETRY_COUNT:
3155 if (match_int(args, &token) || token < 2 || token > 7) {
3156 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3157 p);
3158 goto out;
3159 }
3160 target->tl_retry_count = token;
3161 break;
3162
aef9ec39 3163 default:
e0bda7d8
BVA
3164 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3165 p);
aef9ec39
RD
3166 goto out;
3167 }
3168 }
3169
3170 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3171 ret = 0;
3172 else
3173 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3174 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3175 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
3176 pr_warn("target creation request is missing parameter '%s'\n",
3177 srp_opt_tokens[i].pattern);
aef9ec39 3178
4d73f95f
BVA
3179 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3180 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3181 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3182 target->scsi_host->cmd_per_lun,
3183 target->scsi_host->can_queue);
3184
aef9ec39
RD
3185out:
3186 kfree(options);
3187 return ret;
3188}
3189
ee959b00
TJ
3190static ssize_t srp_create_target(struct device *dev,
3191 struct device_attribute *attr,
aef9ec39
RD
3192 const char *buf, size_t count)
3193{
3194 struct srp_host *host =
ee959b00 3195 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3196 struct Scsi_Host *target_host;
3197 struct srp_target_port *target;
509c07bc 3198 struct srp_rdma_ch *ch;
d1b4289e
BVA
3199 struct srp_device *srp_dev = host->srp_dev;
3200 struct ib_device *ibdev = srp_dev->dev;
d92c0da7
BVA
3201 int ret, node_idx, node, cpu, i;
3202 bool multich = false;
aef9ec39
RD
3203
3204 target_host = scsi_host_alloc(&srp_template,
3205 sizeof (struct srp_target_port));
3206 if (!target_host)
3207 return -ENOMEM;
3208
49248644 3209 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3210 target_host->max_channel = 0;
3211 target_host->max_id = 1;
985aa495 3212 target_host->max_lun = -1LL;
3c8edf0e 3213 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 3214
aef9ec39 3215 target = host_to_target(target_host);
aef9ec39 3216
49248644
DD
3217 target->io_class = SRP_REV16A_IB_IO_CLASS;
3218 target->scsi_host = target_host;
3219 target->srp_host = host;
e6bf5f48 3220 target->lkey = host->srp_dev->pd->local_dma_lkey;
03f6fb93 3221 target->global_mr = host->srp_dev->global_mr;
49248644 3222 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3223 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3224 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3225 target->tl_retry_count = 7;
4d73f95f 3226 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3227
34aa654e
BVA
3228 /*
3229 * Prevent the SCSI host from being removed by srp_remove_target()
3230 * before this function returns.
3231 */
3232 scsi_host_get(target->scsi_host);
3233
2d7091bc
BVA
3234 mutex_lock(&host->add_target_mutex);
3235
aef9ec39
RD
3236 ret = srp_parse_options(buf, target);
3237 if (ret)
fb49c8bb 3238 goto out;
aef9ec39 3239
77f2c1a4
BVA
3240 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3241 if (ret)
fb49c8bb 3242 goto out;
77f2c1a4 3243
4d73f95f
BVA
3244 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3245
96fc248a
BVA
3246 if (!srp_conn_unique(target->srp_host, target)) {
3247 shost_printk(KERN_INFO, target->scsi_host,
3248 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3249 be64_to_cpu(target->id_ext),
3250 be64_to_cpu(target->ioc_guid),
3251 be64_to_cpu(target->initiator_ext));
3252 ret = -EEXIST;
fb49c8bb 3253 goto out;
96fc248a
BVA
3254 }
3255
5cfb1782 3256 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3257 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3258 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3259 target->sg_tablesize = target->cmd_sg_cnt;
3260 }
3261
3262 target_host->sg_tablesize = target->sg_tablesize;
3263 target->indirect_size = target->sg_tablesize *
3264 sizeof (struct srp_direct_buf);
49248644
DD
3265 target->max_iu_len = sizeof (struct srp_cmd) +
3266 sizeof (struct srp_indirect_buf) +
3267 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3268
c1120f89 3269 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3270 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3271 spin_lock_init(&target->lock);
55ee3ab2 3272 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
2088ca66 3273 if (ret)
fb49c8bb 3274 goto out;
aef9ec39 3275
d92c0da7
BVA
3276 ret = -ENOMEM;
3277 target->ch_count = max_t(unsigned, num_online_nodes(),
3278 min(ch_count ? :
3279 min(4 * num_online_nodes(),
3280 ibdev->num_comp_vectors),
3281 num_online_cpus()));
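	/*
	 * Worked example for the channel-count heuristic above (illustrative
	 * numbers): with 2 online NUMA nodes, 16 online CPUs, 8 completion
	 * vectors and the ch_count parameter left at 0,
	 *
	 *	ch_count = max(2, min(min(4 * 2, 8), 16)) = 8
	 *
	 * i.e. one RDMA channel per completion vector, never fewer than one
	 * per NUMA node and never more than the number of online CPUs.
	 */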
3282 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3283 GFP_KERNEL);
3284 if (!target->ch)
fb49c8bb 3285 goto out;
aef9ec39 3286
d92c0da7
BVA
3287 node_idx = 0;
3288 for_each_online_node(node) {
3289 const int ch_start = (node_idx * target->ch_count /
3290 num_online_nodes());
3291 const int ch_end = ((node_idx + 1) * target->ch_count /
3292 num_online_nodes());
3293 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3294 num_online_nodes() + target->comp_vector)
3295 % ibdev->num_comp_vectors;
3296 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3297 num_online_nodes() + target->comp_vector)
3298 % ibdev->num_comp_vectors;
3299 int cpu_idx = 0;
3300
3301 for_each_online_cpu(cpu) {
3302 if (cpu_to_node(cpu) != node)
3303 continue;
3304 if (ch_start + cpu_idx >= ch_end)
3305 continue;
3306 ch = &target->ch[ch_start + cpu_idx];
3307 ch->target = target;
3308 ch->comp_vector = cv_start == cv_end ? cv_start :
3309 cv_start + cpu_idx % (cv_end - cv_start);
3310 spin_lock_init(&ch->lock);
3311 INIT_LIST_HEAD(&ch->free_tx);
3312 ret = srp_new_cm_id(ch);
3313 if (ret)
3314 goto err_disconnect;
aef9ec39 3315
d92c0da7
BVA
3316 ret = srp_create_ch_ib(ch);
3317 if (ret)
3318 goto err_disconnect;
3319
3320 ret = srp_alloc_req_data(ch);
3321 if (ret)
3322 goto err_disconnect;
3323
3324 ret = srp_connect_ch(ch, multich);
3325 if (ret) {
3326 shost_printk(KERN_ERR, target->scsi_host,
3327 PFX "Connection %d/%d failed\n",
3328 ch_start + cpu_idx,
3329 target->ch_count);
3330 if (node_idx == 0 && cpu_idx == 0) {
3331 goto err_disconnect;
3332 } else {
3333 srp_free_ch_ib(target, ch);
3334 srp_free_req_data(target, ch);
3335 target->ch_count = ch - target->ch;
c257ea6f 3336 goto connected;
d92c0da7
BVA
3337 }
3338 }
3339
3340 multich = true;
3341 cpu_idx++;
3342 }
3343 node_idx++;
aef9ec39
RD
3344 }
3345
c257ea6f 3346connected:
d92c0da7
BVA
3347 target->scsi_host->nr_hw_queues = target->ch_count;
3348
aef9ec39
RD
3349 ret = srp_add_target(host, target);
3350 if (ret)
3351 goto err_disconnect;
3352
34aa654e
BVA
3353 if (target->state != SRP_TARGET_REMOVED) {
3354 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3355 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3356 be64_to_cpu(target->id_ext),
3357 be64_to_cpu(target->ioc_guid),
747fe000 3358 be16_to_cpu(target->pkey),
34aa654e 3359 be64_to_cpu(target->service_id),
747fe000 3360 target->sgid.raw, target->orig_dgid.raw);
34aa654e 3361 }
e7ffde01 3362
2d7091bc
BVA
3363 ret = count;
3364
3365out:
3366 mutex_unlock(&host->add_target_mutex);
34aa654e
BVA
3367
3368 scsi_host_put(target->scsi_host);
bc44bd1d
BVA
3369 if (ret < 0)
3370 scsi_host_put(target->scsi_host);
34aa654e 3371
2d7091bc 3372 return ret;
aef9ec39
RD
3373
3374err_disconnect:
3375 srp_disconnect_target(target);
3376
d92c0da7
BVA
3377 for (i = 0; i < target->ch_count; i++) {
3378 ch = &target->ch[i];
3379 srp_free_ch_ib(target, ch);
3380 srp_free_req_data(target, ch);
3381 }
aef9ec39 3382
d92c0da7 3383 kfree(target->ch);
2d7091bc 3384 goto out;
aef9ec39
RD
3385}
3386
ee959b00 3387static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 3388
ee959b00
TJ
3389static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3390 char *buf)
aef9ec39 3391{
ee959b00 3392 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3393
05321937 3394 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
3395}
3396
ee959b00 3397static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3398
ee959b00
TJ
3399static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3400 char *buf)
aef9ec39 3401{
ee959b00 3402 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
3403
3404 return sprintf(buf, "%d\n", host->port);
3405}
3406
ee959b00 3407static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3408
f5358a17 3409static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3410{
3411 struct srp_host *host;
3412
3413 host = kzalloc(sizeof *host, GFP_KERNEL);
3414 if (!host)
3415 return NULL;
3416
3417 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3418 spin_lock_init(&host->target_lock);
aef9ec39 3419 init_completion(&host->released);
2d7091bc 3420 mutex_init(&host->add_target_mutex);
05321937 3421 host->srp_dev = device;
aef9ec39
RD
3422 host->port = port;
3423
ee959b00
TJ
3424 host->dev.class = &srp_class;
3425 host->dev.parent = device->dev->dma_device;
d927e38c 3426 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3427
ee959b00 3428 if (device_register(&host->dev))
f5358a17 3429 goto free_host;
ee959b00 3430 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3431 goto err_class;
ee959b00 3432 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3433 goto err_class;
ee959b00 3434 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3435 goto err_class;
3436
3437 return host;
3438
3439err_class:
ee959b00 3440 device_unregister(&host->dev);
aef9ec39 3441
f5358a17 3442free_host:
aef9ec39
RD
3443 kfree(host);
3444
3445 return NULL;
3446}
3447
3448static void srp_add_one(struct ib_device *device)
3449{
f5358a17
RD
3450 struct srp_device *srp_dev;
3451 struct ib_device_attr *dev_attr;
aef9ec39 3452 struct srp_host *host;
4139032b 3453 int mr_page_shift, p;
52ede08f 3454 u64 max_pages_per_mr;
aef9ec39 3455
f5358a17
RD
3456 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3457 if (!dev_attr)
cf311cd4 3458 return;
aef9ec39 3459
f5358a17 3460 if (ib_query_device(device, dev_attr)) {
e0bda7d8 3461 pr_warn("Query device failed for %s\n", device->name);
f5358a17
RD
3462 goto free_attr;
3463 }
3464
3465 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3466 if (!srp_dev)
3467 goto free_attr;
3468
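	/*
	 * Check which memory registration methods the HCA supports: FMR
	 * (legacy fmr verbs) and/or FR (fast registration, advertised through
	 * IB_DEVICE_MEM_MGT_EXTENSIONS). Fast registration is used when
	 * available unless the prefer_fr module parameter disables it.
	 */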
d1b4289e
BVA
3469 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3470 device->map_phys_fmr && device->unmap_fmr);
5cfb1782
BVA
3471 srp_dev->has_fr = (dev_attr->device_cap_flags &
3472 IB_DEVICE_MEM_MGT_EXTENSIONS);
3473 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3474 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3475
3476 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3477 (!srp_dev->has_fmr || prefer_fr));
002f1567 3478 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
d1b4289e 3479
f5358a17
RD
3480 /*
3481 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3482 * minimum of 4096 bytes. We're unlikely to build large sglists
3483 * out of smaller entries.
f5358a17 3484 */
52ede08f
BVA
3485 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3486 srp_dev->mr_page_size = 1 << mr_page_shift;
3487 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3488 max_pages_per_mr = dev_attr->max_mr_size;
3489 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3490 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3491 max_pages_per_mr);
5cfb1782
BVA
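	/*
	 * With fast registration the HCA also limits how many pages fit in a
	 * single registration work request.
	 */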
3492 if (srp_dev->use_fast_reg) {
3493 srp_dev->max_pages_per_mr =
3494 min_t(u32, srp_dev->max_pages_per_mr,
3495 dev_attr->max_fast_reg_page_list_len);
3496 }
52ede08f
BVA
3497 srp_dev->mr_max_size = srp_dev->mr_page_size *
3498 srp_dev->max_pages_per_mr;
5cfb1782 3499 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
52ede08f 3500 device->name, mr_page_shift, dev_attr->max_mr_size,
5cfb1782 3501 dev_attr->max_fast_reg_page_list_len,
52ede08f 3502 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
3503
3504 INIT_LIST_HEAD(&srp_dev->dev_list);
3505
3506 srp_dev->dev = device;
3507 srp_dev->pd = ib_alloc_pd(device);
3508 if (IS_ERR(srp_dev->pd))
3509 goto free_dev;
3510
03f6fb93
BVA
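	/*
	 * Create a global DMA memory region with remote access only when
	 * per-command memory registration is not enforced (register_always)
	 * or when neither FMR nor FR is available.
	 */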
3511 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3512 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3513 IB_ACCESS_LOCAL_WRITE |
3514 IB_ACCESS_REMOTE_READ |
3515 IB_ACCESS_REMOTE_WRITE);
3516 if (IS_ERR(srp_dev->global_mr))
3517 goto err_pd;
3518 } else {
3519 srp_dev->global_mr = NULL;
3520 }
f5358a17 3521
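	/* Create one srp_host, and hence one add_target interface, per HCA port. */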
4139032b 3522 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
f5358a17 3523 host = srp_add_port(srp_dev, p);
aef9ec39 3524 if (host)
f5358a17 3525 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3526 }
3527
f5358a17
RD
3528 ib_set_client_data(device, &srp_client, srp_dev);
3529
3530 goto free_attr;
3531
3532err_pd:
3533 ib_dealloc_pd(srp_dev->pd);
3534
3535free_dev:
3536 kfree(srp_dev);
3537
3538free_attr:
3539 kfree(dev_attr);
aef9ec39
RD
3540}
3541
7c1eb45a 3542static void srp_remove_one(struct ib_device *device, void *client_data)
aef9ec39 3543{
f5358a17 3544 struct srp_device *srp_dev;
aef9ec39 3545 struct srp_host *host, *tmp_host;
ef6c49d8 3546 struct srp_target_port *target;
aef9ec39 3547
7c1eb45a 3548 srp_dev = client_data;
1fe0cb84
DB
3549 if (!srp_dev)
3550 return;
aef9ec39 3551
f5358a17 3552 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3553 device_unregister(&host->dev);
aef9ec39
RD
3554 /*
3555 * Wait for the sysfs entry to go away, so that no new
3556 * target ports can be created.
3557 */
3558 wait_for_completion(&host->released);
3559
3560 /*
ef6c49d8 3561 * Remove all target ports.
aef9ec39 3562 */
b3589fd4 3563 spin_lock(&host->target_lock);
ef6c49d8
BVA
3564 list_for_each_entry(target, &host->target_list, list)
3565 srp_queue_remove_work(target);
b3589fd4 3566 spin_unlock(&host->target_lock);
aef9ec39
RD
3567
3568 /*
bcc05910 3569 * Wait for tl_err and target port removal tasks.
aef9ec39 3570 */
ef6c49d8 3571 flush_workqueue(system_long_wq);
bcc05910 3572 flush_workqueue(srp_remove_wq);
aef9ec39 3573
aef9ec39
RD
3574 kfree(host);
3575 }
3576
03f6fb93
BVA
3577 if (srp_dev->global_mr)
3578 ib_dereg_mr(srp_dev->global_mr);
f5358a17
RD
3579 ib_dealloc_pd(srp_dev->pd);
3580
3581 kfree(srp_dev);
aef9ec39
RD
3582}
3583
3236822b 3584static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3585 .has_rport_state = true,
3586 .reset_timer_if_blocked = true,
a95cadb9 3587 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3588 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3589 .dev_loss_tmo = &srp_dev_loss_tmo,
3590 .reconnect = srp_rport_reconnect,
dc1bdbd9 3591 .rport_delete = srp_rport_delete,
ed9b2264 3592 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3593};
3594
aef9ec39
RD
3595static int __init srp_init_module(void)
3596{
3597 int ret;
3598
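	/*
	 * This driver stores pointers in the wr_id field of work completions,
	 * so that field must be at least as wide as a pointer.
	 */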
dcb4cb85 3599 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
dd5e6e38 3600
49248644 3601 if (srp_sg_tablesize) {
e0bda7d8 3602 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3603 if (!cmd_sg_entries)
3604 cmd_sg_entries = srp_sg_tablesize;
3605 }
3606
3607 if (!cmd_sg_entries)
3608 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3609
3610 if (cmd_sg_entries > 255) {
e0bda7d8 3611 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3612 cmd_sg_entries = 255;
1e89a194
DD
3613 }
3614
c07d424d
DD
3615 if (!indirect_sg_entries)
3616 indirect_sg_entries = cmd_sg_entries;
3617 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3618 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3619 cmd_sg_entries);
c07d424d
DD
3620 indirect_sg_entries = cmd_sg_entries;
3621 }
3622
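	/*
	 * Create the target removal workqueue before registering with the SRP
	 * transport class and the IB core so that removal work can always be
	 * queued.
	 */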
bcc05910 3623 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3624 if (!srp_remove_wq) {
3625 ret = -ENOMEM;
bcc05910
BVA
3626 goto out;
3627 }
3628
3629 ret = -ENOMEM;
3236822b
FT
3630 ib_srp_transport_template =
3631 srp_attach_transport(&ib_srp_transport_functions);
3632 if (!ib_srp_transport_template)
bcc05910 3633 goto destroy_wq;
3236822b 3634
aef9ec39
RD
3635 ret = class_register(&srp_class);
3636 if (ret) {
e0bda7d8 3637 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3638 goto release_tr;
aef9ec39
RD
3639 }
3640
c1a0b23b
MT
3641 ib_sa_register_client(&srp_sa_client);
3642
aef9ec39
RD
3643 ret = ib_register_client(&srp_client);
3644 if (ret) {
e0bda7d8 3645 pr_err("couldn't register IB client\n");
bcc05910 3646 goto unreg_sa;
aef9ec39
RD
3647 }
3648
bcc05910
BVA
3649out:
3650 return ret;
3651
3652unreg_sa:
3653 ib_sa_unregister_client(&srp_sa_client);
3654 class_unregister(&srp_class);
3655
3656release_tr:
3657 srp_release_transport(ib_srp_transport_template);
3658
3659destroy_wq:
3660 destroy_workqueue(srp_remove_wq);
3661 goto out;
aef9ec39
RD
3662}
3663
3664static void __exit srp_cleanup_module(void)
3665{
3666 ib_unregister_client(&srp_client);
c1a0b23b 3667 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3668 class_unregister(&srp_class);
3236822b 3669 srp_release_transport(ib_srp_transport_template);
bcc05910 3670 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3671}
3672
3673module_init(srp_init_module);
3674module_exit(srp_cleanup_module);