IB/srp: Use block layer tags
deliverable/linux.git: drivers/infiniband/ulp/srp/ib_srp.c
aef9ec39
RD
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
aef9ec39
RD
31 */
32
d236cd0e 33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
e0bda7d8 34
aef9ec39
RD
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/string.h>
40#include <linux/parser.h>
41#include <linux/random.h>
de25968c 42#include <linux/jiffies.h>
aef9ec39 43
60063497 44#include <linux/atomic.h>
aef9ec39
RD
45
46#include <scsi/scsi.h>
47#include <scsi/scsi_device.h>
48#include <scsi/scsi_dbg.h>
71444b97 49#include <scsi/scsi_tcq.h>
aef9ec39 50#include <scsi/srp.h>
3236822b 51#include <scsi/scsi_transport_srp.h>
aef9ec39 52
aef9ec39
RD
53#include "ib_srp.h"
54
55#define DRV_NAME "ib_srp"
56#define PFX DRV_NAME ": "
e8ca4135
VP
57#define DRV_VERSION "1.0"
58#define DRV_RELDATE "July 1, 2013"
aef9ec39
RD
59
60MODULE_AUTHOR("Roland Dreier");
61MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
62 "v" DRV_VERSION " (" DRV_RELDATE ")");
63MODULE_LICENSE("Dual BSD/GPL");
64
49248644
DD
65static unsigned int srp_sg_tablesize;
66static unsigned int cmd_sg_entries;
c07d424d
DD
67static unsigned int indirect_sg_entries;
68static bool allow_ext_sg;
5cfb1782 69static bool prefer_fr;
b1b8854d 70static bool register_always;
49248644 71static int topspin_workarounds = 1;
74b0a15b 72
49248644
DD
73module_param(srp_sg_tablesize, uint, 0444);
74MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
74b0a15b 75
49248644
DD
76module_param(cmd_sg_entries, uint, 0444);
77MODULE_PARM_DESC(cmd_sg_entries,
78 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
aef9ec39 79
c07d424d
DD
80module_param(indirect_sg_entries, uint, 0444);
81MODULE_PARM_DESC(indirect_sg_entries,
82 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
83
84module_param(allow_ext_sg, bool, 0444);
85MODULE_PARM_DESC(allow_ext_sg,
86 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
87
aef9ec39
RD
88module_param(topspin_workarounds, int, 0444);
89MODULE_PARM_DESC(topspin_workarounds,
90 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
91
5cfb1782
BVA
92module_param(prefer_fr, bool, 0444);
93MODULE_PARM_DESC(prefer_fr,
94"Whether to use fast registration if both FMR and fast registration are supported");
95
b1b8854d
BVA
96module_param(register_always, bool, 0444);
97MODULE_PARM_DESC(register_always,
98 "Use memory registration even for contiguous memory regions");
99
ed9b2264
BVA
100static struct kernel_param_ops srp_tmo_ops;
101
a95cadb9
BVA
102static int srp_reconnect_delay = 10;
103module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
104 S_IRUGO | S_IWUSR);
105MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
106
ed9b2264
BVA
107static int srp_fast_io_fail_tmo = 15;
108module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
109 S_IRUGO | S_IWUSR);
110MODULE_PARM_DESC(fast_io_fail_tmo,
111 "Number of seconds between the observation of a transport"
112 " layer error and failing all I/O. \"off\" means that this"
113 " functionality is disabled.");
114
a95cadb9 115static int srp_dev_loss_tmo = 600;
ed9b2264
BVA
116module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
117 S_IRUGO | S_IWUSR);
118MODULE_PARM_DESC(dev_loss_tmo,
119 "Maximum number of seconds that the SRP transport should"
120 " insulate transport layer errors. After this time has been"
121 " exceeded the SCSI host is removed. Should be"
122 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
123 " if fast_io_fail_tmo has not been set. \"off\" means that"
124 " this functionality is disabled.");
125
aef9ec39
RD
126static void srp_add_one(struct ib_device *device);
127static void srp_remove_one(struct ib_device *device);
509c07bc
BVA
128static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
129static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
aef9ec39
RD
130static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
131
3236822b 132static struct scsi_transport_template *ib_srp_transport_template;
bcc05910 133static struct workqueue_struct *srp_remove_wq;
3236822b 134
aef9ec39
RD
135static struct ib_client srp_client = {
136 .name = "srp",
137 .add = srp_add_one,
138 .remove = srp_remove_one
139};
140
c1a0b23b
MT
141static struct ib_sa_client srp_sa_client;
142
ed9b2264
BVA
143static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
144{
145 int tmo = *(int *)kp->arg;
146
147 if (tmo >= 0)
148 return sprintf(buffer, "%d", tmo);
149 else
150 return sprintf(buffer, "off");
151}
152
153static int srp_tmo_set(const char *val, const struct kernel_param *kp)
154{
155 int tmo, res;
156
157 if (strncmp(val, "off", 3) != 0) {
158 res = kstrtoint(val, 0, &tmo);
159 if (res)
160 goto out;
161 } else {
162 tmo = -1;
163 }
a95cadb9
BVA
164 if (kp->arg == &srp_reconnect_delay)
165 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
166 srp_dev_loss_tmo);
167 else if (kp->arg == &srp_fast_io_fail_tmo)
168 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
ed9b2264 169 else
a95cadb9
BVA
170 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
171 tmo);
ed9b2264
BVA
172 if (res)
173 goto out;
174 *(int *)kp->arg = tmo;
175
176out:
177 return res;
178}
179
180static struct kernel_param_ops srp_tmo_ops = {
181 .get = srp_tmo_get,
182 .set = srp_tmo_set,
183};
184
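/*
 * Note on the timeout parameters above: reconnect_delay, fast_io_fail_tmo
 * and dev_loss_tmo are registered with module_param_cb() so reads and
 * writes go through srp_tmo_get()/srp_tmo_set(). srp_tmo_set() accepts
 * either an integer or the string "off" (stored internally as -1) and
 * rejects any combination that srp_tmo_valid() considers inconsistent, so
 * the three values can never contradict each other. Illustrative sysfs
 * usage (paths follow the usual /sys/module/<module>/parameters layout):
 *
 *   echo 5   > /sys/module/ib_srp/parameters/reconnect_delay
 *   echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 */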
aef9ec39
RD
185static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
186{
187 return (struct srp_target_port *) host->hostdata;
188}
189
190static const char *srp_target_info(struct Scsi_Host *host)
191{
192 return host_to_target(host)->target_name;
193}
194
5d7cbfd6
RD
195static int srp_target_is_topspin(struct srp_target_port *target)
196{
197 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
3d1ff48d 198 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
5d7cbfd6
RD
199
200 return topspin_workarounds &&
3d1ff48d
RK
201 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
202 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
5d7cbfd6
RD
203}
204
aef9ec39
RD
205static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
206 gfp_t gfp_mask,
207 enum dma_data_direction direction)
208{
209 struct srp_iu *iu;
210
211 iu = kmalloc(sizeof *iu, gfp_mask);
212 if (!iu)
213 goto out;
214
215 iu->buf = kzalloc(size, gfp_mask);
216 if (!iu->buf)
217 goto out_free_iu;
218
05321937
GKH
219 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
220 direction);
221 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
aef9ec39
RD
222 goto out_free_buf;
223
224 iu->size = size;
225 iu->direction = direction;
226
227 return iu;
228
229out_free_buf:
230 kfree(iu->buf);
231out_free_iu:
232 kfree(iu);
233out:
234 return NULL;
235}
236
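/*
 * An information unit (IU) allocated above stays DMA-mapped for its whole
 * lifetime; users only call ib_dma_sync_single_for_cpu()/_for_device()
 * around each use instead of mapping and unmapping per request.
 */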
237static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
238{
239 if (!iu)
240 return;
241
05321937
GKH
242 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
243 iu->direction);
aef9ec39
RD
244 kfree(iu->buf);
245 kfree(iu);
246}
247
248static void srp_qp_event(struct ib_event *event, void *context)
249{
e0bda7d8 250 pr_debug("QP event %d\n", event->event);
aef9ec39
RD
251}
252
253static int srp_init_qp(struct srp_target_port *target,
254 struct ib_qp *qp)
255{
256 struct ib_qp_attr *attr;
257 int ret;
258
259 attr = kmalloc(sizeof *attr, GFP_KERNEL);
260 if (!attr)
261 return -ENOMEM;
262
969a60f9
RD
263 ret = ib_find_pkey(target->srp_host->srp_dev->dev,
264 target->srp_host->port,
747fe000 265 be16_to_cpu(target->pkey),
969a60f9 266 &attr->pkey_index);
aef9ec39
RD
267 if (ret)
268 goto out;
269
270 attr->qp_state = IB_QPS_INIT;
271 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
272 IB_ACCESS_REMOTE_WRITE);
273 attr->port_num = target->srp_host->port;
274
275 ret = ib_modify_qp(qp, attr,
276 IB_QP_STATE |
277 IB_QP_PKEY_INDEX |
278 IB_QP_ACCESS_FLAGS |
279 IB_QP_PORT);
280
281out:
282 kfree(attr);
283 return ret;
284}
285
509c07bc 286static int srp_new_cm_id(struct srp_rdma_ch *ch)
9fe4bcf4 287{
509c07bc 288 struct srp_target_port *target = ch->target;
9fe4bcf4
DD
289 struct ib_cm_id *new_cm_id;
290
05321937 291 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
509c07bc 292 srp_cm_handler, ch);
9fe4bcf4
DD
293 if (IS_ERR(new_cm_id))
294 return PTR_ERR(new_cm_id);
295
509c07bc
BVA
296 if (ch->cm_id)
297 ib_destroy_cm_id(ch->cm_id);
298 ch->cm_id = new_cm_id;
299 ch->path.sgid = target->sgid;
300 ch->path.dgid = target->orig_dgid;
301 ch->path.pkey = target->pkey;
302 ch->path.service_id = target->service_id;
9fe4bcf4
DD
303
304 return 0;
305}
306
d1b4289e
BVA
307static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
308{
309 struct srp_device *dev = target->srp_host->srp_dev;
310 struct ib_fmr_pool_param fmr_param;
311
312 memset(&fmr_param, 0, sizeof(fmr_param));
313 fmr_param.pool_size = target->scsi_host->can_queue;
314 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
315 fmr_param.cache = 1;
52ede08f
BVA
316 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
317 fmr_param.page_shift = ilog2(dev->mr_page_size);
d1b4289e
BVA
318 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
319 IB_ACCESS_REMOTE_WRITE |
320 IB_ACCESS_REMOTE_READ);
321
322 return ib_create_fmr_pool(dev->pd, &fmr_param);
323}
324
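/*
 * The FMR pool above is sized to match the SCSI host's can_queue so that
 * every outstanding command can own a mapping; the dirty watermark of a
 * quarter of the pool defers the expensive flush until enough mappings
 * have been released to make batching worthwhile.
 */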
5cfb1782
BVA
325/**
326 * srp_destroy_fr_pool() - free the resources owned by a pool
327 * @pool: Fast registration pool to be destroyed.
328 */
329static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
330{
331 int i;
332 struct srp_fr_desc *d;
333
334 if (!pool)
335 return;
336
337 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
338 if (d->frpl)
339 ib_free_fast_reg_page_list(d->frpl);
340 if (d->mr)
341 ib_dereg_mr(d->mr);
342 }
343 kfree(pool);
344}
345
346/**
347 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
348 * @device: IB device to allocate fast registration descriptors for.
349 * @pd: Protection domain associated with the FR descriptors.
350 * @pool_size: Number of descriptors to allocate.
351 * @max_page_list_len: Maximum fast registration work request page list length.
352 */
353static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
354 struct ib_pd *pd, int pool_size,
355 int max_page_list_len)
356{
357 struct srp_fr_pool *pool;
358 struct srp_fr_desc *d;
359 struct ib_mr *mr;
360 struct ib_fast_reg_page_list *frpl;
361 int i, ret = -EINVAL;
362
363 if (pool_size <= 0)
364 goto err;
365 ret = -ENOMEM;
366 pool = kzalloc(sizeof(struct srp_fr_pool) +
367 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
368 if (!pool)
369 goto err;
370 pool->size = pool_size;
371 pool->max_page_list_len = max_page_list_len;
372 spin_lock_init(&pool->lock);
373 INIT_LIST_HEAD(&pool->free_list);
374
375 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
376 mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
377 if (IS_ERR(mr)) {
378 ret = PTR_ERR(mr);
379 goto destroy_pool;
380 }
381 d->mr = mr;
382 frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
383 if (IS_ERR(frpl)) {
384 ret = PTR_ERR(frpl);
385 goto destroy_pool;
386 }
387 d->frpl = frpl;
388 list_add_tail(&d->entry, &pool->free_list);
389 }
390
391out:
392 return pool;
393
394destroy_pool:
395 srp_destroy_fr_pool(pool);
396
397err:
398 pool = ERR_PTR(ret);
399 goto out;
400}
401
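/*
 * Each srp_fr_desc created above bundles one fast registration MR with a
 * matching page list; descriptors cycle through the pool's free list and
 * srp_map_finish_fr() bumps the rkey with ib_inc_rkey() on every reuse so
 * that a stale remote access cannot hit a re-registered region.
 */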
402/**
403 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
404 * @pool: Pool to obtain descriptor from.
405 */
406static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
407{
408 struct srp_fr_desc *d = NULL;
409 unsigned long flags;
410
411 spin_lock_irqsave(&pool->lock, flags);
412 if (!list_empty(&pool->free_list)) {
413 d = list_first_entry(&pool->free_list, typeof(*d), entry);
414 list_del(&d->entry);
415 }
416 spin_unlock_irqrestore(&pool->lock, flags);
417
418 return d;
419}
420
421/**
422 * srp_fr_pool_put() - put an FR descriptor back in the free list
423 * @pool: Pool the descriptor was allocated from.
424 * @desc: Pointer to an array of fast registration descriptor pointers.
425 * @n: Number of descriptors to put back.
426 *
427 * Note: The caller must already have queued an invalidation request for
428 * desc->mr->rkey before calling this function.
429 */
430static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
431 int n)
432{
433 unsigned long flags;
434 int i;
435
436 spin_lock_irqsave(&pool->lock, flags);
437 for (i = 0; i < n; i++)
438 list_add(&desc[i]->entry, &pool->free_list);
439 spin_unlock_irqrestore(&pool->lock, flags);
440}
441
442static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
443{
444 struct srp_device *dev = target->srp_host->srp_dev;
445
446 return srp_create_fr_pool(dev->dev, dev->pd,
447 target->scsi_host->can_queue,
448 dev->max_pages_per_mr);
449}
450
509c07bc 451static int srp_create_ch_ib(struct srp_rdma_ch *ch)
aef9ec39 452{
509c07bc 453 struct srp_target_port *target = ch->target;
62154b2e 454 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39 455 struct ib_qp_init_attr *init_attr;
73aa89ed
IR
456 struct ib_cq *recv_cq, *send_cq;
457 struct ib_qp *qp;
d1b4289e 458 struct ib_fmr_pool *fmr_pool = NULL;
5cfb1782
BVA
459 struct srp_fr_pool *fr_pool = NULL;
460 const int m = 1 + dev->use_fast_reg;
aef9ec39
RD
461 int ret;
462
463 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
464 if (!init_attr)
465 return -ENOMEM;
466
509c07bc
BVA
467 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
468 target->queue_size, ch->comp_vector);
73aa89ed
IR
469 if (IS_ERR(recv_cq)) {
470 ret = PTR_ERR(recv_cq);
da9d2f07 471 goto err;
aef9ec39
RD
472 }
473
509c07bc
BVA
474 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
475 m * target->queue_size, ch->comp_vector);
73aa89ed
IR
476 if (IS_ERR(send_cq)) {
477 ret = PTR_ERR(send_cq);
da9d2f07 478 goto err_recv_cq;
9c03dc9f
BVA
479 }
480
73aa89ed 481 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
aef9ec39
RD
482
483 init_attr->event_handler = srp_qp_event;
5cfb1782 484 init_attr->cap.max_send_wr = m * target->queue_size;
4d73f95f 485 init_attr->cap.max_recv_wr = target->queue_size;
aef9ec39
RD
486 init_attr->cap.max_recv_sge = 1;
487 init_attr->cap.max_send_sge = 1;
5cfb1782 488 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
aef9ec39 489 init_attr->qp_type = IB_QPT_RC;
73aa89ed
IR
490 init_attr->send_cq = send_cq;
491 init_attr->recv_cq = recv_cq;
aef9ec39 492
62154b2e 493 qp = ib_create_qp(dev->pd, init_attr);
73aa89ed
IR
494 if (IS_ERR(qp)) {
495 ret = PTR_ERR(qp);
da9d2f07 496 goto err_send_cq;
aef9ec39
RD
497 }
498
73aa89ed 499 ret = srp_init_qp(target, qp);
da9d2f07
RD
500 if (ret)
501 goto err_qp;
aef9ec39 502
5cfb1782
BVA
503 if (dev->use_fast_reg && dev->has_fr) {
504 fr_pool = srp_alloc_fr_pool(target);
505 if (IS_ERR(fr_pool)) {
506 ret = PTR_ERR(fr_pool);
507 shost_printk(KERN_WARNING, target->scsi_host, PFX
508 "FR pool allocation failed (%d)\n", ret);
509 goto err_qp;
510 }
509c07bc
BVA
511 if (ch->fr_pool)
512 srp_destroy_fr_pool(ch->fr_pool);
513 ch->fr_pool = fr_pool;
5cfb1782 514 } else if (!dev->use_fast_reg && dev->has_fmr) {
d1b4289e
BVA
515 fmr_pool = srp_alloc_fmr_pool(target);
516 if (IS_ERR(fmr_pool)) {
517 ret = PTR_ERR(fmr_pool);
518 shost_printk(KERN_WARNING, target->scsi_host, PFX
519 "FMR pool allocation failed (%d)\n", ret);
520 goto err_qp;
521 }
509c07bc
BVA
522 if (ch->fmr_pool)
523 ib_destroy_fmr_pool(ch->fmr_pool);
524 ch->fmr_pool = fmr_pool;
d1b4289e
BVA
525 }
526
509c07bc
BVA
527 if (ch->qp)
528 ib_destroy_qp(ch->qp);
529 if (ch->recv_cq)
530 ib_destroy_cq(ch->recv_cq);
531 if (ch->send_cq)
532 ib_destroy_cq(ch->send_cq);
73aa89ed 533
509c07bc
BVA
534 ch->qp = qp;
535 ch->recv_cq = recv_cq;
536 ch->send_cq = send_cq;
73aa89ed 537
da9d2f07
RD
538 kfree(init_attr);
539 return 0;
540
541err_qp:
73aa89ed 542 ib_destroy_qp(qp);
da9d2f07
RD
543
544err_send_cq:
73aa89ed 545 ib_destroy_cq(send_cq);
da9d2f07
RD
546
547err_recv_cq:
73aa89ed 548 ib_destroy_cq(recv_cq);
da9d2f07
RD
549
550err:
aef9ec39
RD
551 kfree(init_attr);
552 return ret;
553}
554
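/*
 * Note that srp_create_ch_ib() builds the new CQs, QP and registration
 * pool before it tears down whatever the channel already owned: if any
 * allocation fails, the error paths only free the new objects and the
 * existing channel state is left untouched.
 */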
4d73f95f
BVA
555/*
556 * Note: this function may be called without srp_alloc_iu_bufs() having been
509c07bc 557 * invoked. Hence the ch->[rt]x_ring checks.
4d73f95f 558 */
509c07bc
BVA
559static void srp_free_ch_ib(struct srp_target_port *target,
560 struct srp_rdma_ch *ch)
aef9ec39 561{
5cfb1782 562 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39
RD
563 int i;
564
509c07bc
BVA
565 if (ch->cm_id) {
566 ib_destroy_cm_id(ch->cm_id);
567 ch->cm_id = NULL;
394c595e
BVA
568 }
569
5cfb1782 570 if (dev->use_fast_reg) {
509c07bc
BVA
571 if (ch->fr_pool)
572 srp_destroy_fr_pool(ch->fr_pool);
5cfb1782 573 } else {
509c07bc
BVA
574 if (ch->fmr_pool)
575 ib_destroy_fmr_pool(ch->fmr_pool);
5cfb1782 576 }
509c07bc
BVA
577 ib_destroy_qp(ch->qp);
578 ib_destroy_cq(ch->send_cq);
579 ib_destroy_cq(ch->recv_cq);
aef9ec39 580
509c07bc
BVA
581 ch->qp = NULL;
582 ch->send_cq = ch->recv_cq = NULL;
73aa89ed 583
509c07bc 584 if (ch->rx_ring) {
4d73f95f 585 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
586 srp_free_iu(target->srp_host, ch->rx_ring[i]);
587 kfree(ch->rx_ring);
588 ch->rx_ring = NULL;
4d73f95f 589 }
509c07bc 590 if (ch->tx_ring) {
4d73f95f 591 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
592 srp_free_iu(target->srp_host, ch->tx_ring[i]);
593 kfree(ch->tx_ring);
594 ch->tx_ring = NULL;
4d73f95f 595 }
aef9ec39
RD
596}
597
598static void srp_path_rec_completion(int status,
599 struct ib_sa_path_rec *pathrec,
509c07bc 600 void *ch_ptr)
aef9ec39 601{
509c07bc
BVA
602 struct srp_rdma_ch *ch = ch_ptr;
603 struct srp_target_port *target = ch->target;
aef9ec39 604
509c07bc 605 ch->status = status;
aef9ec39 606 if (status)
7aa54bd7
DD
607 shost_printk(KERN_ERR, target->scsi_host,
608 PFX "Got failed path rec status %d\n", status);
aef9ec39 609 else
509c07bc
BVA
610 ch->path = *pathrec;
611 complete(&ch->done);
aef9ec39
RD
612}
613
509c07bc 614static int srp_lookup_path(struct srp_rdma_ch *ch)
aef9ec39 615{
509c07bc 616 struct srp_target_port *target = ch->target;
a702adce
BVA
617 int ret;
618
509c07bc
BVA
619 ch->path.numb_path = 1;
620
621 init_completion(&ch->done);
622
623 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
624 target->srp_host->srp_dev->dev,
625 target->srp_host->port,
626 &ch->path,
627 IB_SA_PATH_REC_SERVICE_ID |
628 IB_SA_PATH_REC_DGID |
629 IB_SA_PATH_REC_SGID |
630 IB_SA_PATH_REC_NUMB_PATH |
631 IB_SA_PATH_REC_PKEY,
632 SRP_PATH_REC_TIMEOUT_MS,
633 GFP_KERNEL,
634 srp_path_rec_completion,
635 ch, &ch->path_query);
636 if (ch->path_query_id < 0)
637 return ch->path_query_id;
638
639 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
640 if (ret < 0)
641 return ret;
aef9ec39 642
509c07bc 643 if (ch->status < 0)
7aa54bd7
DD
644 shost_printk(KERN_WARNING, target->scsi_host,
645 PFX "Path record query failed\n");
aef9ec39 646
509c07bc 647 return ch->status;
aef9ec39
RD
648}
649
509c07bc 650static int srp_send_req(struct srp_rdma_ch *ch)
aef9ec39 651{
509c07bc 652 struct srp_target_port *target = ch->target;
aef9ec39
RD
653 struct {
654 struct ib_cm_req_param param;
655 struct srp_login_req priv;
656 } *req = NULL;
657 int status;
658
659 req = kzalloc(sizeof *req, GFP_KERNEL);
660 if (!req)
661 return -ENOMEM;
662
509c07bc 663 req->param.primary_path = &ch->path;
aef9ec39
RD
664 req->param.alternate_path = NULL;
665 req->param.service_id = target->service_id;
509c07bc
BVA
666 req->param.qp_num = ch->qp->qp_num;
667 req->param.qp_type = ch->qp->qp_type;
aef9ec39
RD
668 req->param.private_data = &req->priv;
669 req->param.private_data_len = sizeof req->priv;
670 req->param.flow_control = 1;
671
672 get_random_bytes(&req->param.starting_psn, 4);
673 req->param.starting_psn &= 0xffffff;
674
675 /*
676 * Pick some arbitrary defaults here; we could make these
677 * module parameters if anyone cared about setting them.
678 */
679 req->param.responder_resources = 4;
680 req->param.remote_cm_response_timeout = 20;
681 req->param.local_cm_response_timeout = 20;
7bb312e4 682 req->param.retry_count = target->tl_retry_count;
aef9ec39
RD
683 req->param.rnr_retry_count = 7;
684 req->param.max_cm_retries = 15;
685
686 req->priv.opcode = SRP_LOGIN_REQ;
687 req->priv.tag = 0;
49248644 688 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
aef9ec39
RD
689 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
690 SRP_BUF_FORMAT_INDIRECT);
0c0450db 691 /*
3cd96564 692 * In the published SRP specification (draft rev. 16a), the
0c0450db
R
693 * port identifier format is 8 bytes of ID extension followed
694 * by 8 bytes of GUID. Older drafts put the two halves in the
695 * opposite order, so that the GUID comes first.
696 *
697 * Targets conforming to these obsolete drafts can be
698 * recognized by the I/O Class they report.
699 */
700 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
701 memcpy(req->priv.initiator_port_id,
747fe000 702 &target->sgid.global.interface_id, 8);
0c0450db 703 memcpy(req->priv.initiator_port_id + 8,
01cb9bcb 704 &target->initiator_ext, 8);
0c0450db
R
705 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
706 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
707 } else {
708 memcpy(req->priv.initiator_port_id,
01cb9bcb
IR
709 &target->initiator_ext, 8);
710 memcpy(req->priv.initiator_port_id + 8,
747fe000 711 &target->sgid.global.interface_id, 8);
0c0450db
R
712 memcpy(req->priv.target_port_id, &target->id_ext, 8);
713 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
714 }
715
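	/*
	 * Layout of the two 16-byte port identifiers built above (SRP draft
	 * rev. 16a format; SRP_REV10_IB_IO_CLASS targets get the two halves
	 * swapped):
	 *
	 *   initiator_port_id = initiator_ext (8 bytes) || source GID interface id
	 *   target_port_id    = id_ext        (8 bytes) || ioc_guid
	 */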
aef9ec39
RD
716 /*
717 * Topspin/Cisco SRP targets will reject our login unless we
01cb9bcb
IR
718 * zero out the first 8 bytes of our initiator port ID and set
719 * the second 8 bytes to the local node GUID.
aef9ec39 720 */
5d7cbfd6 721 if (srp_target_is_topspin(target)) {
7aa54bd7
DD
722 shost_printk(KERN_DEBUG, target->scsi_host,
723 PFX "Topspin/Cisco initiator port ID workaround "
724 "activated for target GUID %016llx\n",
725 (unsigned long long) be64_to_cpu(target->ioc_guid));
aef9ec39 726 memset(req->priv.initiator_port_id, 0, 8);
01cb9bcb 727 memcpy(req->priv.initiator_port_id + 8,
05321937 728 &target->srp_host->srp_dev->dev->node_guid, 8);
aef9ec39 729 }
aef9ec39 730
509c07bc 731 status = ib_send_cm_req(ch->cm_id, &req->param);
aef9ec39
RD
732
733 kfree(req);
734
735 return status;
736}
737
ef6c49d8
BVA
738static bool srp_queue_remove_work(struct srp_target_port *target)
739{
740 bool changed = false;
741
742 spin_lock_irq(&target->lock);
743 if (target->state != SRP_TARGET_REMOVED) {
744 target->state = SRP_TARGET_REMOVED;
745 changed = true;
746 }
747 spin_unlock_irq(&target->lock);
748
749 if (changed)
bcc05910 750 queue_work(srp_remove_wq, &target->remove_work);
ef6c49d8
BVA
751
752 return changed;
753}
754
294c875a
BVA
755static bool srp_change_conn_state(struct srp_target_port *target,
756 bool connected)
757{
758 bool changed = false;
759
760 spin_lock_irq(&target->lock);
761 if (target->connected != connected) {
762 target->connected = connected;
763 changed = true;
764 }
765 spin_unlock_irq(&target->lock);
766
767 return changed;
768}
769
aef9ec39
RD
770static void srp_disconnect_target(struct srp_target_port *target)
771{
509c07bc
BVA
772 struct srp_rdma_ch *ch = &target->ch;
773
294c875a
BVA
774 if (srp_change_conn_state(target, false)) {
775 /* XXX should send SRP_I_LOGOUT request */
aef9ec39 776
509c07bc 777 if (ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
294c875a
BVA
778 shost_printk(KERN_DEBUG, target->scsi_host,
779 PFX "Sending CM DREQ failed\n");
294c875a 780 }
e6581056 781 }
aef9ec39
RD
782}
783
509c07bc
BVA
784static void srp_free_req_data(struct srp_target_port *target,
785 struct srp_rdma_ch *ch)
8f26c9ff 786{
5cfb1782
BVA
787 struct srp_device *dev = target->srp_host->srp_dev;
788 struct ib_device *ibdev = dev->dev;
8f26c9ff
DD
789 struct srp_request *req;
790 int i;
791
509c07bc 792 if (!ch->req_ring)
4d73f95f
BVA
793 return;
794
795 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 796 req = &ch->req_ring[i];
5cfb1782
BVA
797 if (dev->use_fast_reg)
798 kfree(req->fr_list);
799 else
800 kfree(req->fmr_list);
8f26c9ff 801 kfree(req->map_page);
c07d424d
DD
802 if (req->indirect_dma_addr) {
803 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
804 target->indirect_size,
805 DMA_TO_DEVICE);
806 }
807 kfree(req->indirect_desc);
8f26c9ff 808 }
4d73f95f 809
509c07bc
BVA
810 kfree(ch->req_ring);
811 ch->req_ring = NULL;
8f26c9ff
DD
812}
813
509c07bc 814static int srp_alloc_req_data(struct srp_rdma_ch *ch)
b81d00bd 815{
509c07bc 816 struct srp_target_port *target = ch->target;
b81d00bd
BVA
817 struct srp_device *srp_dev = target->srp_host->srp_dev;
818 struct ib_device *ibdev = srp_dev->dev;
819 struct srp_request *req;
5cfb1782 820 void *mr_list;
b81d00bd
BVA
821 dma_addr_t dma_addr;
822 int i, ret = -ENOMEM;
823
509c07bc
BVA
824 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
825 GFP_KERNEL);
826 if (!ch->req_ring)
4d73f95f
BVA
827 goto out;
828
829 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 830 req = &ch->req_ring[i];
5cfb1782
BVA
831 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
832 GFP_KERNEL);
833 if (!mr_list)
834 goto out;
835 if (srp_dev->use_fast_reg)
836 req->fr_list = mr_list;
837 else
838 req->fmr_list = mr_list;
52ede08f 839 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
d1b4289e 840 sizeof(void *), GFP_KERNEL);
5cfb1782
BVA
841 if (!req->map_page)
842 goto out;
b81d00bd 843 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
5cfb1782 844 if (!req->indirect_desc)
b81d00bd
BVA
845 goto out;
846
847 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
848 target->indirect_size,
849 DMA_TO_DEVICE);
850 if (ib_dma_mapping_error(ibdev, dma_addr))
851 goto out;
852
853 req->indirect_dma_addr = dma_addr;
b81d00bd
BVA
854 }
855 ret = 0;
856
857out:
858 return ret;
859}
860
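/*
 * Every request slot allocated above carries its own FMR/FR descriptor
 * list, page array and indirect descriptor table, and the table is
 * DMA-mapped once here so that srp_map_data() only needs a dma_sync when
 * it spills an S/G list into the external indirect buffer.
 */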
683b159a
BVA
861/**
862 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
863 * @shost: SCSI host whose attributes to remove from sysfs.
864 *
 865 * Note: Any attributes defined in the host template that did not exist
866 * before invocation of this function will be ignored.
867 */
868static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
869{
870 struct device_attribute **attr;
871
872 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
873 device_remove_file(&shost->shost_dev, *attr);
874}
875
ee12d6a8
BVA
876static void srp_remove_target(struct srp_target_port *target)
877{
509c07bc
BVA
878 struct srp_rdma_ch *ch = &target->ch;
879
ef6c49d8
BVA
880 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
881
ee12d6a8 882 srp_del_scsi_host_attr(target->scsi_host);
9dd69a60 883 srp_rport_get(target->rport);
ee12d6a8
BVA
884 srp_remove_host(target->scsi_host);
885 scsi_remove_host(target->scsi_host);
93079162 886 srp_stop_rport_timers(target->rport);
ef6c49d8 887 srp_disconnect_target(target);
509c07bc 888 srp_free_ch_ib(target, ch);
c1120f89 889 cancel_work_sync(&target->tl_err_work);
9dd69a60 890 srp_rport_put(target->rport);
509c07bc 891 srp_free_req_data(target, ch);
65d7dd2f
VP
892
893 spin_lock(&target->srp_host->target_lock);
894 list_del(&target->list);
895 spin_unlock(&target->srp_host->target_lock);
896
ee12d6a8
BVA
897 scsi_host_put(target->scsi_host);
898}
899
c4028958 900static void srp_remove_work(struct work_struct *work)
aef9ec39 901{
c4028958 902 struct srp_target_port *target =
ef6c49d8 903 container_of(work, struct srp_target_port, remove_work);
aef9ec39 904
ef6c49d8 905 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
aef9ec39 906
96fc248a 907 srp_remove_target(target);
aef9ec39
RD
908}
909
dc1bdbd9
BVA
910static void srp_rport_delete(struct srp_rport *rport)
911{
912 struct srp_target_port *target = rport->lld_data;
913
914 srp_queue_remove_work(target);
915}
916
509c07bc 917static int srp_connect_ch(struct srp_rdma_ch *ch)
aef9ec39 918{
509c07bc 919 struct srp_target_port *target = ch->target;
aef9ec39
RD
920 int ret;
921
294c875a
BVA
922 WARN_ON_ONCE(target->connected);
923
948d1e88
BVA
924 target->qp_in_error = false;
925
509c07bc 926 ret = srp_lookup_path(ch);
aef9ec39
RD
927 if (ret)
928 return ret;
929
930 while (1) {
509c07bc
BVA
931 init_completion(&ch->done);
932 ret = srp_send_req(ch);
aef9ec39
RD
933 if (ret)
934 return ret;
509c07bc 935 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
936 if (ret < 0)
937 return ret;
aef9ec39
RD
938
939 /*
940 * The CM event handling code will set status to
941 * SRP_PORT_REDIRECT if we get a port redirect REJ
942 * back, or SRP_DLID_REDIRECT if we get a lid/qp
943 * redirect REJ back.
944 */
509c07bc 945 switch (ch->status) {
aef9ec39 946 case 0:
294c875a 947 srp_change_conn_state(target, true);
aef9ec39
RD
948 return 0;
949
950 case SRP_PORT_REDIRECT:
509c07bc 951 ret = srp_lookup_path(ch);
aef9ec39
RD
952 if (ret)
953 return ret;
954 break;
955
956 case SRP_DLID_REDIRECT:
957 break;
958
9fe4bcf4 959 case SRP_STALE_CONN:
9fe4bcf4 960 shost_printk(KERN_ERR, target->scsi_host, PFX
205619f2 961 "giving up on stale connection\n");
509c07bc
BVA
962 ch->status = -ECONNRESET;
963 return ch->status;
9fe4bcf4 964
aef9ec39 965 default:
509c07bc 966 return ch->status;
aef9ec39
RD
967 }
968 }
969}
970
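/*
 * The login loop above depends on srp_cm_handler() mapping REJ messages to
 * ch->status: SRP_PORT_REDIRECT triggers a fresh path record query before
 * the next SRP_LOGIN_REQ, SRP_DLID_REDIRECT simply resends the request,
 * and SRP_STALE_CONN aborts the attempt with -ECONNRESET.
 */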
509c07bc 971static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
5cfb1782
BVA
972{
973 struct ib_send_wr *bad_wr;
974 struct ib_send_wr wr = {
975 .opcode = IB_WR_LOCAL_INV,
976 .wr_id = LOCAL_INV_WR_ID_MASK,
977 .next = NULL,
978 .num_sge = 0,
979 .send_flags = 0,
980 .ex.invalidate_rkey = rkey,
981 };
982
509c07bc 983 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
984}
985
d945e1df 986static void srp_unmap_data(struct scsi_cmnd *scmnd,
509c07bc 987 struct srp_rdma_ch *ch,
d945e1df
RD
988 struct srp_request *req)
989{
509c07bc 990 struct srp_target_port *target = ch->target;
5cfb1782
BVA
991 struct srp_device *dev = target->srp_host->srp_dev;
992 struct ib_device *ibdev = dev->dev;
993 int i, res;
8f26c9ff 994
bb350d1d 995 if (!scsi_sglist(scmnd) ||
d945e1df
RD
996 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
997 scmnd->sc_data_direction != DMA_FROM_DEVICE))
998 return;
999
5cfb1782
BVA
1000 if (dev->use_fast_reg) {
1001 struct srp_fr_desc **pfr;
1002
1003 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
509c07bc 1004 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
5cfb1782
BVA
1005 if (res < 0) {
1006 shost_printk(KERN_ERR, target->scsi_host, PFX
1007 "Queueing INV WR for rkey %#x failed (%d)\n",
1008 (*pfr)->mr->rkey, res);
1009 queue_work(system_long_wq,
1010 &target->tl_err_work);
1011 }
1012 }
1013 if (req->nmdesc)
509c07bc 1014 srp_fr_pool_put(ch->fr_pool, req->fr_list,
5cfb1782
BVA
1015 req->nmdesc);
1016 } else {
1017 struct ib_pool_fmr **pfmr;
1018
1019 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1020 ib_fmr_pool_unmap(*pfmr);
1021 }
f5358a17 1022
8f26c9ff
DD
1023 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1024 scmnd->sc_data_direction);
d945e1df
RD
1025}
1026
22032991
BVA
1027/**
1028 * srp_claim_req - Take ownership of the scmnd associated with a request.
509c07bc 1029 * @ch: SRP RDMA channel.
22032991 1030 * @req: SRP request.
b3fe628d 1031 * @sdev: If not NULL, only take ownership for this SCSI device.
22032991
BVA
1032 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1033 * ownership of @req->scmnd if it equals @scmnd.
1034 *
1035 * Return value:
1036 * Either NULL or a pointer to the SCSI command the caller became owner of.
1037 */
509c07bc 1038static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
22032991 1039 struct srp_request *req,
b3fe628d 1040 struct scsi_device *sdev,
22032991
BVA
1041 struct scsi_cmnd *scmnd)
1042{
1043 unsigned long flags;
1044
509c07bc 1045 spin_lock_irqsave(&ch->lock, flags);
b3fe628d
BVA
1046 if (req->scmnd &&
1047 (!sdev || req->scmnd->device == sdev) &&
1048 (!scmnd || req->scmnd == scmnd)) {
22032991
BVA
1049 scmnd = req->scmnd;
1050 req->scmnd = NULL;
22032991
BVA
1051 } else {
1052 scmnd = NULL;
1053 }
509c07bc 1054 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1055
1056 return scmnd;
1057}
1058
1059/**
1060 * srp_free_req() - Unmap data and add request to the free request list.
509c07bc 1061 * @ch: SRP RDMA channel.
af24663b
BVA
1062 * @req: Request to be freed.
1063 * @scmnd: SCSI command associated with @req.
1064 * @req_lim_delta: Amount to be added to @target->req_lim.
22032991 1065 */
509c07bc
BVA
1066static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1067 struct scsi_cmnd *scmnd, s32 req_lim_delta)
526b4caa 1068{
94a9174c
BVA
1069 unsigned long flags;
1070
509c07bc 1071 srp_unmap_data(scmnd, ch, req);
22032991 1072
509c07bc
BVA
1073 spin_lock_irqsave(&ch->lock, flags);
1074 ch->req_lim += req_lim_delta;
509c07bc 1075 spin_unlock_irqrestore(&ch->lock, flags);
526b4caa
IR
1076}
1077
509c07bc
BVA
1078static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1079 struct scsi_device *sdev, int result)
526b4caa 1080{
509c07bc 1081 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
22032991
BVA
1082
1083 if (scmnd) {
509c07bc 1084 srp_free_req(ch, req, scmnd, 0);
ed9b2264 1085 scmnd->result = result;
22032991 1086 scmnd->scsi_done(scmnd);
22032991 1087 }
526b4caa
IR
1088}
1089
ed9b2264 1090static void srp_terminate_io(struct srp_rport *rport)
aef9ec39 1091{
ed9b2264 1092 struct srp_target_port *target = rport->lld_data;
509c07bc 1093 struct srp_rdma_ch *ch = &target->ch;
b3fe628d
BVA
1094 struct Scsi_Host *shost = target->scsi_host;
1095 struct scsi_device *sdev;
ed9b2264
BVA
1096 int i;
1097
b3fe628d
BVA
1098 /*
1099 * Invoking srp_terminate_io() while srp_queuecommand() is running
1100 * is not safe. Hence the warning statement below.
1101 */
1102 shost_for_each_device(sdev, shost)
1103 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1104
4d73f95f 1105 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc
BVA
1106 struct srp_request *req = &ch->req_ring[i];
1107
1108 srp_finish_req(ch, req, NULL, DID_TRANSPORT_FAILFAST << 16);
ed9b2264
BVA
1109 }
1110}
aef9ec39 1111
ed9b2264
BVA
1112/*
1113 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1114 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1115 * srp_reset_device() or srp_reset_host() calls will occur while this function
1116 * is in progress. One way to realize that is not to call this function
1117 * directly but to call srp_reconnect_rport() instead since that last function
1118 * serializes calls of this function via rport->mutex and also blocks
1119 * srp_queuecommand() calls before invoking this function.
1120 */
1121static int srp_rport_reconnect(struct srp_rport *rport)
1122{
1123 struct srp_target_port *target = rport->lld_data;
509c07bc 1124 struct srp_rdma_ch *ch = &target->ch;
ed9b2264 1125 int i, ret;
09be70a2 1126
aef9ec39 1127 srp_disconnect_target(target);
34aa654e
BVA
1128
1129 if (target->state == SRP_TARGET_SCANNING)
1130 return -ENODEV;
1131
aef9ec39 1132 /*
c7c4e7ff
BVA
1133 * Now get a new local CM ID so that we avoid confusing the target in
1134 * case things are really fouled up. Doing so also ensures that all CM
1135 * callbacks will have finished before a new QP is allocated.
aef9ec39 1136 */
509c07bc 1137 ret = srp_new_cm_id(ch);
aef9ec39 1138
4d73f95f 1139 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc
BVA
1140 struct srp_request *req = &ch->req_ring[i];
1141
1142 srp_finish_req(ch, req, NULL, DID_RESET << 16);
536ae14e 1143 }
aef9ec39 1144
5cfb1782
BVA
1145 /*
1146 * Whether or not creating a new CM ID succeeded, create a new
1147 * QP. This guarantees that all callback functions for the old QP have
1148 * finished before any send requests are posted on the new QP.
1149 */
509c07bc 1150 ret += srp_create_ch_ib(ch);
5cfb1782 1151
509c07bc 1152 INIT_LIST_HEAD(&ch->free_tx);
4d73f95f 1153 for (i = 0; i < target->queue_size; ++i)
509c07bc 1154 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39 1155
c7c4e7ff 1156 if (ret == 0)
509c07bc 1157 ret = srp_connect_ch(ch);
09be70a2 1158
ed9b2264
BVA
1159 if (ret == 0)
1160 shost_printk(KERN_INFO, target->scsi_host,
1161 PFX "reconnect succeeded\n");
aef9ec39
RD
1162
1163 return ret;
1164}
1165
8f26c9ff
DD
1166static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1167 unsigned int dma_len, u32 rkey)
f5358a17 1168{
8f26c9ff 1169 struct srp_direct_buf *desc = state->desc;
f5358a17 1170
8f26c9ff
DD
1171 desc->va = cpu_to_be64(dma_addr);
1172 desc->key = cpu_to_be32(rkey);
1173 desc->len = cpu_to_be32(dma_len);
f5358a17 1174
8f26c9ff
DD
1175 state->total_len += dma_len;
1176 state->desc++;
1177 state->ndesc++;
1178}
559ce8f1 1179
8f26c9ff 1180static int srp_map_finish_fmr(struct srp_map_state *state,
509c07bc 1181 struct srp_rdma_ch *ch)
8f26c9ff 1182{
8f26c9ff
DD
1183 struct ib_pool_fmr *fmr;
1184 u64 io_addr = 0;
85507bcc 1185
509c07bc 1186 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
8f26c9ff
DD
1187 state->npages, io_addr);
1188 if (IS_ERR(fmr))
1189 return PTR_ERR(fmr);
f5358a17 1190
8f26c9ff 1191 *state->next_fmr++ = fmr;
52ede08f 1192 state->nmdesc++;
f5358a17 1193
52ede08f 1194 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
539dde6f 1195
8f26c9ff
DD
1196 return 0;
1197}
1198
5cfb1782 1199static int srp_map_finish_fr(struct srp_map_state *state,
509c07bc 1200 struct srp_rdma_ch *ch)
5cfb1782 1201{
509c07bc 1202 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1203 struct srp_device *dev = target->srp_host->srp_dev;
1204 struct ib_send_wr *bad_wr;
1205 struct ib_send_wr wr;
1206 struct srp_fr_desc *desc;
1207 u32 rkey;
1208
509c07bc 1209 desc = srp_fr_pool_get(ch->fr_pool);
5cfb1782
BVA
1210 if (!desc)
1211 return -ENOMEM;
1212
1213 rkey = ib_inc_rkey(desc->mr->rkey);
1214 ib_update_fast_reg_key(desc->mr, rkey);
1215
1216 memcpy(desc->frpl->page_list, state->pages,
1217 sizeof(state->pages[0]) * state->npages);
1218
1219 memset(&wr, 0, sizeof(wr));
1220 wr.opcode = IB_WR_FAST_REG_MR;
1221 wr.wr_id = FAST_REG_WR_ID_MASK;
1222 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1223 wr.wr.fast_reg.page_list = desc->frpl;
1224 wr.wr.fast_reg.page_list_len = state->npages;
1225 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1226 wr.wr.fast_reg.length = state->dma_len;
1227 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1228 IB_ACCESS_REMOTE_READ |
1229 IB_ACCESS_REMOTE_WRITE);
1230 wr.wr.fast_reg.rkey = desc->mr->lkey;
1231
1232 *state->next_fr++ = desc;
1233 state->nmdesc++;
1234
1235 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1236 desc->mr->rkey);
1237
509c07bc 1238 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
1239}
1240
539dde6f 1241static int srp_finish_mapping(struct srp_map_state *state,
509c07bc 1242 struct srp_rdma_ch *ch)
539dde6f 1243{
509c07bc 1244 struct srp_target_port *target = ch->target;
539dde6f
BVA
1245 int ret = 0;
1246
1247 if (state->npages == 0)
1248 return 0;
1249
b1b8854d 1250 if (state->npages == 1 && !register_always)
52ede08f 1251 srp_map_desc(state, state->base_dma_addr, state->dma_len,
539dde6f
BVA
1252 target->rkey);
1253 else
5cfb1782 1254 ret = target->srp_host->srp_dev->use_fast_reg ?
509c07bc
BVA
1255 srp_map_finish_fr(state, ch) :
1256 srp_map_finish_fmr(state, ch);
539dde6f
BVA
1257
1258 if (ret == 0) {
1259 state->npages = 0;
52ede08f 1260 state->dma_len = 0;
539dde6f
BVA
1261 }
1262
1263 return ret;
1264}
1265
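/*
 * srp_finish_mapping() is the point where the accumulated page list turns
 * into a descriptor: a single page with register_always disabled becomes a
 * direct descriptor using the global rkey, anything else is registered
 * through the FR or FMR pool depending on srp_device.use_fast_reg.
 */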
8f26c9ff
DD
1266static void srp_map_update_start(struct srp_map_state *state,
1267 struct scatterlist *sg, int sg_index,
1268 dma_addr_t dma_addr)
1269{
1270 state->unmapped_sg = sg;
1271 state->unmapped_index = sg_index;
1272 state->unmapped_addr = dma_addr;
1273}
85507bcc 1274
8f26c9ff 1275static int srp_map_sg_entry(struct srp_map_state *state,
509c07bc 1276 struct srp_rdma_ch *ch,
8f26c9ff 1277 struct scatterlist *sg, int sg_index,
5cfb1782 1278 bool use_mr)
8f26c9ff 1279{
509c07bc 1280 struct srp_target_port *target = ch->target;
8f26c9ff
DD
1281 struct srp_device *dev = target->srp_host->srp_dev;
1282 struct ib_device *ibdev = dev->dev;
1283 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1284 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1285 unsigned int len;
1286 int ret;
1287
1288 if (!dma_len)
1289 return 0;
1290
5cfb1782
BVA
1291 if (!use_mr) {
1292 /*
1293 * Once we're in direct map mode for a request, we don't
1294 * go back to FMR or FR mode, so no need to update anything
8f26c9ff
DD
1295 * other than the descriptor.
1296 */
1297 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1298 return 0;
85507bcc 1299 }
f5358a17 1300
5cfb1782
BVA
1301 /*
1302 * Since not all RDMA HW drivers support non-zero page offsets for
1303 * FMR, if we start at an offset into a page, don't merge into the
1304 * current FMR mapping. Finish it out, and use the kernel's MR for
1305 * this sg entry.
8f26c9ff 1306 */
5cfb1782
BVA
1307 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1308 dma_len > dev->mr_max_size) {
509c07bc 1309 ret = srp_finish_mapping(state, ch);
8f26c9ff
DD
1310 if (ret)
1311 return ret;
1312
1313 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1314 srp_map_update_start(state, NULL, 0, 0);
1315 return 0;
f5358a17
RD
1316 }
1317
5cfb1782
BVA
1318 /*
1319 * If this is the first sg that will be mapped via FMR or via FR, save
1320 * our position. We need to know the first unmapped entry, its index,
1321 * and the first unmapped address within that entry to be able to
1322 * restart mapping after an error.
8f26c9ff
DD
1323 */
1324 if (!state->unmapped_sg)
1325 srp_map_update_start(state, sg, sg_index, dma_addr);
f5358a17 1326
8f26c9ff 1327 while (dma_len) {
5cfb1782
BVA
1328 unsigned offset = dma_addr & ~dev->mr_page_mask;
1329 if (state->npages == dev->max_pages_per_mr || offset != 0) {
509c07bc 1330 ret = srp_finish_mapping(state, ch);
8f26c9ff
DD
1331 if (ret)
1332 return ret;
f5358a17 1333
8f26c9ff
DD
1334 srp_map_update_start(state, sg, sg_index, dma_addr);
1335 }
1336
5cfb1782 1337 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
f5358a17 1338
8f26c9ff
DD
1339 if (!state->npages)
1340 state->base_dma_addr = dma_addr;
5cfb1782 1341 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
52ede08f 1342 state->dma_len += len;
8f26c9ff
DD
1343 dma_addr += len;
1344 dma_len -= len;
1345 }
1346
5cfb1782
BVA
1347 /*
1348 * If the last entry of the MR wasn't a full page, then we need to
8f26c9ff
DD
1349 * close it out and start a new one -- we can only merge at page
 1350 * boundaries.
1351 */
1352 ret = 0;
52ede08f 1353 if (len != dev->mr_page_size) {
509c07bc 1354 ret = srp_finish_mapping(state, ch);
8f26c9ff
DD
1355 if (!ret)
1356 srp_map_update_start(state, NULL, 0, 0);
1357 }
f5358a17
RD
1358 return ret;
1359}
1360
509c07bc
BVA
1361static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1362 struct srp_request *req, struct scatterlist *scat,
1363 int count)
76bc1e1d 1364{
509c07bc 1365 struct srp_target_port *target = ch->target;
76bc1e1d
BVA
1366 struct srp_device *dev = target->srp_host->srp_dev;
1367 struct ib_device *ibdev = dev->dev;
1368 struct scatterlist *sg;
5cfb1782
BVA
1369 int i;
1370 bool use_mr;
76bc1e1d
BVA
1371
1372 state->desc = req->indirect_desc;
1373 state->pages = req->map_page;
5cfb1782
BVA
1374 if (dev->use_fast_reg) {
1375 state->next_fr = req->fr_list;
509c07bc 1376 use_mr = !!ch->fr_pool;
5cfb1782
BVA
1377 } else {
1378 state->next_fmr = req->fmr_list;
509c07bc 1379 use_mr = !!ch->fmr_pool;
5cfb1782 1380 }
76bc1e1d
BVA
1381
1382 for_each_sg(scat, sg, count, i) {
509c07bc 1383 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
5cfb1782
BVA
1384 /*
1385 * Memory registration failed, so backtrack to the
1386 * first unmapped entry and continue on without using
1387 * memory registration.
76bc1e1d
BVA
1388 */
1389 dma_addr_t dma_addr;
1390 unsigned int dma_len;
1391
1392backtrack:
1393 sg = state->unmapped_sg;
1394 i = state->unmapped_index;
1395
1396 dma_addr = ib_sg_dma_address(ibdev, sg);
1397 dma_len = ib_sg_dma_len(ibdev, sg);
1398 dma_len -= (state->unmapped_addr - dma_addr);
1399 dma_addr = state->unmapped_addr;
5cfb1782 1400 use_mr = false;
76bc1e1d
BVA
1401 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1402 }
1403 }
1404
509c07bc 1405 if (use_mr && srp_finish_mapping(state, ch))
76bc1e1d
BVA
1406 goto backtrack;
1407
52ede08f 1408 req->nmdesc = state->nmdesc;
5cfb1782
BVA
1409
1410 return 0;
76bc1e1d
BVA
1411}
1412
509c07bc 1413static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1414 struct srp_request *req)
1415{
509c07bc 1416 struct srp_target_port *target = ch->target;
76bc1e1d 1417 struct scatterlist *scat;
aef9ec39 1418 struct srp_cmd *cmd = req->cmd->buf;
76bc1e1d 1419 int len, nents, count;
85507bcc
RC
1420 struct srp_device *dev;
1421 struct ib_device *ibdev;
8f26c9ff
DD
1422 struct srp_map_state state;
1423 struct srp_indirect_buf *indirect_hdr;
8f26c9ff
DD
1424 u32 table_len;
1425 u8 fmt;
aef9ec39 1426
bb350d1d 1427 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
aef9ec39
RD
1428 return sizeof (struct srp_cmd);
1429
1430 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1431 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1432 shost_printk(KERN_WARNING, target->scsi_host,
1433 PFX "Unhandled data direction %d\n",
1434 scmnd->sc_data_direction);
aef9ec39
RD
1435 return -EINVAL;
1436 }
1437
bb350d1d
FT
1438 nents = scsi_sg_count(scmnd);
1439 scat = scsi_sglist(scmnd);
aef9ec39 1440
05321937 1441 dev = target->srp_host->srp_dev;
85507bcc
RC
1442 ibdev = dev->dev;
1443
1444 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1445 if (unlikely(count == 0))
1446 return -EIO;
f5358a17
RD
1447
1448 fmt = SRP_DATA_DESC_DIRECT;
1449 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
aef9ec39 1450
b1b8854d 1451 if (count == 1 && !register_always) {
f5358a17
RD
1452 /*
1453 * The midlayer only generated a single gather/scatter
1454 * entry, or DMA mapping coalesced everything to a
1455 * single entry. So a direct descriptor along with
1456 * the DMA MR suffices.
1457 */
cf368713 1458 struct srp_direct_buf *buf = (void *) cmd->add_data;
aef9ec39 1459
85507bcc 1460 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
9af76271 1461 buf->key = cpu_to_be32(target->rkey);
85507bcc 1462 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
8f26c9ff 1463
52ede08f 1464 req->nmdesc = 0;
8f26c9ff
DD
1465 goto map_complete;
1466 }
1467
5cfb1782
BVA
1468 /*
1469 * We have more than one scatter/gather entry, so build our indirect
1470 * descriptor table, trying to merge as many entries as we can.
8f26c9ff
DD
1471 */
1472 indirect_hdr = (void *) cmd->add_data;
1473
c07d424d
DD
1474 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1475 target->indirect_size, DMA_TO_DEVICE);
1476
8f26c9ff 1477 memset(&state, 0, sizeof(state));
509c07bc 1478 srp_map_sg(&state, ch, req, scat, count);
cf368713 1479
c07d424d
DD
1480 /* We've mapped the request, now pull as much of the indirect
1481 * descriptor table as we can into the command buffer. If this
1482 * target is not using an external indirect table, we are
1483 * guaranteed to fit into the command, as the SCSI layer won't
1484 * give us more S/G entries than we allow.
8f26c9ff 1485 */
8f26c9ff 1486 if (state.ndesc == 1) {
5cfb1782
BVA
1487 /*
1488 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1489 * so use a direct descriptor.
1490 */
1491 struct srp_direct_buf *buf = (void *) cmd->add_data;
cf368713 1492
c07d424d 1493 *buf = req->indirect_desc[0];
8f26c9ff 1494 goto map_complete;
aef9ec39
RD
1495 }
1496
c07d424d
DD
1497 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1498 !target->allow_ext_sg)) {
1499 shost_printk(KERN_ERR, target->scsi_host,
1500 "Could not fit S/G list into SRP_CMD\n");
1501 return -EIO;
1502 }
1503
1504 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff
DD
1505 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1506
1507 fmt = SRP_DATA_DESC_INDIRECT;
1508 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1509 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1510
c07d424d
DD
1511 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1512 count * sizeof (struct srp_direct_buf));
8f26c9ff 1513
c07d424d 1514 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
8f26c9ff
DD
1515 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1516 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1517 indirect_hdr->len = cpu_to_be32(state.total_len);
1518
1519 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1520 cmd->data_out_desc_cnt = count;
8f26c9ff 1521 else
c07d424d
DD
1522 cmd->data_in_desc_cnt = count;
1523
1524 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1525 DMA_TO_DEVICE);
8f26c9ff
DD
1526
1527map_complete:
aef9ec39
RD
1528 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1529 cmd->buf_fmt = fmt << 4;
1530 else
1531 cmd->buf_fmt = fmt;
1532
aef9ec39
RD
1533 return len;
1534}
1535
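/*
 * To summarize srp_map_data(): the request ends up either as a single
 * direct descriptor (one S/G entry, or registration collapsed the list to
 * one entry) or as an indirect descriptor table; as many table entries as
 * fit (up to cmd_sg_cnt) are copied into the SRP_CMD itself, while
 * table_desc points at the full table in req->indirect_desc for targets
 * that need to fetch the remainder.
 */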
76c75b25
BVA
1536/*
1537 * Return an IU and possible credit to the free pool
1538 */
509c07bc 1539static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1540 enum srp_iu_type iu_type)
1541{
1542 unsigned long flags;
1543
509c07bc
BVA
1544 spin_lock_irqsave(&ch->lock, flags);
1545 list_add(&iu->list, &ch->free_tx);
76c75b25 1546 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1547 ++ch->req_lim;
1548 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1549}
1550
05a1d750 1551/*
509c07bc 1552 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1553 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1554 *
1555 * Note:
1556 * An upper limit for the number of allocated information units for each
1557 * request type is:
1558 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1559 * more than Scsi_Host.can_queue requests.
1560 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1561 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1562 * one unanswered SRP request to an initiator.
1563 */
509c07bc 1564static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
1565 enum srp_iu_type iu_type)
1566{
509c07bc 1567 struct srp_target_port *target = ch->target;
05a1d750
DD
1568 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1569 struct srp_iu *iu;
1570
509c07bc 1571 srp_send_completion(ch->send_cq, ch);
05a1d750 1572
509c07bc 1573 if (list_empty(&ch->free_tx))
05a1d750
DD
1574 return NULL;
1575
1576 /* Initiator responses to target requests do not consume credits */
76c75b25 1577 if (iu_type != SRP_IU_RSP) {
509c07bc 1578 if (ch->req_lim <= rsv) {
76c75b25
BVA
1579 ++target->zero_req_lim;
1580 return NULL;
1581 }
1582
509c07bc 1583 --ch->req_lim;
05a1d750
DD
1584 }
1585
509c07bc 1586 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1587 list_del(&iu->list);
05a1d750
DD
1588 return iu;
1589}
1590
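/*
 * The credit check above keeps SRP_TSK_MGMT_SQ_SIZE request-limit credits
 * in reserve for everything except task management IUs, and SRP_IU_RSP
 * allocations bypass the check entirely since initiator responses do not
 * consume target credits; zero_req_lim counts how often a command had to
 * be refused for lack of credits.
 */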
509c07bc 1591static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1592{
509c07bc 1593 struct srp_target_port *target = ch->target;
05a1d750
DD
1594 struct ib_sge list;
1595 struct ib_send_wr wr, *bad_wr;
05a1d750
DD
1596
1597 list.addr = iu->dma;
1598 list.length = len;
9af76271 1599 list.lkey = target->lkey;
05a1d750
DD
1600
1601 wr.next = NULL;
dcb4cb85 1602 wr.wr_id = (uintptr_t) iu;
05a1d750
DD
1603 wr.sg_list = &list;
1604 wr.num_sge = 1;
1605 wr.opcode = IB_WR_SEND;
1606 wr.send_flags = IB_SEND_SIGNALED;
1607
509c07bc 1608 return ib_post_send(ch->qp, &wr, &bad_wr);
05a1d750
DD
1609}
1610
509c07bc 1611static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1612{
509c07bc 1613 struct srp_target_port *target = ch->target;
c996bb47 1614 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1615 struct ib_sge list;
c996bb47
BVA
1616
1617 list.addr = iu->dma;
1618 list.length = iu->size;
9af76271 1619 list.lkey = target->lkey;
c996bb47
BVA
1620
1621 wr.next = NULL;
dcb4cb85 1622 wr.wr_id = (uintptr_t) iu;
c996bb47
BVA
1623 wr.sg_list = &list;
1624 wr.num_sge = 1;
1625
509c07bc 1626 return ib_post_recv(ch->qp, &wr, &bad_wr);
c996bb47
BVA
1627}
1628
509c07bc 1629static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1630{
509c07bc 1631 struct srp_target_port *target = ch->target;
aef9ec39
RD
1632 struct srp_request *req;
1633 struct scsi_cmnd *scmnd;
1634 unsigned long flags;
aef9ec39 1635
aef9ec39 1636 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
1637 spin_lock_irqsave(&ch->lock, flags);
1638 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1639 spin_unlock_irqrestore(&ch->lock, flags);
94a9174c 1640
509c07bc 1641 ch->tsk_mgmt_status = -1;
f8b6e31e 1642 if (be32_to_cpu(rsp->resp_data_len) >= 4)
509c07bc
BVA
1643 ch->tsk_mgmt_status = rsp->data[3];
1644 complete(&ch->tsk_mgmt_done);
aef9ec39 1645 } else {
77f2c1a4
BVA
1646 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1647 if (scmnd) {
1648 req = (void *)scmnd->host_scribble;
1649 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1650 }
22032991 1651 if (!scmnd) {
7aa54bd7
DD
1652 shost_printk(KERN_ERR, target->scsi_host,
1653 "Null scmnd for RSP w/tag %016llx\n",
1654 (unsigned long long) rsp->tag);
22032991 1655
509c07bc
BVA
1656 spin_lock_irqsave(&ch->lock, flags);
1657 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1658 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1659
1660 return;
1661 }
aef9ec39
RD
1662 scmnd->result = rsp->status;
1663
1664 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1665 memcpy(scmnd->sense_buffer, rsp->data +
1666 be32_to_cpu(rsp->resp_data_len),
1667 min_t(int, be32_to_cpu(rsp->sense_data_len),
1668 SCSI_SENSE_BUFFERSIZE));
1669 }
1670
e714531a 1671 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1672 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
1673 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1674 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1675 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1676 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1677 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1678 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1679
509c07bc 1680 srp_free_req(ch, req, scmnd,
22032991
BVA
1681 be32_to_cpu(rsp->req_lim_delta));
1682
f8b6e31e
DD
1683 scmnd->host_scribble = NULL;
1684 scmnd->scsi_done(scmnd);
aef9ec39 1685 }
aef9ec39
RD
1686}
1687
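/*
 * The lookup above is the core of the "use block layer tags" change: the
 * tag carried in the SRP_RSP is the block layer tag of the original
 * command, so scsi_host_find_tag() recovers the scsi_cmnd (and, via
 * host_scribble, the srp_request) without the driver having to maintain
 * its own tag-to-request mapping.
 */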
509c07bc 1688static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
1689 void *rsp, int len)
1690{
509c07bc 1691 struct srp_target_port *target = ch->target;
76c75b25 1692 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
1693 unsigned long flags;
1694 struct srp_iu *iu;
76c75b25 1695 int err;
bb12588a 1696
509c07bc
BVA
1697 spin_lock_irqsave(&ch->lock, flags);
1698 ch->req_lim += req_delta;
1699 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1700 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 1701
bb12588a
DD
1702 if (!iu) {
1703 shost_printk(KERN_ERR, target->scsi_host, PFX
1704 "no IU available to send response\n");
76c75b25 1705 return 1;
bb12588a
DD
1706 }
1707
1708 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1709 memcpy(iu->buf, rsp, len);
1710 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1711
509c07bc 1712 err = srp_post_send(ch, iu, len);
76c75b25 1713 if (err) {
bb12588a
DD
1714 shost_printk(KERN_ERR, target->scsi_host, PFX
1715 "unable to post response: %d\n", err);
509c07bc 1716 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 1717 }
bb12588a 1718
bb12588a
DD
1719 return err;
1720}
1721
509c07bc 1722static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
1723 struct srp_cred_req *req)
1724{
1725 struct srp_cred_rsp rsp = {
1726 .opcode = SRP_CRED_RSP,
1727 .tag = req->tag,
1728 };
1729 s32 delta = be32_to_cpu(req->req_lim_delta);
1730
509c07bc
BVA
1731 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1732 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
1733 "problems processing SRP_CRED_REQ\n");
1734}
1735
509c07bc 1736static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
1737 struct srp_aer_req *req)
1738{
509c07bc 1739 struct srp_target_port *target = ch->target;
bb12588a
DD
1740 struct srp_aer_rsp rsp = {
1741 .opcode = SRP_AER_RSP,
1742 .tag = req->tag,
1743 };
1744 s32 delta = be32_to_cpu(req->req_lim_delta);
1745
1746 shost_printk(KERN_ERR, target->scsi_host, PFX
1747 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1748
509c07bc 1749 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
1750 shost_printk(KERN_ERR, target->scsi_host, PFX
1751 "problems processing SRP_AER_REQ\n");
1752}
1753
509c07bc 1754static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
aef9ec39 1755{
509c07bc 1756 struct srp_target_port *target = ch->target;
dcb4cb85 1757 struct ib_device *dev = target->srp_host->srp_dev->dev;
737b94eb 1758 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
c996bb47 1759 int res;
aef9ec39
RD
1760 u8 opcode;
1761
509c07bc 1762 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1763 DMA_FROM_DEVICE);
aef9ec39
RD
1764
1765 opcode = *(u8 *) iu->buf;
1766
1767 if (0) {
7aa54bd7
DD
1768 shost_printk(KERN_ERR, target->scsi_host,
1769 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
1770 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1771 iu->buf, wc->byte_len, true);
aef9ec39
RD
1772 }
1773
1774 switch (opcode) {
1775 case SRP_RSP:
509c07bc 1776 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
1777 break;
1778
bb12588a 1779 case SRP_CRED_REQ:
509c07bc 1780 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
1781 break;
1782
1783 case SRP_AER_REQ:
509c07bc 1784 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
1785 break;
1786
aef9ec39
RD
1787 case SRP_T_LOGOUT:
1788 /* XXX Handle target logout */
7aa54bd7
DD
1789 shost_printk(KERN_WARNING, target->scsi_host,
1790 PFX "Got target logout request\n");
aef9ec39
RD
1791 break;
1792
1793 default:
7aa54bd7
DD
1794 shost_printk(KERN_WARNING, target->scsi_host,
1795 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
1796 break;
1797 }
1798
509c07bc 1799 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1800 DMA_FROM_DEVICE);
c996bb47 1801
509c07bc 1802 res = srp_post_recv(ch, iu);
c996bb47
BVA
1803 if (res != 0)
1804 shost_printk(KERN_ERR, target->scsi_host,
1805 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
1806}
1807
c1120f89
BVA
1808/**
1809 * srp_tl_err_work() - handle a transport layer error
af24663b 1810 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
1811 *
1812 * Note: This function may get invoked before the rport has been created,
1813 * hence the target->rport test.
1814 */
1815static void srp_tl_err_work(struct work_struct *work)
1816{
1817 struct srp_target_port *target;
1818
1819 target = container_of(work, struct srp_target_port, tl_err_work);
1820 if (target->rport)
1821 srp_start_tl_fail_timers(target->rport);
1822}
1823
5cfb1782
BVA
1824static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1825 bool send_err, struct srp_target_port *target)
948d1e88 1826{
294c875a 1827 if (target->connected && !target->qp_in_error) {
5cfb1782
BVA
1828 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1829 shost_printk(KERN_ERR, target->scsi_host, PFX
1830 "LOCAL_INV failed with status %d\n",
1831 wc_status);
1832 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1833 shost_printk(KERN_ERR, target->scsi_host, PFX
1834 "FAST_REG_MR failed status %d\n",
1835 wc_status);
1836 } else {
1837 shost_printk(KERN_ERR, target->scsi_host,
1838 PFX "failed %s status %d for iu %p\n",
1839 send_err ? "send" : "receive",
1840 wc_status, (void *)(uintptr_t)wr_id);
1841 }
c1120f89 1842 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 1843 }
948d1e88
BVA
1844 target->qp_in_error = true;
1845}
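/*
 * Note (descriptive, not functional): the LOCAL_INV_WR_ID_MASK and
 * FAST_REG_WR_ID_MASK tests above assume that the memory registration
 * path elsewhere in this file tags its work request IDs with those bits,
 * so that failed LOCAL_INV / FAST_REG_MR completions can be told apart
 * from failed send/receive completions for information units.
 */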
1846
509c07bc 1847static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
aef9ec39 1848{
509c07bc 1849 struct srp_rdma_ch *ch = ch_ptr;
aef9ec39 1850 struct ib_wc wc;
aef9ec39
RD
1851
1852 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1853 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88 1854 if (likely(wc.status == IB_WC_SUCCESS)) {
509c07bc 1855 srp_handle_recv(ch, &wc);
948d1e88 1856 } else {
509c07bc
BVA
1857 srp_handle_qp_err(wc.wr_id, wc.status, false,
1858 ch->target);
aef9ec39 1859 }
9c03dc9f
BVA
1860 }
1861}
1862
509c07bc 1863static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
9c03dc9f 1864{
509c07bc 1865 struct srp_rdma_ch *ch = ch_ptr;
9c03dc9f 1866 struct ib_wc wc;
dcb4cb85 1867 struct srp_iu *iu;
9c03dc9f
BVA
1868
1869 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88
BVA
1870 if (likely(wc.status == IB_WC_SUCCESS)) {
1871 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
509c07bc 1872 list_add(&iu->list, &ch->free_tx);
948d1e88 1873 } else {
509c07bc
BVA
1874 srp_handle_qp_err(wc.wr_id, wc.status, true,
1875 ch->target);
9c03dc9f 1876 }
aef9ec39
RD
1877 }
1878}
1879
76c75b25 1880static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 1881{
76c75b25 1882 struct srp_target_port *target = host_to_target(shost);
a95cadb9 1883 struct srp_rport *rport = target->rport;
509c07bc 1884 struct srp_rdma_ch *ch;
aef9ec39
RD
1885 struct srp_request *req;
1886 struct srp_iu *iu;
1887 struct srp_cmd *cmd;
85507bcc 1888 struct ib_device *dev;
76c75b25 1889 unsigned long flags;
77f2c1a4
BVA
1890 u32 tag;
1891 u16 idx;
d1b4289e 1892 int len, ret;
a95cadb9
BVA
1893 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1894
1895 /*
1896 * The SCSI EH thread is the only context from which srp_queuecommand()
1897 * can get invoked for blocked devices (SDEV_BLOCK /
1898 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1899 * locking the rport mutex if invoked from inside the SCSI EH.
1900 */
1901 if (in_scsi_eh)
1902 mutex_lock(&rport->mutex);
aef9ec39 1903
d1b4289e
BVA
1904 scmnd->result = srp_chkready(target->rport);
1905 if (unlikely(scmnd->result))
1906 goto err;
2ce19e72 1907
77f2c1a4
BVA
1908 WARN_ON_ONCE(scmnd->request->tag < 0);
1909 tag = blk_mq_unique_tag(scmnd->request);
509c07bc 1910 ch = &target->ch;
77f2c1a4
BVA
1911 idx = blk_mq_unique_tag_to_tag(tag);
1912 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
1913 dev_name(&shost->shost_gendev), tag, idx,
1914 target->req_ring_size);
509c07bc
BVA
1915
1916 spin_lock_irqsave(&ch->lock, flags);
1917 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 1918 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 1919
77f2c1a4
BVA
1920 if (!iu)
1921 goto err;
1922
1923 req = &ch->req_ring[idx];
05321937 1924 dev = target->srp_host->srp_dev->dev;
49248644 1925 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 1926 DMA_TO_DEVICE);
aef9ec39 1927
f8b6e31e 1928 scmnd->host_scribble = (void *) req;
aef9ec39
RD
1929
1930 cmd = iu->buf;
1931 memset(cmd, 0, sizeof *cmd);
1932
1933 cmd->opcode = SRP_CMD;
1934 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
77f2c1a4 1935 cmd->tag = tag;
aef9ec39
RD
1936 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1937
aef9ec39
RD
1938 req->scmnd = scmnd;
1939 req->cmd = iu;
aef9ec39 1940
509c07bc 1941 len = srp_map_data(scmnd, ch, req);
aef9ec39 1942 if (len < 0) {
7aa54bd7 1943 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
1944 PFX "Failed to map data (%d)\n", len);
1945 /*
1946 * If we ran out of memory descriptors (-ENOMEM) because an
1947 * application is queuing many requests with more than
52ede08f 1948 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
1949 * to reduce queue depth temporarily.
1950 */
1951 scmnd->result = len == -ENOMEM ?
1952 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 1953 goto err_iu;
aef9ec39
RD
1954 }
1955
49248644 1956 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 1957 DMA_TO_DEVICE);
aef9ec39 1958
509c07bc 1959 if (srp_post_send(ch, iu, len)) {
7aa54bd7 1960 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
1961 goto err_unmap;
1962 }
1963
d1b4289e
BVA
1964 ret = 0;
1965
a95cadb9
BVA
1966unlock_rport:
1967 if (in_scsi_eh)
1968 mutex_unlock(&rport->mutex);
1969
d1b4289e 1970 return ret;
aef9ec39
RD
1971
1972err_unmap:
509c07bc 1973 srp_unmap_data(scmnd, ch, req);
aef9ec39 1974
76c75b25 1975err_iu:
509c07bc 1976 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 1977
024ca901
BVA
1978 /*
1979 * Avoid that the loops that iterate over the request ring can
1980 * encounter a dangling SCSI command pointer.
1981 */
1982 req->scmnd = NULL;
1983
d1b4289e
BVA
1984err:
1985 if (scmnd->result) {
1986 scmnd->scsi_done(scmnd);
1987 ret = 0;
1988 } else {
1989 ret = SCSI_MLQUEUE_HOST_BUSY;
1990 }
a95cadb9 1991
d1b4289e 1992 goto unlock_rport;
aef9ec39
RD
1993}
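/*
 * Illustrative sketch (not driver code): how the block layer tag obtained
 * above maps onto a slot in ch->req_ring. blk_mq_unique_tag() returns the
 * hardware queue number in the upper 16 bits and the per-queue tag in the
 * lower 16 bits, so with a single RDMA channel the ring index is simply
 * the low word:
 *
 *	u32 tag = blk_mq_unique_tag(scmnd->request);	e.g. tag == 0x0000002a
 *	u16 idx = blk_mq_unique_tag_to_tag(tag);	idx == 0x2a -> req_ring[42]
 *
 * The example value is hypothetical; only the bit layout is defined by the
 * blk-mq API.
 */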
1994
4d73f95f
BVA
1995/*
1996 * Note: the resources allocated in this function are freed in
509c07bc 1997 * srp_free_ch_ib().
4d73f95f 1998 */
509c07bc 1999static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2000{
509c07bc 2001 struct srp_target_port *target = ch->target;
aef9ec39
RD
2002 int i;
2003
509c07bc
BVA
2004 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2005 GFP_KERNEL);
2006 if (!ch->rx_ring)
4d73f95f 2007 goto err_no_ring;
509c07bc
BVA
2008 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2009 GFP_KERNEL);
2010 if (!ch->tx_ring)
4d73f95f
BVA
2011 goto err_no_ring;
2012
2013 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2014 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2015 ch->max_ti_iu_len,
2016 GFP_KERNEL, DMA_FROM_DEVICE);
2017 if (!ch->rx_ring[i])
aef9ec39
RD
2018 goto err;
2019 }
2020
4d73f95f 2021 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2022 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2023 target->max_iu_len,
2024 GFP_KERNEL, DMA_TO_DEVICE);
2025 if (!ch->tx_ring[i])
aef9ec39 2026 goto err;
dcb4cb85 2027
509c07bc 2028 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2029 }
2030
2031 return 0;
2032
2033err:
4d73f95f 2034 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2035 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2036 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2037 }
2038
4d73f95f
BVA
2039
2040err_no_ring:
509c07bc
BVA
2041 kfree(ch->tx_ring);
2042 ch->tx_ring = NULL;
2043 kfree(ch->rx_ring);
2044 ch->rx_ring = NULL;
4d73f95f 2045
aef9ec39
RD
2046 return -ENOMEM;
2047}
2048
c9b03c1a
BVA
2049static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2050{
2051 uint64_t T_tr_ns, max_compl_time_ms;
2052 uint32_t rq_tmo_jiffies;
2053
2054 /*
2055 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2056 * table 91), both the QP timeout and the retry count have to be set
2057 * for RC QP's during the RTR to RTS transition.
2058 */
2059 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2060 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2061
2062 /*
2063 * Set target->rq_tmo_jiffies to one second more than the largest time
2064 * it can take before an error completion is generated. See also
2065 * C9-140..142 in the IBTA spec for more information about how to
2066 * convert the QP Local ACK Timeout value to nanoseconds.
2067 */
2068 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2069 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2070 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2071 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2072
2073 return rq_tmo_jiffies;
2074}
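/*
 * Worked example (hypothetical QP attributes, for illustration only): with
 * qp_attr->timeout == 14 and qp_attr->retry_cnt == 7,
 * T_tr_ns = 4096 * 2^14 ns = 67,108,864 ns (about 67 ms), so
 * max_compl_time_ms = 7 * 4 * 67,108,864 ns / NSEC_PER_MSEC = 1879 ms and
 * rq_tmo_jiffies corresponds to 1879 + 1000 = 2879 ms, i.e. roughly three
 * seconds.
 */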
2075
961e0be8
DD
2076static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2077 struct srp_login_rsp *lrsp,
509c07bc 2078 struct srp_rdma_ch *ch)
961e0be8 2079{
509c07bc 2080 struct srp_target_port *target = ch->target;
961e0be8
DD
2081 struct ib_qp_attr *qp_attr = NULL;
2082 int attr_mask = 0;
2083 int ret;
2084 int i;
2085
2086 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2087 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2088 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
961e0be8
DD
2089
2090 /*
2091 * Reserve credits for task management so we don't
2092 * bounce requests back to the SCSI mid-layer.
2093 */
2094 target->scsi_host->can_queue
509c07bc 2095 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2096 target->scsi_host->can_queue);
4d73f95f
BVA
2097 target->scsi_host->cmd_per_lun
2098 = min_t(int, target->scsi_host->can_queue,
2099 target->scsi_host->cmd_per_lun);
961e0be8
DD
2100 } else {
2101 shost_printk(KERN_WARNING, target->scsi_host,
2102 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2103 ret = -ECONNRESET;
2104 goto error;
2105 }
2106
509c07bc
BVA
2107 if (!ch->rx_ring) {
2108 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2109 if (ret)
2110 goto error;
2111 }
2112
2113 ret = -ENOMEM;
2114 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2115 if (!qp_attr)
2116 goto error;
2117
2118 qp_attr->qp_state = IB_QPS_RTR;
2119 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2120 if (ret)
2121 goto error_free;
2122
509c07bc 2123 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2124 if (ret)
2125 goto error_free;
2126
4d73f95f 2127 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2128 struct srp_iu *iu = ch->rx_ring[i];
2129
2130 ret = srp_post_recv(ch, iu);
961e0be8
DD
2131 if (ret)
2132 goto error_free;
2133 }
2134
2135 qp_attr->qp_state = IB_QPS_RTS;
2136 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2137 if (ret)
2138 goto error_free;
2139
c9b03c1a
BVA
2140 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2141
509c07bc 2142 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2143 if (ret)
2144 goto error_free;
2145
2146 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2147
2148error_free:
2149 kfree(qp_attr);
2150
2151error:
509c07bc 2152 ch->status = ret;
961e0be8
DD
2153}
2154
aef9ec39
RD
2155static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2156 struct ib_cm_event *event,
509c07bc 2157 struct srp_rdma_ch *ch)
aef9ec39 2158{
509c07bc 2159 struct srp_target_port *target = ch->target;
7aa54bd7 2160 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2161 struct ib_class_port_info *cpi;
2162 int opcode;
2163
2164 switch (event->param.rej_rcvd.reason) {
2165 case IB_CM_REJ_PORT_CM_REDIRECT:
2166 cpi = event->param.rej_rcvd.ari;
509c07bc
BVA
2167 ch->path.dlid = cpi->redirect_lid;
2168 ch->path.pkey = cpi->redirect_pkey;
aef9ec39 2169 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
509c07bc 2170 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2171
509c07bc 2172 ch->status = ch->path.dlid ?
aef9ec39
RD
2173 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2174 break;
2175
2176 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2177 if (srp_target_is_topspin(target)) {
aef9ec39
RD
2178 /*
2179 * Topspin/Cisco SRP gateways incorrectly send
2180 * reject reason code 25 when they mean 24
2181 * (port redirect).
2182 */
509c07bc 2183 memcpy(ch->path.dgid.raw,
aef9ec39
RD
2184 event->param.rej_rcvd.ari, 16);
2185
7aa54bd7
DD
2186 shost_printk(KERN_DEBUG, shost,
2187 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
509c07bc
BVA
2188 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2189 be64_to_cpu(ch->path.dgid.global.interface_id));
aef9ec39 2190
509c07bc 2191 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2192 } else {
7aa54bd7
DD
2193 shost_printk(KERN_WARNING, shost,
2194 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2195 ch->status = -ECONNRESET;
aef9ec39
RD
2196 }
2197 break;
2198
2199 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2200 shost_printk(KERN_WARNING, shost,
2201 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2202 ch->status = -ECONNRESET;
aef9ec39
RD
2203 break;
2204
2205 case IB_CM_REJ_CONSUMER_DEFINED:
2206 opcode = *(u8 *) event->private_data;
2207 if (opcode == SRP_LOGIN_REJ) {
2208 struct srp_login_rej *rej = event->private_data;
2209 u32 reason = be32_to_cpu(rej->reason);
2210
2211 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2212 shost_printk(KERN_WARNING, shost,
2213 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2214 else
e7ffde01
BVA
2215 shost_printk(KERN_WARNING, shost, PFX
2216 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000
BVA
2217 target->sgid.raw,
2218 target->orig_dgid.raw, reason);
aef9ec39 2219 } else
7aa54bd7
DD
2220 shost_printk(KERN_WARNING, shost,
2221 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2222 " opcode 0x%02x\n", opcode);
509c07bc 2223 ch->status = -ECONNRESET;
aef9ec39
RD
2224 break;
2225
9fe4bcf4
DD
2226 case IB_CM_REJ_STALE_CONN:
2227 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2228 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2229 break;
2230
aef9ec39 2231 default:
7aa54bd7
DD
2232 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2233 event->param.rej_rcvd.reason);
509c07bc 2234 ch->status = -ECONNRESET;
aef9ec39
RD
2235 }
2236}
2237
2238static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2239{
509c07bc
BVA
2240 struct srp_rdma_ch *ch = cm_id->context;
2241 struct srp_target_port *target = ch->target;
aef9ec39 2242 int comp = 0;
aef9ec39
RD
2243
2244 switch (event->event) {
2245 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2246 shost_printk(KERN_DEBUG, target->scsi_host,
2247 PFX "Sending CM REQ failed\n");
aef9ec39 2248 comp = 1;
509c07bc 2249 ch->status = -ECONNRESET;
aef9ec39
RD
2250 break;
2251
2252 case IB_CM_REP_RECEIVED:
2253 comp = 1;
509c07bc 2254 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2255 break;
2256
2257 case IB_CM_REJ_RECEIVED:
7aa54bd7 2258 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2259 comp = 1;
2260
509c07bc 2261 srp_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2262 break;
2263
b7ac4ab4 2264 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2265 shost_printk(KERN_WARNING, target->scsi_host,
2266 PFX "DREQ received - connection closed\n");
294c875a 2267 srp_change_conn_state(target, false);
b7ac4ab4 2268 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2269 shost_printk(KERN_ERR, target->scsi_host,
2270 PFX "Sending CM DREP failed\n");
c1120f89 2271 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2272 break;
2273
2274 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2275 shost_printk(KERN_ERR, target->scsi_host,
2276 PFX "connection closed\n");
ac72d766 2277 comp = 1;
aef9ec39 2278
509c07bc 2279 ch->status = 0;
aef9ec39
RD
2280 break;
2281
b7ac4ab4
IR
2282 case IB_CM_MRA_RECEIVED:
2283 case IB_CM_DREQ_ERROR:
2284 case IB_CM_DREP_RECEIVED:
2285 break;
2286
aef9ec39 2287 default:
7aa54bd7
DD
2288 shost_printk(KERN_WARNING, target->scsi_host,
2289 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2290 break;
2291 }
2292
2293 if (comp)
509c07bc 2294 complete(&ch->done);
aef9ec39 2295
aef9ec39
RD
2296 return 0;
2297}
2298
71444b97
JW
2299/**
2300 * srp_change_queue_depth - setting device queue depth
2301 * @sdev: scsi device struct
2302 * @qdepth: requested queue depth
2303 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
2304 * (see include/scsi/scsi_host.h for definition)
2305 *
2306 * Returns queue depth.
2307 */
2308static int
2309srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2310{
2311 struct Scsi_Host *shost = sdev->host;
2312 int max_depth;
2313 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
2314 max_depth = shost->can_queue;
2315 if (!sdev->tagged_supported)
2316 max_depth = 1;
2317 if (qdepth > max_depth)
2318 qdepth = max_depth;
c8b09f6f 2319 scsi_adjust_queue_depth(sdev, qdepth);
71444b97
JW
2320 } else if (reason == SCSI_QDEPTH_QFULL)
2321 scsi_track_queue_full(sdev, qdepth);
2322 else
2323 return -EOPNOTSUPP;
2324
2325 return sdev->queue_depth;
2326}
2327
509c07bc
BVA
2328static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2329 unsigned int lun, u8 func)
aef9ec39 2330{
509c07bc 2331 struct srp_target_port *target = ch->target;
a95cadb9 2332 struct srp_rport *rport = target->rport;
19081f31 2333 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2334 struct srp_iu *iu;
2335 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2336
3780d1f0
BVA
2337 if (!target->connected || target->qp_in_error)
2338 return -1;
2339
509c07bc 2340 init_completion(&ch->tsk_mgmt_done);
aef9ec39 2341
a95cadb9 2342 /*
509c07bc 2343 * Lock the rport mutex to avoid that srp_create_ch_ib() is
a95cadb9
BVA
2344 * invoked while a task management function is being sent.
2345 */
2346 mutex_lock(&rport->mutex);
509c07bc
BVA
2347 spin_lock_irq(&ch->lock);
2348 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2349 spin_unlock_irq(&ch->lock);
76c75b25 2350
a95cadb9
BVA
2351 if (!iu) {
2352 mutex_unlock(&rport->mutex);
2353
76c75b25 2354 return -1;
a95cadb9 2355 }
aef9ec39 2356
19081f31
DD
2357 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2358 DMA_TO_DEVICE);
aef9ec39
RD
2359 tsk_mgmt = iu->buf;
2360 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2361
2362 tsk_mgmt->opcode = SRP_TSK_MGMT;
f8b6e31e
DD
2363 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
2364 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2365 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2366 tsk_mgmt->task_tag = req_tag;
aef9ec39 2367
19081f31
DD
2368 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2369 DMA_TO_DEVICE);
509c07bc
BVA
2370 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2371 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2372 mutex_unlock(&rport->mutex);
2373
76c75b25
BVA
2374 return -1;
2375 }
a95cadb9 2376 mutex_unlock(&rport->mutex);
d945e1df 2377
509c07bc 2378 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
aef9ec39 2379 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2380 return -1;
aef9ec39 2381
d945e1df 2382 return 0;
d945e1df
RD
2383}
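/*
 * Note on the tag encoding: task management requests share the tag space
 * with normal commands but have the SRP_TAG_TSK_MGMT bit set in
 * tsk_mgmt->tag, which is how srp_process_rsp() distinguishes a task
 * management response from an SRP_RSP for a queued SCSI command.
 */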
2384
aef9ec39
RD
2385static int srp_abort(struct scsi_cmnd *scmnd)
2386{
d945e1df 2387 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2388 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2389 u32 tag;
509c07bc 2390 struct srp_rdma_ch *ch;
086f44f5 2391 int ret;
d945e1df 2392
7aa54bd7 2393 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2394
509c07bc
BVA
2395 ch = &target->ch;
2396 if (!req || !srp_claim_req(ch, req, NULL, scmnd))
99b6697a 2397 return SUCCESS;
77f2c1a4
BVA
2398 tag = blk_mq_unique_tag(scmnd->request);
2399 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
80d5e8a2 2400 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2401 ret = SUCCESS;
ed9b2264 2402 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2403 ret = FAST_IO_FAIL;
086f44f5
BVA
2404 else
2405 ret = FAILED;
509c07bc 2406 srp_free_req(ch, req, scmnd, 0);
22032991 2407 scmnd->result = DID_ABORT << 16;
d8536670 2408 scmnd->scsi_done(scmnd);
d945e1df 2409
086f44f5 2410 return ret;
aef9ec39
RD
2411}
2412
2413static int srp_reset_device(struct scsi_cmnd *scmnd)
2414{
d945e1df 2415 struct srp_target_port *target = host_to_target(scmnd->device->host);
509c07bc 2416 struct srp_rdma_ch *ch = &target->ch;
536ae14e 2417 int i;
d945e1df 2418
7aa54bd7 2419 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2420
509c07bc 2421 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
f8b6e31e 2422 SRP_TSK_LUN_RESET))
d945e1df 2423 return FAILED;
509c07bc 2424 if (ch->tsk_mgmt_status)
d945e1df
RD
2425 return FAILED;
2426
4d73f95f 2427 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc
BVA
2428 struct srp_request *req = &ch->req_ring[i];
2429
2430 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
536ae14e 2431 }
d945e1df 2432
d945e1df 2433 return SUCCESS;
aef9ec39
RD
2434}
2435
2436static int srp_reset_host(struct scsi_cmnd *scmnd)
2437{
2438 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2439
7aa54bd7 2440 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2441
ed9b2264 2442 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2443}
2444
c9b03c1a
BVA
2445static int srp_slave_configure(struct scsi_device *sdev)
2446{
2447 struct Scsi_Host *shost = sdev->host;
2448 struct srp_target_port *target = host_to_target(shost);
2449 struct request_queue *q = sdev->request_queue;
2450 unsigned long timeout;
2451
2452 if (sdev->type == TYPE_DISK) {
2453 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2454 blk_queue_rq_timeout(q, timeout);
2455 }
2456
2457 return 0;
2458}
2459
ee959b00
TJ
2460static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2461 char *buf)
6ecb0c84 2462{
ee959b00 2463 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2464
6ecb0c84
RD
2465 return sprintf(buf, "0x%016llx\n",
2466 (unsigned long long) be64_to_cpu(target->id_ext));
2467}
2468
ee959b00
TJ
2469static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2470 char *buf)
6ecb0c84 2471{
ee959b00 2472 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2473
6ecb0c84
RD
2474 return sprintf(buf, "0x%016llx\n",
2475 (unsigned long long) be64_to_cpu(target->ioc_guid));
2476}
2477
ee959b00
TJ
2478static ssize_t show_service_id(struct device *dev,
2479 struct device_attribute *attr, char *buf)
6ecb0c84 2480{
ee959b00 2481 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2482
6ecb0c84
RD
2483 return sprintf(buf, "0x%016llx\n",
2484 (unsigned long long) be64_to_cpu(target->service_id));
2485}
2486
ee959b00
TJ
2487static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2488 char *buf)
6ecb0c84 2489{
ee959b00 2490 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2491
747fe000 2492 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
6ecb0c84
RD
2493}
2494
848b3082
BVA
2495static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2496 char *buf)
2497{
2498 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2499
747fe000 2500 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
2501}
2502
ee959b00
TJ
2503static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2504 char *buf)
6ecb0c84 2505{
ee959b00 2506 struct srp_target_port *target = host_to_target(class_to_shost(dev));
509c07bc 2507 struct srp_rdma_ch *ch = &target->ch;
6ecb0c84 2508
509c07bc 2509 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
6ecb0c84
RD
2510}
2511
ee959b00
TJ
2512static ssize_t show_orig_dgid(struct device *dev,
2513 struct device_attribute *attr, char *buf)
3633b3d0 2514{
ee959b00 2515 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2516
747fe000 2517 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
3633b3d0
IR
2518}
2519
89de7486
BVA
2520static ssize_t show_req_lim(struct device *dev,
2521 struct device_attribute *attr, char *buf)
2522{
2523 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2524
509c07bc 2525 return sprintf(buf, "%d\n", target->ch.req_lim);
89de7486
BVA
2526}
2527
ee959b00
TJ
2528static ssize_t show_zero_req_lim(struct device *dev,
2529 struct device_attribute *attr, char *buf)
6bfa24fa 2530{
ee959b00 2531 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2532
6bfa24fa
RD
2533 return sprintf(buf, "%d\n", target->zero_req_lim);
2534}
2535
ee959b00
TJ
2536static ssize_t show_local_ib_port(struct device *dev,
2537 struct device_attribute *attr, char *buf)
ded7f1a1 2538{
ee959b00 2539 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
2540
2541 return sprintf(buf, "%d\n", target->srp_host->port);
2542}
2543
ee959b00
TJ
2544static ssize_t show_local_ib_device(struct device *dev,
2545 struct device_attribute *attr, char *buf)
ded7f1a1 2546{
ee959b00 2547 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2548
05321937 2549 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
2550}
2551
4b5e5f41
BVA
2552static ssize_t show_comp_vector(struct device *dev,
2553 struct device_attribute *attr, char *buf)
2554{
2555 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2556
2557 return sprintf(buf, "%d\n", target->comp_vector);
2558}
2559
7bb312e4
VP
2560static ssize_t show_tl_retry_count(struct device *dev,
2561 struct device_attribute *attr, char *buf)
2562{
2563 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2564
2565 return sprintf(buf, "%d\n", target->tl_retry_count);
2566}
2567
49248644
DD
2568static ssize_t show_cmd_sg_entries(struct device *dev,
2569 struct device_attribute *attr, char *buf)
2570{
2571 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2572
2573 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2574}
2575
c07d424d
DD
2576static ssize_t show_allow_ext_sg(struct device *dev,
2577 struct device_attribute *attr, char *buf)
2578{
2579 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2580
2581 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2582}
2583
ee959b00
TJ
2584static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2585static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2586static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2587static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2588static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
2589static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2590static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2591static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
2592static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2593static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2594static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
4b5e5f41 2595static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2596static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2597static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2598static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2599
2600static struct device_attribute *srp_host_attrs[] = {
2601 &dev_attr_id_ext,
2602 &dev_attr_ioc_guid,
2603 &dev_attr_service_id,
2604 &dev_attr_pkey,
848b3082 2605 &dev_attr_sgid,
ee959b00
TJ
2606 &dev_attr_dgid,
2607 &dev_attr_orig_dgid,
89de7486 2608 &dev_attr_req_lim,
ee959b00
TJ
2609 &dev_attr_zero_req_lim,
2610 &dev_attr_local_ib_port,
2611 &dev_attr_local_ib_device,
4b5e5f41 2612 &dev_attr_comp_vector,
7bb312e4 2613 &dev_attr_tl_retry_count,
49248644 2614 &dev_attr_cmd_sg_entries,
c07d424d 2615 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2616 NULL
2617};
2618
aef9ec39
RD
2619static struct scsi_host_template srp_template = {
2620 .module = THIS_MODULE,
b7f008fd
RD
2621 .name = "InfiniBand SRP initiator",
2622 .proc_name = DRV_NAME,
c9b03c1a 2623 .slave_configure = srp_slave_configure,
aef9ec39
RD
2624 .info = srp_target_info,
2625 .queuecommand = srp_queuecommand,
71444b97 2626 .change_queue_depth = srp_change_queue_depth,
a62182f3 2627 .change_queue_type = scsi_change_queue_type,
aef9ec39
RD
2628 .eh_abort_handler = srp_abort,
2629 .eh_device_reset_handler = srp_reset_device,
2630 .eh_host_reset_handler = srp_reset_host,
2742c1da 2631 .skip_settle_delay = true,
49248644 2632 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2633 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2634 .this_id = -1,
4d73f95f 2635 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84 2636 .use_clustering = ENABLE_CLUSTERING,
77f2c1a4
BVA
2637 .shost_attrs = srp_host_attrs,
2638 .use_blk_tags = 1,
aef9ec39
RD
2639};
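/*
 * Setting .use_blk_tags above makes the SCSI midlayer provide block layer
 * tags for every request, which is what lets srp_queuecommand() derive the
 * req_ring index from blk_mq_unique_tag() and srp_process_rsp() look the
 * command up again with scsi_host_find_tag(); srp_create_target() sizes
 * the shared tag map via scsi_init_shared_tag_map().
 */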
2640
34aa654e
BVA
2641static int srp_sdev_count(struct Scsi_Host *host)
2642{
2643 struct scsi_device *sdev;
2644 int c = 0;
2645
2646 shost_for_each_device(sdev, host)
2647 c++;
2648
2649 return c;
2650}
2651
aef9ec39
RD
2652static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2653{
3236822b
FT
2654 struct srp_rport_identifiers ids;
2655 struct srp_rport *rport;
2656
34aa654e 2657 target->state = SRP_TARGET_SCANNING;
aef9ec39
RD
2658 sprintf(target->target_name, "SRP.T10:%016llX",
2659 (unsigned long long) be64_to_cpu(target->id_ext));
2660
05321937 2661 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2662 return -ENODEV;
2663
3236822b
FT
2664 memcpy(ids.port_id, &target->id_ext, 8);
2665 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2666 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2667 rport = srp_rport_add(target->scsi_host, &ids);
2668 if (IS_ERR(rport)) {
2669 scsi_remove_host(target->scsi_host);
2670 return PTR_ERR(rport);
2671 }
2672
dc1bdbd9 2673 rport->lld_data = target;
9dd69a60 2674 target->rport = rport;
dc1bdbd9 2675
b3589fd4 2676 spin_lock(&host->target_lock);
aef9ec39 2677 list_add_tail(&target->list, &host->target_list);
b3589fd4 2678 spin_unlock(&host->target_lock);
aef9ec39 2679
aef9ec39 2680 scsi_scan_target(&target->scsi_host->shost_gendev,
1962a4a1 2681 0, target->scsi_id, SCAN_WILD_CARD, 0);
aef9ec39 2682
34aa654e
BVA
2683 if (!target->connected || target->qp_in_error) {
2684 shost_printk(KERN_INFO, target->scsi_host,
2685 PFX "SCSI scan failed - removing SCSI host\n");
2686 srp_queue_remove_work(target);
2687 goto out;
2688 }
2689
2690 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2691 dev_name(&target->scsi_host->shost_gendev),
2692 srp_sdev_count(target->scsi_host));
2693
2694 spin_lock_irq(&target->lock);
2695 if (target->state == SRP_TARGET_SCANNING)
2696 target->state = SRP_TARGET_LIVE;
2697 spin_unlock_irq(&target->lock);
2698
2699out:
aef9ec39
RD
2700 return 0;
2701}
2702
ee959b00 2703static void srp_release_dev(struct device *dev)
aef9ec39
RD
2704{
2705 struct srp_host *host =
ee959b00 2706 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2707
2708 complete(&host->released);
2709}
2710
2711static struct class srp_class = {
2712 .name = "infiniband_srp",
ee959b00 2713 .dev_release = srp_release_dev
aef9ec39
RD
2714};
2715
96fc248a
BVA
2716/**
2717 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
2718 * @host: SRP host.
2719 * @target: SRP target port.
96fc248a
BVA
2720 */
2721static bool srp_conn_unique(struct srp_host *host,
2722 struct srp_target_port *target)
2723{
2724 struct srp_target_port *t;
2725 bool ret = false;
2726
2727 if (target->state == SRP_TARGET_REMOVED)
2728 goto out;
2729
2730 ret = true;
2731
2732 spin_lock(&host->target_lock);
2733 list_for_each_entry(t, &host->target_list, list) {
2734 if (t != target &&
2735 target->id_ext == t->id_ext &&
2736 target->ioc_guid == t->ioc_guid &&
2737 target->initiator_ext == t->initiator_ext) {
2738 ret = false;
2739 break;
2740 }
2741 }
2742 spin_unlock(&host->target_lock);
2743
2744out:
2745 return ret;
2746}
2747
aef9ec39
RD
2748/*
2749 * Target ports are added by writing
2750 *
2751 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2752 * pkey=<P_Key>,service_id=<service ID>
2753 *
2754 * to the add_target sysfs attribute.
2755 */
2756enum {
2757 SRP_OPT_ERR = 0,
2758 SRP_OPT_ID_EXT = 1 << 0,
2759 SRP_OPT_IOC_GUID = 1 << 1,
2760 SRP_OPT_DGID = 1 << 2,
2761 SRP_OPT_PKEY = 1 << 3,
2762 SRP_OPT_SERVICE_ID = 1 << 4,
2763 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2764 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2765 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2766 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2767 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
2768 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2769 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 2770 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 2771 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 2772 SRP_OPT_QUEUE_SIZE = 1 << 14,
aef9ec39
RD
2773 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2774 SRP_OPT_IOC_GUID |
2775 SRP_OPT_DGID |
2776 SRP_OPT_PKEY |
2777 SRP_OPT_SERVICE_ID),
2778};
2779
a447c093 2780static const match_table_t srp_opt_tokens = {
52fb2b50
VP
2781 { SRP_OPT_ID_EXT, "id_ext=%s" },
2782 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2783 { SRP_OPT_DGID, "dgid=%s" },
2784 { SRP_OPT_PKEY, "pkey=%x" },
2785 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2786 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2787 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 2788 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 2789 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 2790 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
2791 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2792 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 2793 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 2794 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 2795 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 2796 { SRP_OPT_ERR, NULL }
aef9ec39
RD
2797};
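/*
 * Usage sketch (hypothetical HCA name and target identifiers): a target
 * port is created by writing a single comma-separated option string to the
 * add_target attribute of the srp-<device>-<port> class device registered
 * by srp_add_port(), for example:
 *
 *   echo id_ext=200500A0B81146A1,ioc_guid=00A0B80200402BEC,dgid=FE800000000000000002C90200402BED,pkey=ffff,service_id=200500A0B81146A1 > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */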
2798
2799static int srp_parse_options(const char *buf, struct srp_target_port *target)
2800{
2801 char *options, *sep_opt;
2802 char *p;
2803 char dgid[3];
2804 substring_t args[MAX_OPT_ARGS];
2805 int opt_mask = 0;
2806 int token;
2807 int ret = -EINVAL;
2808 int i;
2809
2810 options = kstrdup(buf, GFP_KERNEL);
2811 if (!options)
2812 return -ENOMEM;
2813
2814 sep_opt = options;
2815 while ((p = strsep(&sep_opt, ",")) != NULL) {
2816 if (!*p)
2817 continue;
2818
2819 token = match_token(p, srp_opt_tokens, args);
2820 opt_mask |= token;
2821
2822 switch (token) {
2823 case SRP_OPT_ID_EXT:
2824 p = match_strdup(args);
a20f3a6d
IR
2825 if (!p) {
2826 ret = -ENOMEM;
2827 goto out;
2828 }
aef9ec39
RD
2829 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2830 kfree(p);
2831 break;
2832
2833 case SRP_OPT_IOC_GUID:
2834 p = match_strdup(args);
a20f3a6d
IR
2835 if (!p) {
2836 ret = -ENOMEM;
2837 goto out;
2838 }
aef9ec39
RD
2839 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2840 kfree(p);
2841 break;
2842
2843 case SRP_OPT_DGID:
2844 p = match_strdup(args);
a20f3a6d
IR
2845 if (!p) {
2846 ret = -ENOMEM;
2847 goto out;
2848 }
aef9ec39 2849 if (strlen(p) != 32) {
e0bda7d8 2850 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 2851 kfree(p);
aef9ec39
RD
2852 goto out;
2853 }
2854
2855 for (i = 0; i < 16; ++i) {
747fe000
BVA
2856 strlcpy(dgid, p + i * 2, sizeof(dgid));
2857 if (sscanf(dgid, "%hhx",
2858 &target->orig_dgid.raw[i]) < 1) {
2859 ret = -EINVAL;
2860 kfree(p);
2861 goto out;
2862 }
aef9ec39 2863 }
bf17c1c7 2864 kfree(p);
aef9ec39
RD
2865 break;
2866
2867 case SRP_OPT_PKEY:
2868 if (match_hex(args, &token)) {
e0bda7d8 2869 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
2870 goto out;
2871 }
747fe000 2872 target->pkey = cpu_to_be16(token);
aef9ec39
RD
2873 break;
2874
2875 case SRP_OPT_SERVICE_ID:
2876 p = match_strdup(args);
a20f3a6d
IR
2877 if (!p) {
2878 ret = -ENOMEM;
2879 goto out;
2880 }
aef9ec39
RD
2881 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2882 kfree(p);
2883 break;
2884
2885 case SRP_OPT_MAX_SECT:
2886 if (match_int(args, &token)) {
e0bda7d8 2887 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
2888 goto out;
2889 }
2890 target->scsi_host->max_sectors = token;
2891 break;
2892
4d73f95f
BVA
2893 case SRP_OPT_QUEUE_SIZE:
2894 if (match_int(args, &token) || token < 1) {
2895 pr_warn("bad queue_size parameter '%s'\n", p);
2896 goto out;
2897 }
2898 target->scsi_host->can_queue = token;
2899 target->queue_size = token + SRP_RSP_SQ_SIZE +
2900 SRP_TSK_MGMT_SQ_SIZE;
2901 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2902 target->scsi_host->cmd_per_lun = token;
2903 break;
2904
52fb2b50 2905 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 2906 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
2907 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2908 p);
52fb2b50
VP
2909 goto out;
2910 }
4d73f95f 2911 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
2912 break;
2913
0c0450db
R
2914 case SRP_OPT_IO_CLASS:
2915 if (match_hex(args, &token)) {
e0bda7d8 2916 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
2917 goto out;
2918 }
2919 if (token != SRP_REV10_IB_IO_CLASS &&
2920 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
2921 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2922 token, SRP_REV10_IB_IO_CLASS,
2923 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
2924 goto out;
2925 }
2926 target->io_class = token;
2927 break;
2928
01cb9bcb
IR
2929 case SRP_OPT_INITIATOR_EXT:
2930 p = match_strdup(args);
a20f3a6d
IR
2931 if (!p) {
2932 ret = -ENOMEM;
2933 goto out;
2934 }
01cb9bcb
IR
2935 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2936 kfree(p);
2937 break;
2938
49248644
DD
2939 case SRP_OPT_CMD_SG_ENTRIES:
2940 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
2941 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2942 p);
49248644
DD
2943 goto out;
2944 }
2945 target->cmd_sg_cnt = token;
2946 break;
2947
c07d424d
DD
2948 case SRP_OPT_ALLOW_EXT_SG:
2949 if (match_int(args, &token)) {
e0bda7d8 2950 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
2951 goto out;
2952 }
2953 target->allow_ext_sg = !!token;
2954 break;
2955
2956 case SRP_OPT_SG_TABLESIZE:
2957 if (match_int(args, &token) || token < 1 ||
2958 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
e0bda7d8
BVA
2959 pr_warn("bad max sg_tablesize parameter '%s'\n",
2960 p);
c07d424d
DD
2961 goto out;
2962 }
2963 target->sg_tablesize = token;
2964 break;
2965
4b5e5f41
BVA
2966 case SRP_OPT_COMP_VECTOR:
2967 if (match_int(args, &token) || token < 0) {
2968 pr_warn("bad comp_vector parameter '%s'\n", p);
2969 goto out;
2970 }
2971 target->comp_vector = token;
2972 break;
2973
7bb312e4
VP
2974 case SRP_OPT_TL_RETRY_COUNT:
2975 if (match_int(args, &token) || token < 2 || token > 7) {
2976 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
2977 p);
2978 goto out;
2979 }
2980 target->tl_retry_count = token;
2981 break;
2982
aef9ec39 2983 default:
e0bda7d8
BVA
2984 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2985 p);
aef9ec39
RD
2986 goto out;
2987 }
2988 }
2989
2990 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2991 ret = 0;
2992 else
2993 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2994 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2995 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
2996 pr_warn("target creation request is missing parameter '%s'\n",
2997 srp_opt_tokens[i].pattern);
aef9ec39 2998
4d73f95f
BVA
2999 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3000 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3001 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3002 target->scsi_host->cmd_per_lun,
3003 target->scsi_host->can_queue);
3004
aef9ec39
RD
3005out:
3006 kfree(options);
3007 return ret;
3008}
3009
ee959b00
TJ
3010static ssize_t srp_create_target(struct device *dev,
3011 struct device_attribute *attr,
aef9ec39
RD
3012 const char *buf, size_t count)
3013{
3014 struct srp_host *host =
ee959b00 3015 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3016 struct Scsi_Host *target_host;
3017 struct srp_target_port *target;
509c07bc 3018 struct srp_rdma_ch *ch;
d1b4289e
BVA
3019 struct srp_device *srp_dev = host->srp_dev;
3020 struct ib_device *ibdev = srp_dev->dev;
b81d00bd 3021 int ret;
aef9ec39
RD
3022
3023 target_host = scsi_host_alloc(&srp_template,
3024 sizeof (struct srp_target_port));
3025 if (!target_host)
3026 return -ENOMEM;
3027
49248644 3028 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3029 target_host->max_channel = 0;
3030 target_host->max_id = 1;
3c8edf0e
AR
3031 target_host->max_lun = SRP_MAX_LUN;
3032 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 3033
aef9ec39 3034 target = host_to_target(target_host);
aef9ec39 3035
49248644
DD
3036 target->io_class = SRP_REV16A_IB_IO_CLASS;
3037 target->scsi_host = target_host;
3038 target->srp_host = host;
3039 target->lkey = host->srp_dev->mr->lkey;
3040 target->rkey = host->srp_dev->mr->rkey;
3041 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3042 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3043 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3044 target->tl_retry_count = 7;
4d73f95f 3045 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3046
34aa654e
BVA
3047 /*
3048 * Avoid that the SCSI host can be removed by srp_remove_target()
3049 * before this function returns.
3050 */
3051 scsi_host_get(target->scsi_host);
3052
2d7091bc
BVA
3053 mutex_lock(&host->add_target_mutex);
3054
aef9ec39
RD
3055 ret = srp_parse_options(buf, target);
3056 if (ret)
3057 goto err;
3058
77f2c1a4
BVA
3059 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3060 if (ret)
3061 goto err;
3062
4d73f95f
BVA
3063 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3064
96fc248a
BVA
3065 if (!srp_conn_unique(target->srp_host, target)) {
3066 shost_printk(KERN_INFO, target->scsi_host,
3067 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3068 be64_to_cpu(target->id_ext),
3069 be64_to_cpu(target->ioc_guid),
3070 be64_to_cpu(target->initiator_ext));
3071 ret = -EEXIST;
3072 goto err;
3073 }
3074
5cfb1782 3075 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3076 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3077 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3078 target->sg_tablesize = target->cmd_sg_cnt;
3079 }
3080
3081 target_host->sg_tablesize = target->sg_tablesize;
3082 target->indirect_size = target->sg_tablesize *
3083 sizeof (struct srp_direct_buf);
49248644
DD
3084 target->max_iu_len = sizeof (struct srp_cmd) +
3085 sizeof (struct srp_indirect_buf) +
3086 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3087
c1120f89 3088 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3089 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3090 spin_lock_init(&target->lock);
509c07bc
BVA
3091 ch = &target->ch;
3092 ch->target = target;
3093 ch->comp_vector = target->comp_vector;
3094 spin_lock_init(&ch->lock);
3095 INIT_LIST_HEAD(&ch->free_tx);
3096 ret = srp_alloc_req_data(ch);
b81d00bd
BVA
3097 if (ret)
3098 goto err_free_mem;
8f26c9ff 3099
747fe000 3100 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
2088ca66
SG
3101 if (ret)
3102 goto err_free_mem;
aef9ec39 3103
509c07bc 3104 ret = srp_create_ch_ib(ch);
aef9ec39 3105 if (ret)
8f26c9ff 3106 goto err_free_mem;
aef9ec39 3107
509c07bc 3108 ret = srp_new_cm_id(ch);
9fe4bcf4 3109 if (ret)
8f26c9ff 3110 goto err_free_ib;
aef9ec39 3111
509c07bc 3112 ret = srp_connect_ch(ch);
aef9ec39 3113 if (ret) {
7aa54bd7
DD
3114 shost_printk(KERN_ERR, target->scsi_host,
3115 PFX "Connection failed\n");
394c595e 3116 goto err_free_ib;
aef9ec39
RD
3117 }
3118
3119 ret = srp_add_target(host, target);
3120 if (ret)
3121 goto err_disconnect;
3122
34aa654e
BVA
3123 if (target->state != SRP_TARGET_REMOVED) {
3124 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3125 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3126 be64_to_cpu(target->id_ext),
3127 be64_to_cpu(target->ioc_guid),
747fe000 3128 be16_to_cpu(target->pkey),
34aa654e 3129 be64_to_cpu(target->service_id),
747fe000 3130 target->sgid.raw, target->orig_dgid.raw);
34aa654e 3131 }
e7ffde01 3132
2d7091bc
BVA
3133 ret = count;
3134
3135out:
3136 mutex_unlock(&host->add_target_mutex);
34aa654e
BVA
3137
3138 scsi_host_put(target->scsi_host);
3139
2d7091bc 3140 return ret;
aef9ec39
RD
3141
3142err_disconnect:
3143 srp_disconnect_target(target);
3144
8f26c9ff 3145err_free_ib:
509c07bc 3146 srp_free_ch_ib(target, ch);
aef9ec39 3147
8f26c9ff 3148err_free_mem:
509c07bc 3149 srp_free_req_data(target, ch);
8f26c9ff 3150
aef9ec39
RD
3151err:
3152 scsi_host_put(target_host);
2d7091bc 3153 goto out;
aef9ec39
RD
3154}
3155
ee959b00 3156static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 3157
ee959b00
TJ
3158static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3159 char *buf)
aef9ec39 3160{
ee959b00 3161 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3162
05321937 3163 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
3164}
3165
ee959b00 3166static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3167
ee959b00
TJ
3168static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3169 char *buf)
aef9ec39 3170{
ee959b00 3171 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
3172
3173 return sprintf(buf, "%d\n", host->port);
3174}
3175
ee959b00 3176static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3177
f5358a17 3178static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3179{
3180 struct srp_host *host;
3181
3182 host = kzalloc(sizeof *host, GFP_KERNEL);
3183 if (!host)
3184 return NULL;
3185
3186 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3187 spin_lock_init(&host->target_lock);
aef9ec39 3188 init_completion(&host->released);
2d7091bc 3189 mutex_init(&host->add_target_mutex);
05321937 3190 host->srp_dev = device;
aef9ec39
RD
3191 host->port = port;
3192
ee959b00
TJ
3193 host->dev.class = &srp_class;
3194 host->dev.parent = device->dev->dma_device;
d927e38c 3195 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3196
ee959b00 3197 if (device_register(&host->dev))
f5358a17 3198 goto free_host;
ee959b00 3199 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3200 goto err_class;
ee959b00 3201 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3202 goto err_class;
ee959b00 3203 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3204 goto err_class;
3205
3206 return host;
3207
3208err_class:
ee959b00 3209 device_unregister(&host->dev);
aef9ec39 3210
f5358a17 3211free_host:
aef9ec39
RD
3212 kfree(host);
3213
3214 return NULL;
3215}
3216
3217static void srp_add_one(struct ib_device *device)
3218{
f5358a17
RD
3219 struct srp_device *srp_dev;
3220 struct ib_device_attr *dev_attr;
aef9ec39 3221 struct srp_host *host;
52ede08f
BVA
3222 int mr_page_shift, s, e, p;
3223 u64 max_pages_per_mr;
aef9ec39 3224
f5358a17
RD
3225 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3226 if (!dev_attr)
cf311cd4 3227 return;
aef9ec39 3228
f5358a17 3229 if (ib_query_device(device, dev_attr)) {
e0bda7d8 3230 pr_warn("Query device failed for %s\n", device->name);
f5358a17
RD
3231 goto free_attr;
3232 }
3233
3234 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3235 if (!srp_dev)
3236 goto free_attr;
3237
d1b4289e
BVA
3238 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3239 device->map_phys_fmr && device->unmap_fmr);
5cfb1782
BVA
3240 srp_dev->has_fr = (dev_attr->device_cap_flags &
3241 IB_DEVICE_MEM_MGT_EXTENSIONS);
3242 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3243 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3244
3245 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3246 (!srp_dev->has_fmr || prefer_fr));
d1b4289e 3247
f5358a17
RD
3248 /*
3249 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3250 * minimum of 4096 bytes. We're unlikely to build large sglists
3251 * out of smaller entries.
f5358a17 3252 */
52ede08f
BVA
3253 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3254 srp_dev->mr_page_size = 1 << mr_page_shift;
3255 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3256 max_pages_per_mr = dev_attr->max_mr_size;
3257 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3258 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3259 max_pages_per_mr);
5cfb1782
BVA
3260 if (srp_dev->use_fast_reg) {
3261 srp_dev->max_pages_per_mr =
3262 min_t(u32, srp_dev->max_pages_per_mr,
3263 dev_attr->max_fast_reg_page_list_len);
3264 }
52ede08f
BVA
3265 srp_dev->mr_max_size = srp_dev->mr_page_size *
3266 srp_dev->max_pages_per_mr;
5cfb1782 3267 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
52ede08f 3268 device->name, mr_page_shift, dev_attr->max_mr_size,
5cfb1782 3269 dev_attr->max_fast_reg_page_list_len,
52ede08f 3270 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
3271
3272 INIT_LIST_HEAD(&srp_dev->dev_list);
3273
3274 srp_dev->dev = device;
3275 srp_dev->pd = ib_alloc_pd(device);
3276 if (IS_ERR(srp_dev->pd))
3277 goto free_dev;
3278
3279 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3280 IB_ACCESS_LOCAL_WRITE |
3281 IB_ACCESS_REMOTE_READ |
3282 IB_ACCESS_REMOTE_WRITE);
3283 if (IS_ERR(srp_dev->mr))
3284 goto err_pd;
3285
07ebafba 3286 if (device->node_type == RDMA_NODE_IB_SWITCH) {
aef9ec39
RD
3287 s = 0;
3288 e = 0;
3289 } else {
3290 s = 1;
3291 e = device->phys_port_cnt;
3292 }
3293
3294 for (p = s; p <= e; ++p) {
f5358a17 3295 host = srp_add_port(srp_dev, p);
aef9ec39 3296 if (host)
f5358a17 3297 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3298 }
3299
f5358a17
RD
3300 ib_set_client_data(device, &srp_client, srp_dev);
3301
3302 goto free_attr;
3303
3304err_pd:
3305 ib_dealloc_pd(srp_dev->pd);
3306
3307free_dev:
3308 kfree(srp_dev);
3309
3310free_attr:
3311 kfree(dev_attr);
aef9ec39
RD
3312}
3313
3314static void srp_remove_one(struct ib_device *device)
3315{
f5358a17 3316 struct srp_device *srp_dev;
aef9ec39 3317 struct srp_host *host, *tmp_host;
ef6c49d8 3318 struct srp_target_port *target;
aef9ec39 3319
f5358a17 3320 srp_dev = ib_get_client_data(device, &srp_client);
1fe0cb84
DB
3321 if (!srp_dev)
3322 return;
aef9ec39 3323
f5358a17 3324 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3325 device_unregister(&host->dev);
aef9ec39
RD
3326 /*
3327 * Wait for the sysfs entry to go away, so that no new
3328 * target ports can be created.
3329 */
3330 wait_for_completion(&host->released);
3331
3332 /*
ef6c49d8 3333 * Remove all target ports.
aef9ec39 3334 */
b3589fd4 3335 spin_lock(&host->target_lock);
ef6c49d8
BVA
3336 list_for_each_entry(target, &host->target_list, list)
3337 srp_queue_remove_work(target);
b3589fd4 3338 spin_unlock(&host->target_lock);
aef9ec39
RD
3339
3340 /*
bcc05910 3341 * Wait for tl_err and target port removal tasks.
aef9ec39 3342 */
ef6c49d8 3343 flush_workqueue(system_long_wq);
bcc05910 3344 flush_workqueue(srp_remove_wq);
aef9ec39 3345
aef9ec39
RD
3346 kfree(host);
3347 }
3348
f5358a17
RD
3349 ib_dereg_mr(srp_dev->mr);
3350 ib_dealloc_pd(srp_dev->pd);
3351
3352 kfree(srp_dev);
aef9ec39
RD
3353}
3354
3236822b 3355static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3356 .has_rport_state = true,
3357 .reset_timer_if_blocked = true,
a95cadb9 3358 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3359 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3360 .dev_loss_tmo = &srp_dev_loss_tmo,
3361 .reconnect = srp_rport_reconnect,
dc1bdbd9 3362 .rport_delete = srp_rport_delete,
ed9b2264 3363 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3364};
3365
aef9ec39
RD
3366static int __init srp_init_module(void)
3367{
3368 int ret;
3369
dcb4cb85 3370 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
dd5e6e38 3371
49248644 3372 if (srp_sg_tablesize) {
e0bda7d8 3373 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3374 if (!cmd_sg_entries)
3375 cmd_sg_entries = srp_sg_tablesize;
3376 }
3377
3378 if (!cmd_sg_entries)
3379 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3380
3381 if (cmd_sg_entries > 255) {
e0bda7d8 3382 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3383 cmd_sg_entries = 255;
1e89a194
DD
3384 }
3385
c07d424d
DD
3386 if (!indirect_sg_entries)
3387 indirect_sg_entries = cmd_sg_entries;
3388 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3389 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3390 cmd_sg_entries);
c07d424d
DD
3391 indirect_sg_entries = cmd_sg_entries;
3392 }
3393
bcc05910 3394 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3395 if (!srp_remove_wq) {
3396 ret = -ENOMEM;
bcc05910
BVA
3397 goto out;
3398 }
3399
3400 ret = -ENOMEM;
3236822b
FT
3401 ib_srp_transport_template =
3402 srp_attach_transport(&ib_srp_transport_functions);
3403 if (!ib_srp_transport_template)
bcc05910 3404 goto destroy_wq;
3236822b 3405
aef9ec39
RD
3406 ret = class_register(&srp_class);
3407 if (ret) {
e0bda7d8 3408 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3409 goto release_tr;
aef9ec39
RD
3410 }
3411
c1a0b23b
MT
3412 ib_sa_register_client(&srp_sa_client);
3413
aef9ec39
RD
3414 ret = ib_register_client(&srp_client);
3415 if (ret) {
e0bda7d8 3416 pr_err("couldn't register IB client\n");
bcc05910 3417 goto unreg_sa;
aef9ec39
RD
3418 }
3419
bcc05910
BVA
3420out:
3421 return ret;
3422
3423unreg_sa:
3424 ib_sa_unregister_client(&srp_sa_client);
3425 class_unregister(&srp_class);
3426
3427release_tr:
3428 srp_release_transport(ib_srp_transport_template);
3429
3430destroy_wq:
3431 destroy_workqueue(srp_remove_wq);
3432 goto out;
aef9ec39
RD
3433}
3434
3435static void __exit srp_cleanup_module(void)
3436{
3437 ib_unregister_client(&srp_client);
c1a0b23b 3438 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3439 class_unregister(&srp_class);
3236822b 3440 srp_release_transport(ib_srp_transport_template);
bcc05910 3441 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3442}
3443
3444module_init(srp_init_module);
3445module_exit(srp_cleanup_module);