IB/srp: Fix a race condition triggered by destroying a queue pair
drivers/infiniband/ulp/srp/ib_srp.c
/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

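/* Report a timeout parameter in seconds; negative values print as "off". */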
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

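/*
 * Parse a timeout value ("off" is stored as -1) and validate it together
 * with the other two transport timeouts via srp_tmo_valid() before
 * updating the parameter.
 */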
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents
 * the receive completion handler from accessing the queue pair while it
 * is being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(target->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

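	/*
	 * Post a marker receive WR; on a QP in the error state it completes
	 * immediately with a flush error and wr_id SRP_LAST_WR_ID, upon which
	 * srp_handle_qp_err() completes ch->done. Waiting for that completion
	 * guarantees that all earlier receive completions have been processed.
	 */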
	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
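	/*
	 * With fast registration each command may post an additional memory
	 * registration WR, hence m == 2 and the doubled send queue and send
	 * CQ sizes below.
	 */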
	const int m = 1 + dev->use_fast_reg;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       target->queue_size + 1, ch->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       m * target->queue_size, ch->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may continue to attempt
	 * recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID. Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id, &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		for (i = 0; i < target->ch_count; i++) {
			ch = &target->ch[i];
			if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
				shost_printk(KERN_DEBUG, target->scsi_host,
					     PFX "Sending CM DREQ failed\n");
			}
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->target || !ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist
 * before this function was invoked are ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

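/* Post a LOCAL_INV work request to invalidate a fast registration rkey. */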
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this
 * function is in progress. One way to achieve this is to call
 * srp_reconnect_rport() instead of calling this function directly, since
 * srp_reconnect_rport() serializes calls to this function via rport->mutex
 * and also blocks srp_queuecommand() before invoking it.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret || !ch->target) {
			if (i > 1)
				ret = 0;
			break;
		}
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);

	return 0;
}

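/*
 * Register the pages accumulated in @state with a FAST_REG_MR work request
 * and add a memory descriptor that references the newly incremented rkey.
 */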
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	memcpy(desc->frpl->page_list, state->pages,
	       sizeof(state->pages[0]) * state->npages);

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr_id = FAST_REG_WR_ID_MASK;
	wr.wr.fast_reg.iova_start = state->base_dma_addr;
	wr.wr.fast_reg.page_list = desc->frpl;
	wr.wr.fast_reg.page_list_len = state->npages;
	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
	wr.wr.fast_reg.length = state->dma_len;
	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_READ |
				       IB_ACCESS_REMOTE_WRITE);
	wr.wr.fast_reg.rkey = desc->mr->lkey;

	*state->next_fr++ = desc;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr, state->dma_len,
		     desc->mr->rkey);

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

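/*
 * Flush the pages gathered so far: emit a direct descriptor with the global
 * rkey when a single page suffices and register_always is not set, otherwise
 * register them via fast registration or the FMR pool.
 */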
static int srp_finish_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret = 0;

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && !register_always)
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->rkey);
	else
		ret = target->srp_host->srp_dev->use_fast_reg ?
			srp_map_finish_fr(state, ch) :
			srp_map_finish_fmr(state, ch);

	if (ret == 0) {
		state->npages = 0;
		state->dma_len = 0;
	}

	return ret;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg, int sg_index,
			    bool use_mr)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (!use_mr) {
		/*
		 * Once we're in direct map mode for a request, we don't
		 * go back to FMR or FR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/*
	 * Since not all RDMA HW drivers support non-zero page offsets for
	 * FMR, if we start at an offset into a page, don't merge into the
	 * current FMR mapping. Finish it out, and use the kernel's MR for
	 * this sg entry.
	 */
	if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
	    dma_len > dev->mr_max_size) {
		ret = srp_finish_mapping(state, ch);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/*
	 * If this is the first sg that will be mapped via FMR or via FR, save
	 * our position. We need to know the first unmapped entry, its index,
	 * and the first unmapped address within that entry to be able to
	 * restart mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			ret = srp_finish_mapping(state, ch);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size) {
		ret = srp_finish_mapping(state, ch);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}

static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
		      struct srp_request *req, struct scatterlist *scat,
		      int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;
	int i;
	bool use_mr;

	state->desc	= req->indirect_desc;
	state->pages	= req->map_page;
	if (dev->use_fast_reg) {
		state->next_fr = req->fr_list;
		use_mr = !!ch->fr_pool;
	} else {
		state->next_fmr = req->fmr_list;
		use_mr = !!ch->fmr_pool;
	}

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
			/*
			 * Memory registration failed, so backtrack to the
			 * first unmapped entry and continue on without using
			 * memory registration.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state->unmapped_sg;
			i = state->unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state->unmapped_addr - dma_addr);
			dma_addr = state->unmapped_addr;
			use_mr = false;
			srp_map_desc(state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_mr && srp_finish_mapping(state, ch))
		goto backtrack;

	req->nmdesc = state->nmdesc;

	return 0;
}

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1 && !register_always) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry. So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	srp_map_sg(&state, ch, req, scat, count);

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

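	/* Reap send completions so that finished IUs are back on free_tx. */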
	srp_send_completion(ch->send_cq, ch);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}

static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&ch->lock, flags);

		ch->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			ch->tsk_mgmt_status = rsp->data[3];
		complete(&ch->tsk_mgmt_done);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
			      bool send_err, struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

1926 if (wr_id == SRP_LAST_WR_ID) {
1927 complete(&ch->done);
1928 return;
1929 }
1930
1931 if (target->connected && !target->qp_in_error) {
1932 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1933 shost_printk(KERN_ERR, target->scsi_host, PFX
1934 "LOCAL_INV failed with status %d\n",
1935 wc_status);
1936 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1937 shost_printk(KERN_ERR, target->scsi_host, PFX
1938 "FAST_REG_MR failed status %d\n",
1939 wc_status);
1940 } else {
1941 shost_printk(KERN_ERR, target->scsi_host,
1942 PFX "failed %s status %d for iu %p\n",
1943 send_err ? "send" : "receive",
1944 wc_status, (void *)(uintptr_t)wr_id);
1945 }
1946 queue_work(system_long_wq, &target->tl_err_work);
1947 }
1948 target->qp_in_error = true;
1949 }
1950
1951 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
1952 {
1953 struct srp_rdma_ch *ch = ch_ptr;
1954 struct ib_wc wc;
1955
1956 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1957 while (ib_poll_cq(cq, 1, &wc) > 0) {
1958 if (likely(wc.status == IB_WC_SUCCESS)) {
1959 srp_handle_recv(ch, &wc);
1960 } else {
1961 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
1962 }
1963 }
1964 }
1965
1966 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
1967 {
1968 struct srp_rdma_ch *ch = ch_ptr;
1969 struct ib_wc wc;
1970 struct srp_iu *iu;
1971
1972 while (ib_poll_cq(cq, 1, &wc) > 0) {
1973 if (likely(wc.status == IB_WC_SUCCESS)) {
1974 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1975 list_add(&iu->list, &ch->free_tx);
1976 } else {
1977 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
1978 }
1979 }
1980 }
1981
1982 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1983 {
1984 struct srp_target_port *target = host_to_target(shost);
1985 struct srp_rport *rport = target->rport;
1986 struct srp_rdma_ch *ch;
1987 struct srp_request *req;
1988 struct srp_iu *iu;
1989 struct srp_cmd *cmd;
1990 struct ib_device *dev;
1991 unsigned long flags;
1992 u32 tag;
1993 u16 idx;
1994 int len, ret;
1995 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1996
1997 /*
1998 * The SCSI EH thread is the only context from which srp_queuecommand()
1999 * can get invoked for blocked devices (SDEV_BLOCK /
2000 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2001 * locking the rport mutex if invoked from inside the SCSI EH.
2002 */
2003 if (in_scsi_eh)
2004 mutex_lock(&rport->mutex);
2005
2006 scmnd->result = srp_chkready(target->rport);
2007 if (unlikely(scmnd->result))
2008 goto err;
2009
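/*
 * blk_mq_unique_tag() packs the hardware queue index into the upper
 * 16 bits of the tag and the per-queue tag into the lower 16 bits;
 * e.g. a unique tag of 0x00030007 selects hardware queue 3 and
 * request ring slot 7. The two helpers below extract those halves.
 */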
2010 WARN_ON_ONCE(scmnd->request->tag < 0);
2011 tag = blk_mq_unique_tag(scmnd->request);
2012 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2013 idx = blk_mq_unique_tag_to_tag(tag);
2014 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2015 dev_name(&shost->shost_gendev), tag, idx,
2016 target->req_ring_size);
2017
2018 spin_lock_irqsave(&ch->lock, flags);
2019 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2020 spin_unlock_irqrestore(&ch->lock, flags);
2021
2022 if (!iu)
2023 goto err;
2024
2025 req = &ch->req_ring[idx];
2026 dev = target->srp_host->srp_dev->dev;
2027 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2028 DMA_TO_DEVICE);
2029
2030 scmnd->host_scribble = (void *) req;
2031
2032 cmd = iu->buf;
2033 memset(cmd, 0, sizeof *cmd);
2034
2035 cmd->opcode = SRP_CMD;
2036 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
2037 cmd->tag = tag;
2038 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2039
2040 req->scmnd = scmnd;
2041 req->cmd = iu;
2042
2043 len = srp_map_data(scmnd, ch, req);
2044 if (len < 0) {
2045 shost_printk(KERN_ERR, target->scsi_host,
2046 PFX "Failed to map data (%d)\n", len);
2047 /*
2048 * If we ran out of memory descriptors (-ENOMEM) because an
2049 * application is queuing many requests with more than
2050 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2051 * to reduce queue depth temporarily.
2052 */
2053 scmnd->result = len == -ENOMEM ?
2054 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2055 goto err_iu;
2056 }
2057
2058 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2059 DMA_TO_DEVICE);
2060
2061 if (srp_post_send(ch, iu, len)) {
2062 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2063 goto err_unmap;
2064 }
2065
2066 ret = 0;
2067
2068 unlock_rport:
2069 if (in_scsi_eh)
2070 mutex_unlock(&rport->mutex);
2071
2072 return ret;
2073
2074 err_unmap:
2075 srp_unmap_data(scmnd, ch, req);
2076
2077 err_iu:
2078 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2079
2080 /*
2081 * Clear req->scmnd so that the loops that iterate over the request
2082 * ring cannot encounter a dangling SCSI command pointer.
2083 */
2084 req->scmnd = NULL;
2085
2086 err:
2087 if (scmnd->result) {
2088 scmnd->scsi_done(scmnd);
2089 ret = 0;
2090 } else {
2091 ret = SCSI_MLQUEUE_HOST_BUSY;
2092 }
2093
2094 goto unlock_rport;
2095 }
2096
2097 /*
2098 * Note: the resources allocated in this function are freed in
2099 * srp_free_ch_ib().
2100 */
2101 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2102 {
2103 struct srp_target_port *target = ch->target;
2104 int i;
2105
2106 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2107 GFP_KERNEL);
2108 if (!ch->rx_ring)
2109 goto err_no_ring;
2110 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2111 GFP_KERNEL);
2112 if (!ch->tx_ring)
2113 goto err_no_ring;
2114
2115 for (i = 0; i < target->queue_size; ++i) {
2116 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2117 ch->max_ti_iu_len,
2118 GFP_KERNEL, DMA_FROM_DEVICE);
2119 if (!ch->rx_ring[i])
2120 goto err;
2121 }
2122
2123 for (i = 0; i < target->queue_size; ++i) {
2124 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2125 target->max_iu_len,
2126 GFP_KERNEL, DMA_TO_DEVICE);
2127 if (!ch->tx_ring[i])
2128 goto err;
2129
2130 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2131 }
2132
2133 return 0;
2134
2135 err:
2136 for (i = 0; i < target->queue_size; ++i) {
2137 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2138 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2139 }
2140
2142 err_no_ring:
2143 kfree(ch->tx_ring);
2144 ch->tx_ring = NULL;
2145 kfree(ch->rx_ring);
2146 ch->rx_ring = NULL;
2147
2148 return -ENOMEM;
2149 }
2150
2151 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2152 {
2153 uint64_t T_tr_ns, max_compl_time_ms;
2154 uint32_t rq_tmo_jiffies;
2155
2156 /*
2157 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2158 * table 91), both the QP timeout and the retry count have to be set
2159 * for RC QPs during the RTR to RTS transition.
2160 */
2161 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2162 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2163
2164 /*
2165 * Set target->rq_tmo_jiffies to one second more than the largest time
2166 * it can take before an error completion is generated. See also
2167 * C9-140..142 in the IBTA spec for more information about how to
2168 * convert the QP Local ACK Timeout value to nanoseconds.
2169 */
2170 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2171 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2172 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2173 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2174
2175 return rq_tmo_jiffies;
2176 }
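/*
 * Worked example for srp_compute_rq_tmo() (illustrative values): a
 * local ACK timeout of 19 gives T_tr = 4096 * 2^19 ns, roughly 2.1
 * seconds, so with retry_cnt = 7 the largest possible completion
 * delay is 7 * 4 * 2.1 s, about 60 seconds, and rq_tmo_jiffies then
 * corresponds to roughly 61 seconds.
 */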
2177
2178 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2179 struct srp_login_rsp *lrsp,
2180 struct srp_rdma_ch *ch)
2181 {
2182 struct srp_target_port *target = ch->target;
2183 struct ib_qp_attr *qp_attr = NULL;
2184 int attr_mask = 0;
2185 int ret;
2186 int i;
2187
2188 if (lrsp->opcode == SRP_LOGIN_RSP) {
2189 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2190 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2191
2192 /*
2193 * Reserve credits for task management so we don't
2194 * bounce requests back to the SCSI mid-layer.
2195 */
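/*
 * (Illustrative, assuming SRP_TSK_MGMT_SQ_SIZE is 1: a login
 * response granting req_lim = 64 would cap can_queue at 63.)
 */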
2196 target->scsi_host->can_queue
2197 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2198 target->scsi_host->can_queue);
2199 target->scsi_host->cmd_per_lun
2200 = min_t(int, target->scsi_host->can_queue,
2201 target->scsi_host->cmd_per_lun);
2202 } else {
2203 shost_printk(KERN_WARNING, target->scsi_host,
2204 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2205 ret = -ECONNRESET;
2206 goto error;
2207 }
2208
2209 if (!ch->rx_ring) {
2210 ret = srp_alloc_iu_bufs(ch);
2211 if (ret)
2212 goto error;
2213 }
2214
2215 ret = -ENOMEM;
2216 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2217 if (!qp_attr)
2218 goto error;
2219
2220 qp_attr->qp_state = IB_QPS_RTR;
2221 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2222 if (ret)
2223 goto error_free;
2224
2225 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2226 if (ret)
2227 goto error_free;
2228
2229 for (i = 0; i < target->queue_size; i++) {
2230 struct srp_iu *iu = ch->rx_ring[i];
2231
2232 ret = srp_post_recv(ch, iu);
2233 if (ret)
2234 goto error_free;
2235 }
2236
2237 qp_attr->qp_state = IB_QPS_RTS;
2238 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2239 if (ret)
2240 goto error_free;
2241
2242 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2243
2244 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2245 if (ret)
2246 goto error_free;
2247
2248 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2249
2250 error_free:
2251 kfree(qp_attr);
2252
2253 error:
2254 ch->status = ret;
2255 }
2256
2257 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2258 struct ib_cm_event *event,
2259 struct srp_rdma_ch *ch)
2260 {
2261 struct srp_target_port *target = ch->target;
2262 struct Scsi_Host *shost = target->scsi_host;
2263 struct ib_class_port_info *cpi;
2264 int opcode;
2265
2266 switch (event->param.rej_rcvd.reason) {
2267 case IB_CM_REJ_PORT_CM_REDIRECT:
2268 cpi = event->param.rej_rcvd.ari;
2269 ch->path.dlid = cpi->redirect_lid;
2270 ch->path.pkey = cpi->redirect_pkey;
2271 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2272 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2273
2274 ch->status = ch->path.dlid ?
2275 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2276 break;
2277
2278 case IB_CM_REJ_PORT_REDIRECT:
2279 if (srp_target_is_topspin(target)) {
2280 /*
2281 * Topspin/Cisco SRP gateways incorrectly send
2282 * reject reason code 25 when they mean 24
2283 * (port redirect).
2284 */
2285 memcpy(ch->path.dgid.raw,
2286 event->param.rej_rcvd.ari, 16);
2287
2288 shost_printk(KERN_DEBUG, shost,
2289 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2290 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2291 be64_to_cpu(ch->path.dgid.global.interface_id));
2292
2293 ch->status = SRP_PORT_REDIRECT;
2294 } else {
2295 shost_printk(KERN_WARNING, shost,
2296 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2297 ch->status = -ECONNRESET;
2298 }
2299 break;
2300
2301 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2302 shost_printk(KERN_WARNING, shost,
2303 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2304 ch->status = -ECONNRESET;
2305 break;
2306
2307 case IB_CM_REJ_CONSUMER_DEFINED:
2308 opcode = *(u8 *) event->private_data;
2309 if (opcode == SRP_LOGIN_REJ) {
2310 struct srp_login_rej *rej = event->private_data;
2311 u32 reason = be32_to_cpu(rej->reason);
2312
2313 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2314 shost_printk(KERN_WARNING, shost,
2315 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2316 else
2317 shost_printk(KERN_WARNING, shost, PFX
2318 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2319 target->sgid.raw,
2320 target->orig_dgid.raw, reason);
2321 } else
2322 shost_printk(KERN_WARNING, shost,
2323 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2324 " opcode 0x%02x\n", opcode);
2325 ch->status = -ECONNRESET;
2326 break;
2327
2328 case IB_CM_REJ_STALE_CONN:
2329 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2330 ch->status = SRP_STALE_CONN;
2331 break;
2332
2333 default:
2334 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2335 event->param.rej_rcvd.reason);
2336 ch->status = -ECONNRESET;
2337 }
2338 }
2339
2340 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2341 {
2342 struct srp_rdma_ch *ch = cm_id->context;
2343 struct srp_target_port *target = ch->target;
2344 int comp = 0;
2345
2346 switch (event->event) {
2347 case IB_CM_REQ_ERROR:
2348 shost_printk(KERN_DEBUG, target->scsi_host,
2349 PFX "Sending CM REQ failed\n");
2350 comp = 1;
2351 ch->status = -ECONNRESET;
2352 break;
2353
2354 case IB_CM_REP_RECEIVED:
2355 comp = 1;
2356 srp_cm_rep_handler(cm_id, event->private_data, ch);
2357 break;
2358
2359 case IB_CM_REJ_RECEIVED:
2360 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2361 comp = 1;
2362
2363 srp_cm_rej_handler(cm_id, event, ch);
2364 break;
2365
2366 case IB_CM_DREQ_RECEIVED:
2367 shost_printk(KERN_WARNING, target->scsi_host,
2368 PFX "DREQ received - connection closed\n");
2369 srp_change_conn_state(target, false);
2370 if (ib_send_cm_drep(cm_id, NULL, 0))
2371 shost_printk(KERN_ERR, target->scsi_host,
2372 PFX "Sending CM DREP failed\n");
2373 queue_work(system_long_wq, &target->tl_err_work);
2374 break;
2375
2376 case IB_CM_TIMEWAIT_EXIT:
2377 shost_printk(KERN_ERR, target->scsi_host,
2378 PFX "connection closed\n");
2379 comp = 1;
2380
2381 ch->status = 0;
2382 break;
2383
2384 case IB_CM_MRA_RECEIVED:
2385 case IB_CM_DREQ_ERROR:
2386 case IB_CM_DREP_RECEIVED:
2387 break;
2388
2389 default:
2390 shost_printk(KERN_WARNING, target->scsi_host,
2391 PFX "Unhandled CM event %d\n", event->event);
2392 break;
2393 }
2394
2395 if (comp)
2396 complete(&ch->done);
2397
2398 return 0;
2399 }
2400
2401 /**
2402 * srp_change_queue_depth() - set the device queue depth
2403 * @sdev: scsi device struct
2404 * @qdepth: requested queue depth
2405 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
2406 * (see include/scsi/scsi_host.h for definition)
2407 *
2408 * Returns queue depth.
2409 */
2410 static int
2411 srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2412 {
2413 struct Scsi_Host *shost = sdev->host;
2414 int max_depth;
2415 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
2416 max_depth = shost->can_queue;
2417 if (!sdev->tagged_supported)
2418 max_depth = 1;
2419 if (qdepth > max_depth)
2420 qdepth = max_depth;
2421 scsi_adjust_queue_depth(sdev, qdepth);
2422 } else if (reason == SCSI_QDEPTH_QFULL)
2423 scsi_track_queue_full(sdev, qdepth);
2424 else
2425 return -EOPNOTSUPP;
2426
2427 return sdev->queue_depth;
2428 }
2429
2430 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2431 unsigned int lun, u8 func)
2432 {
2433 struct srp_target_port *target = ch->target;
2434 struct srp_rport *rport = target->rport;
2435 struct ib_device *dev = target->srp_host->srp_dev->dev;
2436 struct srp_iu *iu;
2437 struct srp_tsk_mgmt *tsk_mgmt;
2438
2439 if (!target->connected || target->qp_in_error)
2440 return -1;
2441
2442 init_completion(&ch->tsk_mgmt_done);
2443
2444 /*
2445 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2446 * invoked while a task management function is being sent.
2447 */
2448 mutex_lock(&rport->mutex);
2449 spin_lock_irq(&ch->lock);
2450 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2451 spin_unlock_irq(&ch->lock);
2452
2453 if (!iu) {
2454 mutex_unlock(&rport->mutex);
2455
2456 return -1;
2457 }
2458
2459 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2460 DMA_TO_DEVICE);
2461 tsk_mgmt = iu->buf;
2462 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2463
2464 tsk_mgmt->opcode = SRP_TSK_MGMT;
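/*
 * SRP carries LUNs in SAM format; for LUNs below 256 this is the
 * peripheral device addressing method, i.e. the LUN shifted into the
 * top 16 bits of an 8-byte big-endian field. E.g. LUN 5 is encoded
 * as the wire value 0x0005000000000000.
 */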
2465 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
2466 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
2467 tsk_mgmt->tsk_mgmt_func = func;
2468 tsk_mgmt->task_tag = req_tag;
2469
2470 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2471 DMA_TO_DEVICE);
2472 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2473 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2474 mutex_unlock(&rport->mutex);
2475
2476 return -1;
2477 }
2478 mutex_unlock(&rport->mutex);
2479
2480 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2481 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2482 return -1;
2483
2484 return 0;
2485 }
2486
2487 static int srp_abort(struct scsi_cmnd *scmnd)
2488 {
2489 struct srp_target_port *target = host_to_target(scmnd->device->host);
2490 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2491 u32 tag;
2492 u16 ch_idx;
2493 struct srp_rdma_ch *ch;
2494 int ret;
2495
2496 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2497
2498 if (!req)
2499 return SUCCESS;
2500 tag = blk_mq_unique_tag(scmnd->request);
2501 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2502 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2503 return SUCCESS;
2504 ch = &target->ch[ch_idx];
2505 if (!srp_claim_req(ch, req, NULL, scmnd))
2506 return SUCCESS;
2507 shost_printk(KERN_ERR, target->scsi_host,
2508 "Sending SRP abort for tag %#x\n", tag);
2509 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2510 SRP_TSK_ABORT_TASK) == 0)
2511 ret = SUCCESS;
2512 else if (target->rport->state == SRP_RPORT_LOST)
2513 ret = FAST_IO_FAIL;
2514 else
2515 ret = FAILED;
2516 srp_free_req(ch, req, scmnd, 0);
2517 scmnd->result = DID_ABORT << 16;
2518 scmnd->scsi_done(scmnd);
2519
2520 return ret;
2521 }
2522
2523 static int srp_reset_device(struct scsi_cmnd *scmnd)
2524 {
2525 struct srp_target_port *target = host_to_target(scmnd->device->host);
2526 struct srp_rdma_ch *ch;
2527 int i, j;
2528
2529 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2530
2531 ch = &target->ch[0];
2532 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2533 SRP_TSK_LUN_RESET))
2534 return FAILED;
2535 if (ch->tsk_mgmt_status)
2536 return FAILED;
2537
2538 for (i = 0; i < target->ch_count; i++) {
2539 ch = &target->ch[i];
2540 for (j = 0; j < target->req_ring_size; ++j) {
2541 struct srp_request *req = &ch->req_ring[j];
2542
2543 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2544 }
2545 }
2546
2547 return SUCCESS;
2548 }
2549
2550 static int srp_reset_host(struct scsi_cmnd *scmnd)
2551 {
2552 struct srp_target_port *target = host_to_target(scmnd->device->host);
2553
2554 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2555
2556 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2557 }
2558
2559 static int srp_slave_configure(struct scsi_device *sdev)
2560 {
2561 struct Scsi_Host *shost = sdev->host;
2562 struct srp_target_port *target = host_to_target(shost);
2563 struct request_queue *q = sdev->request_queue;
2564 unsigned long timeout;
2565
2566 if (sdev->type == TYPE_DISK) {
2567 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2568 blk_queue_rq_timeout(q, timeout);
2569 }
2570
2571 return 0;
2572 }
2573
2574 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2575 char *buf)
2576 {
2577 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2578
2579 return sprintf(buf, "0x%016llx\n",
2580 (unsigned long long) be64_to_cpu(target->id_ext));
2581 }
2582
2583 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2584 char *buf)
2585 {
2586 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2587
2588 return sprintf(buf, "0x%016llx\n",
2589 (unsigned long long) be64_to_cpu(target->ioc_guid));
2590 }
2591
2592 static ssize_t show_service_id(struct device *dev,
2593 struct device_attribute *attr, char *buf)
2594 {
2595 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2596
2597 return sprintf(buf, "0x%016llx\n",
2598 (unsigned long long) be64_to_cpu(target->service_id));
2599 }
2600
2601 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2602 char *buf)
2603 {
2604 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2605
2606 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2607 }
2608
2609 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2610 char *buf)
2611 {
2612 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2613
2614 return sprintf(buf, "%pI6\n", target->sgid.raw);
2615 }
2616
2617 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2618 char *buf)
2619 {
2620 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2621 struct srp_rdma_ch *ch = &target->ch[0];
2622
2623 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2624 }
2625
2626 static ssize_t show_orig_dgid(struct device *dev,
2627 struct device_attribute *attr, char *buf)
2628 {
2629 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2630
2631 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2632 }
2633
2634 static ssize_t show_req_lim(struct device *dev,
2635 struct device_attribute *attr, char *buf)
2636 {
2637 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2638 struct srp_rdma_ch *ch;
2639 int i, req_lim = INT_MAX;
2640
2641 for (i = 0; i < target->ch_count; i++) {
2642 ch = &target->ch[i];
2643 req_lim = min(req_lim, ch->req_lim);
2644 }
2645 return sprintf(buf, "%d\n", req_lim);
2646 }
2647
2648 static ssize_t show_zero_req_lim(struct device *dev,
2649 struct device_attribute *attr, char *buf)
2650 {
2651 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2652
2653 return sprintf(buf, "%d\n", target->zero_req_lim);
2654 }
2655
2656 static ssize_t show_local_ib_port(struct device *dev,
2657 struct device_attribute *attr, char *buf)
2658 {
2659 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2660
2661 return sprintf(buf, "%d\n", target->srp_host->port);
2662 }
2663
2664 static ssize_t show_local_ib_device(struct device *dev,
2665 struct device_attribute *attr, char *buf)
2666 {
2667 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2668
2669 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2670 }
2671
2672 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2673 char *buf)
2674 {
2675 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2676
2677 return sprintf(buf, "%d\n", target->ch_count);
2678 }
2679
2680 static ssize_t show_comp_vector(struct device *dev,
2681 struct device_attribute *attr, char *buf)
2682 {
2683 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2684
2685 return sprintf(buf, "%d\n", target->comp_vector);
2686 }
2687
2688 static ssize_t show_tl_retry_count(struct device *dev,
2689 struct device_attribute *attr, char *buf)
2690 {
2691 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2692
2693 return sprintf(buf, "%d\n", target->tl_retry_count);
2694 }
2695
2696 static ssize_t show_cmd_sg_entries(struct device *dev,
2697 struct device_attribute *attr, char *buf)
2698 {
2699 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2700
2701 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2702 }
2703
2704 static ssize_t show_allow_ext_sg(struct device *dev,
2705 struct device_attribute *attr, char *buf)
2706 {
2707 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2708
2709 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2710 }
2711
2712 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2713 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2714 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2715 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2716 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2717 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2718 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2719 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2720 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2721 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2722 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2723 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2724 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2725 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2726 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2727 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2728
2729 static struct device_attribute *srp_host_attrs[] = {
2730 &dev_attr_id_ext,
2731 &dev_attr_ioc_guid,
2732 &dev_attr_service_id,
2733 &dev_attr_pkey,
2734 &dev_attr_sgid,
2735 &dev_attr_dgid,
2736 &dev_attr_orig_dgid,
2737 &dev_attr_req_lim,
2738 &dev_attr_zero_req_lim,
2739 &dev_attr_local_ib_port,
2740 &dev_attr_local_ib_device,
2741 &dev_attr_ch_count,
2742 &dev_attr_comp_vector,
2743 &dev_attr_tl_retry_count,
2744 &dev_attr_cmd_sg_entries,
2745 &dev_attr_allow_ext_sg,
2746 NULL
2747 };
2748
2749 static struct scsi_host_template srp_template = {
2750 .module = THIS_MODULE,
2751 .name = "InfiniBand SRP initiator",
2752 .proc_name = DRV_NAME,
2753 .slave_configure = srp_slave_configure,
2754 .info = srp_target_info,
2755 .queuecommand = srp_queuecommand,
2756 .change_queue_depth = srp_change_queue_depth,
2757 .change_queue_type = scsi_change_queue_type,
2758 .eh_abort_handler = srp_abort,
2759 .eh_device_reset_handler = srp_reset_device,
2760 .eh_host_reset_handler = srp_reset_host,
2761 .skip_settle_delay = true,
2762 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2763 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
2764 .this_id = -1,
2765 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2766 .use_clustering = ENABLE_CLUSTERING,
2767 .shost_attrs = srp_host_attrs,
2768 .use_blk_tags = 1,
2769 };
2770
2771 static int srp_sdev_count(struct Scsi_Host *host)
2772 {
2773 struct scsi_device *sdev;
2774 int c = 0;
2775
2776 shost_for_each_device(sdev, host)
2777 c++;
2778
2779 return c;
2780 }
2781
2782 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2783 {
2784 struct srp_rport_identifiers ids;
2785 struct srp_rport *rport;
2786
2787 target->state = SRP_TARGET_SCANNING;
2788 sprintf(target->target_name, "SRP.T10:%016llX",
2789 (unsigned long long) be64_to_cpu(target->id_ext));
2790
2791 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2792 return -ENODEV;
2793
2794 memcpy(ids.port_id, &target->id_ext, 8);
2795 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2796 ids.roles = SRP_RPORT_ROLE_TARGET;
2797 rport = srp_rport_add(target->scsi_host, &ids);
2798 if (IS_ERR(rport)) {
2799 scsi_remove_host(target->scsi_host);
2800 return PTR_ERR(rport);
2801 }
2802
2803 rport->lld_data = target;
2804 target->rport = rport;
2805
2806 spin_lock(&host->target_lock);
2807 list_add_tail(&target->list, &host->target_list);
2808 spin_unlock(&host->target_lock);
2809
2810 scsi_scan_target(&target->scsi_host->shost_gendev,
2811 0, target->scsi_id, SCAN_WILD_CARD, 0);
2812
2813 if (!target->connected || target->qp_in_error) {
2814 shost_printk(KERN_INFO, target->scsi_host,
2815 PFX "SCSI scan failed - removing SCSI host\n");
2816 srp_queue_remove_work(target);
2817 goto out;
2818 }
2819
2820 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2821 dev_name(&target->scsi_host->shost_gendev),
2822 srp_sdev_count(target->scsi_host));
2823
2824 spin_lock_irq(&target->lock);
2825 if (target->state == SRP_TARGET_SCANNING)
2826 target->state = SRP_TARGET_LIVE;
2827 spin_unlock_irq(&target->lock);
2828
2829 out:
2830 return 0;
2831 }
2832
2833 static void srp_release_dev(struct device *dev)
2834 {
2835 struct srp_host *host =
2836 container_of(dev, struct srp_host, dev);
2837
2838 complete(&host->released);
2839 }
2840
2841 static struct class srp_class = {
2842 .name = "infiniband_srp",
2843 .dev_release = srp_release_dev
2844 };
2845
2846 /**
2847 * srp_conn_unique() - check whether the connection to a target is unique
2848 * @host: SRP host.
2849 * @target: SRP target port.
2850 */
2851 static bool srp_conn_unique(struct srp_host *host,
2852 struct srp_target_port *target)
2853 {
2854 struct srp_target_port *t;
2855 bool ret = false;
2856
2857 if (target->state == SRP_TARGET_REMOVED)
2858 goto out;
2859
2860 ret = true;
2861
2862 spin_lock(&host->target_lock);
2863 list_for_each_entry(t, &host->target_list, list) {
2864 if (t != target &&
2865 target->id_ext == t->id_ext &&
2866 target->ioc_guid == t->ioc_guid &&
2867 target->initiator_ext == t->initiator_ext) {
2868 ret = false;
2869 break;
2870 }
2871 }
2872 spin_unlock(&host->target_lock);
2873
2874 out:
2875 return ret;
2876 }
2877
2878 /*
2879 * Target ports are added by writing
2880 *
2881 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2882 * pkey=<P_Key>,service_id=<service ID>
2883 *
2884 * to the add_target sysfs attribute.
2885 */
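/*
 * Example with made-up identifiers (the sysfs directory name depends on
 * the HCA name and port; see srp_add_port() below):
 *
 * echo id_ext=0002c90300a0b0c0,ioc_guid=0002c90300a0b0c0,dgid=fe800000000000000002c90300a0b0c1,pkey=ffff,service_id=0002c90300a0b0c0 > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */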
2886 enum {
2887 SRP_OPT_ERR = 0,
2888 SRP_OPT_ID_EXT = 1 << 0,
2889 SRP_OPT_IOC_GUID = 1 << 1,
2890 SRP_OPT_DGID = 1 << 2,
2891 SRP_OPT_PKEY = 1 << 3,
2892 SRP_OPT_SERVICE_ID = 1 << 4,
2893 SRP_OPT_MAX_SECT = 1 << 5,
2894 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2895 SRP_OPT_IO_CLASS = 1 << 7,
2896 SRP_OPT_INITIATOR_EXT = 1 << 8,
2897 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
2898 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2899 SRP_OPT_SG_TABLESIZE = 1 << 11,
2900 SRP_OPT_COMP_VECTOR = 1 << 12,
2901 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
2902 SRP_OPT_QUEUE_SIZE = 1 << 14,
2903 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2904 SRP_OPT_IOC_GUID |
2905 SRP_OPT_DGID |
2906 SRP_OPT_PKEY |
2907 SRP_OPT_SERVICE_ID),
2908 };
2909
2910 static const match_table_t srp_opt_tokens = {
2911 { SRP_OPT_ID_EXT, "id_ext=%s" },
2912 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2913 { SRP_OPT_DGID, "dgid=%s" },
2914 { SRP_OPT_PKEY, "pkey=%x" },
2915 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2916 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2917 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
2918 { SRP_OPT_IO_CLASS, "io_class=%x" },
2919 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
2920 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
2921 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2922 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
2923 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
2924 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
2925 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
2926 { SRP_OPT_ERR, NULL }
2927 };
2928
2929 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2930 {
2931 char *options, *sep_opt;
2932 char *p;
2933 char dgid[3];
2934 substring_t args[MAX_OPT_ARGS];
2935 int opt_mask = 0;
2936 int token;
2937 int ret = -EINVAL;
2938 int i;
2939
2940 options = kstrdup(buf, GFP_KERNEL);
2941 if (!options)
2942 return -ENOMEM;
2943
2944 sep_opt = options;
2945 while ((p = strsep(&sep_opt, ",")) != NULL) {
2946 if (!*p)
2947 continue;
2948
2949 token = match_token(p, srp_opt_tokens, args);
2950 opt_mask |= token;
2951
2952 switch (token) {
2953 case SRP_OPT_ID_EXT:
2954 p = match_strdup(args);
2955 if (!p) {
2956 ret = -ENOMEM;
2957 goto out;
2958 }
2959 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2960 kfree(p);
2961 break;
2962
2963 case SRP_OPT_IOC_GUID:
2964 p = match_strdup(args);
2965 if (!p) {
2966 ret = -ENOMEM;
2967 goto out;
2968 }
2969 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2970 kfree(p);
2971 break;
2972
2973 case SRP_OPT_DGID:
2974 p = match_strdup(args);
2975 if (!p) {
2976 ret = -ENOMEM;
2977 goto out;
2978 }
2979 if (strlen(p) != 32) {
2980 pr_warn("bad dest GID parameter '%s'\n", p);
2981 kfree(p);
2982 goto out;
2983 }
2984
2985 for (i = 0; i < 16; ++i) {
2986 strlcpy(dgid, p + i * 2, sizeof(dgid));
2987 if (sscanf(dgid, "%hhx",
2988 &target->orig_dgid.raw[i]) < 1) {
2989 ret = -EINVAL;
2990 kfree(p);
2991 goto out;
2992 }
2993 }
2994 kfree(p);
2995 break;
2996
2997 case SRP_OPT_PKEY:
2998 if (match_hex(args, &token)) {
2999 pr_warn("bad P_Key parameter '%s'\n", p);
3000 goto out;
3001 }
3002 target->pkey = cpu_to_be16(token);
3003 break;
3004
3005 case SRP_OPT_SERVICE_ID:
3006 p = match_strdup(args);
3007 if (!p) {
3008 ret = -ENOMEM;
3009 goto out;
3010 }
3011 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3012 kfree(p);
3013 break;
3014
3015 case SRP_OPT_MAX_SECT:
3016 if (match_int(args, &token)) {
3017 pr_warn("bad max sect parameter '%s'\n", p);
3018 goto out;
3019 }
3020 target->scsi_host->max_sectors = token;
3021 break;
3022
3023 case SRP_OPT_QUEUE_SIZE:
3024 if (match_int(args, &token) || token < 1) {
3025 pr_warn("bad queue_size parameter '%s'\n", p);
3026 goto out;
3027 }
3028 target->scsi_host->can_queue = token;
3029 target->queue_size = token + SRP_RSP_SQ_SIZE +
3030 SRP_TSK_MGMT_SQ_SIZE;
3031 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3032 target->scsi_host->cmd_per_lun = token;
3033 break;
3034
3035 case SRP_OPT_MAX_CMD_PER_LUN:
3036 if (match_int(args, &token) || token < 1) {
3037 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3038 p);
3039 goto out;
3040 }
3041 target->scsi_host->cmd_per_lun = token;
3042 break;
3043
3044 case SRP_OPT_IO_CLASS:
3045 if (match_hex(args, &token)) {
3046 pr_warn("bad IO class parameter '%s'\n", p);
3047 goto out;
3048 }
3049 if (token != SRP_REV10_IB_IO_CLASS &&
3050 token != SRP_REV16A_IB_IO_CLASS) {
3051 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3052 token, SRP_REV10_IB_IO_CLASS,
3053 SRP_REV16A_IB_IO_CLASS);
3054 goto out;
3055 }
3056 target->io_class = token;
3057 break;
3058
3059 case SRP_OPT_INITIATOR_EXT:
3060 p = match_strdup(args);
3061 if (!p) {
3062 ret = -ENOMEM;
3063 goto out;
3064 }
3065 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3066 kfree(p);
3067 break;
3068
3069 case SRP_OPT_CMD_SG_ENTRIES:
3070 if (match_int(args, &token) || token < 1 || token > 255) {
3071 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3072 p);
3073 goto out;
3074 }
3075 target->cmd_sg_cnt = token;
3076 break;
3077
3078 case SRP_OPT_ALLOW_EXT_SG:
3079 if (match_int(args, &token)) {
3080 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3081 goto out;
3082 }
3083 target->allow_ext_sg = !!token;
3084 break;
3085
3086 case SRP_OPT_SG_TABLESIZE:
3087 if (match_int(args, &token) || token < 1 ||
3088 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3089 pr_warn("bad max sg_tablesize parameter '%s'\n",
3090 p);
3091 goto out;
3092 }
3093 target->sg_tablesize = token;
3094 break;
3095
3096 case SRP_OPT_COMP_VECTOR:
3097 if (match_int(args, &token) || token < 0) {
3098 pr_warn("bad comp_vector parameter '%s'\n", p);
3099 goto out;
3100 }
3101 target->comp_vector = token;
3102 break;
3103
3104 case SRP_OPT_TL_RETRY_COUNT:
3105 if (match_int(args, &token) || token < 2 || token > 7) {
3106 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3107 p);
3108 goto out;
3109 }
3110 target->tl_retry_count = token;
3111 break;
3112
3113 default:
3114 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3115 p);
3116 goto out;
3117 }
3118 }
3119
3120 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3121 ret = 0;
3122 else
3123 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3124 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3125 !(srp_opt_tokens[i].token & opt_mask))
3126 pr_warn("target creation request is missing parameter '%s'\n",
3127 srp_opt_tokens[i].pattern);
3128
3129 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3130 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3131 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3132 target->scsi_host->cmd_per_lun,
3133 target->scsi_host->can_queue);
3134
3135 out:
3136 kfree(options);
3137 return ret;
3138 }
3139
3140 static ssize_t srp_create_target(struct device *dev,
3141 struct device_attribute *attr,
3142 const char *buf, size_t count)
3143 {
3144 struct srp_host *host =
3145 container_of(dev, struct srp_host, dev);
3146 struct Scsi_Host *target_host;
3147 struct srp_target_port *target;
3148 struct srp_rdma_ch *ch;
3149 struct srp_device *srp_dev = host->srp_dev;
3150 struct ib_device *ibdev = srp_dev->dev;
3151 int ret, node_idx, node, cpu, i;
3152 bool multich = false;
3153
3154 target_host = scsi_host_alloc(&srp_template,
3155 sizeof (struct srp_target_port));
3156 if (!target_host)
3157 return -ENOMEM;
3158
3159 target_host->transportt = ib_srp_transport_template;
3160 target_host->max_channel = 0;
3161 target_host->max_id = 1;
3162 target_host->max_lun = SRP_MAX_LUN;
3163 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3164
3165 target = host_to_target(target_host);
3166
3167 target->io_class = SRP_REV16A_IB_IO_CLASS;
3168 target->scsi_host = target_host;
3169 target->srp_host = host;
3170 target->lkey = host->srp_dev->mr->lkey;
3171 target->rkey = host->srp_dev->mr->rkey;
3172 target->cmd_sg_cnt = cmd_sg_entries;
3173 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3174 target->allow_ext_sg = allow_ext_sg;
3175 target->tl_retry_count = 7;
3176 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3177
3178 /*
3179 * Prevent the SCSI host from being removed by srp_remove_target()
3180 * before this function returns.
3181 */
3182 scsi_host_get(target->scsi_host);
3183
3184 mutex_lock(&host->add_target_mutex);
3185
3186 ret = srp_parse_options(buf, target);
3187 if (ret)
3188 goto err;
3189
3190 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3191 if (ret)
3192 goto err;
3193
3194 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3195
3196 if (!srp_conn_unique(target->srp_host, target)) {
3197 shost_printk(KERN_INFO, target->scsi_host,
3198 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3199 be64_to_cpu(target->id_ext),
3200 be64_to_cpu(target->ioc_guid),
3201 be64_to_cpu(target->initiator_ext));
3202 ret = -EEXIST;
3203 goto err;
3204 }
3205
3206 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3207 target->cmd_sg_cnt < target->sg_tablesize) {
3208 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3209 target->sg_tablesize = target->cmd_sg_cnt;
3210 }
3211
3212 target_host->sg_tablesize = target->sg_tablesize;
3213 target->indirect_size = target->sg_tablesize *
3214 sizeof (struct srp_direct_buf);
3215 target->max_iu_len = sizeof (struct srp_cmd) +
3216 sizeof (struct srp_indirect_buf) +
3217 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3218
3219 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3220 INIT_WORK(&target->remove_work, srp_remove_work);
3221 spin_lock_init(&target->lock);
3222 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
3223 if (ret)
3224 goto err;
3225
3226 ret = -ENOMEM;
3227 target->ch_count = max_t(unsigned, num_online_nodes(),
3228 min(ch_count ? :
3229 min(4 * num_online_nodes(),
3230 ibdev->num_comp_vectors),
3231 num_online_cpus()));
3232 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3233 GFP_KERNEL);
3234 if (!target->ch)
3235 goto err;
3236
3237 node_idx = 0;
3238 for_each_online_node(node) {
3239 const int ch_start = (node_idx * target->ch_count /
3240 num_online_nodes());
3241 const int ch_end = ((node_idx + 1) * target->ch_count /
3242 num_online_nodes());
3243 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3244 num_online_nodes() + target->comp_vector)
3245 % ibdev->num_comp_vectors;
3246 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3247 num_online_nodes() + target->comp_vector)
3248 % ibdev->num_comp_vectors;
3249 int cpu_idx = 0;
3250
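/*
 * Illustration with assumed numbers: with two online NUMA nodes,
 * ch_count = 8 and 16 completion vectors, node 0 is assigned
 * channels [0, 4) and vectors [0, 8) and node 1 channels [4, 8)
 * and vectors [8, 16), before target->comp_vector rotates the
 * vector ranges.
 */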
3251 for_each_online_cpu(cpu) {
3252 if (cpu_to_node(cpu) != node)
3253 continue;
3254 if (ch_start + cpu_idx >= ch_end)
3255 continue;
3256 ch = &target->ch[ch_start + cpu_idx];
3257 ch->target = target;
3258 ch->comp_vector = cv_start == cv_end ? cv_start :
3259 cv_start + cpu_idx % (cv_end - cv_start);
3260 spin_lock_init(&ch->lock);
3261 INIT_LIST_HEAD(&ch->free_tx);
3262 ret = srp_new_cm_id(ch);
3263 if (ret)
3264 goto err_disconnect;
3265
3266 ret = srp_create_ch_ib(ch);
3267 if (ret)
3268 goto err_disconnect;
3269
3270 ret = srp_alloc_req_data(ch);
3271 if (ret)
3272 goto err_disconnect;
3273
3274 ret = srp_connect_ch(ch, multich);
3275 if (ret) {
3276 shost_printk(KERN_ERR, target->scsi_host,
3277 PFX "Connection %d/%d failed\n",
3278 ch_start + cpu_idx,
3279 target->ch_count);
3280 if (node_idx == 0 && cpu_idx == 0) {
3281 goto err_disconnect;
3282 } else {
3283 srp_free_ch_ib(target, ch);
3284 srp_free_req_data(target, ch);
3285 target->ch_count = ch - target->ch;
3286 break;
3287 }
3288 }
3289
3290 multich = true;
3291 cpu_idx++;
3292 }
3293 node_idx++;
3294 }
3295
3296 target->scsi_host->nr_hw_queues = target->ch_count;
3297
3298 ret = srp_add_target(host, target);
3299 if (ret)
3300 goto err_disconnect;
3301
3302 if (target->state != SRP_TARGET_REMOVED) {
3303 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3304 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3305 be64_to_cpu(target->id_ext),
3306 be64_to_cpu(target->ioc_guid),
3307 be16_to_cpu(target->pkey),
3308 be64_to_cpu(target->service_id),
3309 target->sgid.raw, target->orig_dgid.raw);
3310 }
3311
3312 ret = count;
3313
3314 out:
3315 mutex_unlock(&host->add_target_mutex);
3316
3317 scsi_host_put(target->scsi_host);
3318
3319 return ret;
3320
3321 err_disconnect:
3322 srp_disconnect_target(target);
3323
3324 for (i = 0; i < target->ch_count; i++) {
3325 ch = &target->ch[i];
3326 srp_free_ch_ib(target, ch);
3327 srp_free_req_data(target, ch);
3328 }
3329
3330 kfree(target->ch);
3331
3332 err:
3333 scsi_host_put(target_host);
3334 goto out;
3335 }
3336
3337 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3338
3339 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3340 char *buf)
3341 {
3342 struct srp_host *host = container_of(dev, struct srp_host, dev);
3343
3344 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3345 }
3346
3347 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3348
3349 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3350 char *buf)
3351 {
3352 struct srp_host *host = container_of(dev, struct srp_host, dev);
3353
3354 return sprintf(buf, "%d\n", host->port);
3355 }
3356
3357 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3358
3359 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3360 {
3361 struct srp_host *host;
3362
3363 host = kzalloc(sizeof *host, GFP_KERNEL);
3364 if (!host)
3365 return NULL;
3366
3367 INIT_LIST_HEAD(&host->target_list);
3368 spin_lock_init(&host->target_lock);
3369 init_completion(&host->released);
3370 mutex_init(&host->add_target_mutex);
3371 host->srp_dev = device;
3372 host->port = port;
3373
3374 host->dev.class = &srp_class;
3375 host->dev.parent = device->dev->dma_device;
3376 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3377
3378 if (device_register(&host->dev))
3379 goto free_host;
3380 if (device_create_file(&host->dev, &dev_attr_add_target))
3381 goto err_class;
3382 if (device_create_file(&host->dev, &dev_attr_ibdev))
3383 goto err_class;
3384 if (device_create_file(&host->dev, &dev_attr_port))
3385 goto err_class;
3386
3387 return host;
3388
3389 err_class:
3390 device_unregister(&host->dev);
3391
3392 free_host:
3393 kfree(host);
3394
3395 return NULL;
3396 }
3397
3398 static void srp_add_one(struct ib_device *device)
3399 {
3400 struct srp_device *srp_dev;
3401 struct ib_device_attr *dev_attr;
3402 struct srp_host *host;
3403 int mr_page_shift, s, e, p;
3404 u64 max_pages_per_mr;
3405
3406 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3407 if (!dev_attr)
3408 return;
3409
3410 if (ib_query_device(device, dev_attr)) {
3411 pr_warn("Query device failed for %s\n", device->name);
3412 goto free_attr;
3413 }
3414
3415 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3416 if (!srp_dev)
3417 goto free_attr;
3418
3419 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3420 device->map_phys_fmr && device->unmap_fmr);
3421 srp_dev->has_fr = (dev_attr->device_cap_flags &
3422 IB_DEVICE_MEM_MGT_EXTENSIONS);
3423 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3424 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3425
3426 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3427 (!srp_dev->has_fmr || prefer_fr));
3428
3429 /*
3430 * Use the smallest page size supported by the HCA, down to a
3431 * minimum of 4096 bytes. We're unlikely to build large sglists
3432 * out of smaller entries.
3433 */
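/*
 * E.g. a device reporting page_size_cap = 0xfffff000 yields
 * ffs() - 1 = 12 and hence 4 KiB MR pages (illustrative value).
 */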
3434 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3435 srp_dev->mr_page_size = 1 << mr_page_shift;
3436 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3437 max_pages_per_mr = dev_attr->max_mr_size;
3438 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3439 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3440 max_pages_per_mr);
3441 if (srp_dev->use_fast_reg) {
3442 srp_dev->max_pages_per_mr =
3443 min_t(u32, srp_dev->max_pages_per_mr,
3444 dev_attr->max_fast_reg_page_list_len);
3445 }
3446 srp_dev->mr_max_size = srp_dev->mr_page_size *
3447 srp_dev->max_pages_per_mr;
3448 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3449 device->name, mr_page_shift, dev_attr->max_mr_size,
3450 dev_attr->max_fast_reg_page_list_len,
3451 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3452
3453 INIT_LIST_HEAD(&srp_dev->dev_list);
3454
3455 srp_dev->dev = device;
3456 srp_dev->pd = ib_alloc_pd(device);
3457 if (IS_ERR(srp_dev->pd))
3458 goto free_dev;
3459
3460 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3461 IB_ACCESS_LOCAL_WRITE |
3462 IB_ACCESS_REMOTE_READ |
3463 IB_ACCESS_REMOTE_WRITE);
3464 if (IS_ERR(srp_dev->mr))
3465 goto err_pd;
3466
3467 if (device->node_type == RDMA_NODE_IB_SWITCH) {
3468 s = 0;
3469 e = 0;
3470 } else {
3471 s = 1;
3472 e = device->phys_port_cnt;
3473 }
3474
3475 for (p = s; p <= e; ++p) {
3476 host = srp_add_port(srp_dev, p);
3477 if (host)
3478 list_add_tail(&host->list, &srp_dev->dev_list);
3479 }
3480
3481 ib_set_client_data(device, &srp_client, srp_dev);
3482
3483 goto free_attr;
3484
3485 err_pd:
3486 ib_dealloc_pd(srp_dev->pd);
3487
3488 free_dev:
3489 kfree(srp_dev);
3490
3491 free_attr:
3492 kfree(dev_attr);
3493 }
3494
3495 static void srp_remove_one(struct ib_device *device)
3496 {
3497 struct srp_device *srp_dev;
3498 struct srp_host *host, *tmp_host;
3499 struct srp_target_port *target;
3500
3501 srp_dev = ib_get_client_data(device, &srp_client);
3502 if (!srp_dev)
3503 return;
3504
3505 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3506 device_unregister(&host->dev);
3507 /*
3508 * Wait for the sysfs entry to go away, so that no new
3509 * target ports can be created.
3510 */
3511 wait_for_completion(&host->released);
3512
3513 /*
3514 * Remove all target ports.
3515 */
3516 spin_lock(&host->target_lock);
3517 list_for_each_entry(target, &host->target_list, list)
3518 srp_queue_remove_work(target);
3519 spin_unlock(&host->target_lock);
3520
3521 /*
3522 * Wait for tl_err and target port removal tasks.
3523 */
3524 flush_workqueue(system_long_wq);
3525 flush_workqueue(srp_remove_wq);
3526
3527 kfree(host);
3528 }
3529
3530 ib_dereg_mr(srp_dev->mr);
3531 ib_dealloc_pd(srp_dev->pd);
3532
3533 kfree(srp_dev);
3534 }
3535
3536 static struct srp_function_template ib_srp_transport_functions = {
3537 .has_rport_state = true,
3538 .reset_timer_if_blocked = true,
3539 .reconnect_delay = &srp_reconnect_delay,
3540 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3541 .dev_loss_tmo = &srp_dev_loss_tmo,
3542 .reconnect = srp_rport_reconnect,
3543 .rport_delete = srp_rport_delete,
3544 .terminate_rport_io = srp_terminate_io,
3545 };
3546
3547 static int __init srp_init_module(void)
3548 {
3549 int ret;
3550
3551 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3552
3553 if (srp_sg_tablesize) {
3554 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3555 if (!cmd_sg_entries)
3556 cmd_sg_entries = srp_sg_tablesize;
3557 }
3558
3559 if (!cmd_sg_entries)
3560 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3561
3562 if (cmd_sg_entries > 255) {
3563 pr_warn("Clamping cmd_sg_entries to 255\n");
3564 cmd_sg_entries = 255;
3565 }
3566
3567 if (!indirect_sg_entries)
3568 indirect_sg_entries = cmd_sg_entries;
3569 else if (indirect_sg_entries < cmd_sg_entries) {
3570 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3571 cmd_sg_entries);
3572 indirect_sg_entries = cmd_sg_entries;
3573 }
3574
3575 srp_remove_wq = create_workqueue("srp_remove");
3576 if (!srp_remove_wq) {
3577 ret = -ENOMEM;
3578 goto out;
3579 }
3580
3581 ret = -ENOMEM;
3582 ib_srp_transport_template =
3583 srp_attach_transport(&ib_srp_transport_functions);
3584 if (!ib_srp_transport_template)
3585 goto destroy_wq;
3586
3587 ret = class_register(&srp_class);
3588 if (ret) {
3589 pr_err("couldn't register class infiniband_srp\n");
3590 goto release_tr;
3591 }
3592
3593 ib_sa_register_client(&srp_sa_client);
3594
3595 ret = ib_register_client(&srp_client);
3596 if (ret) {
3597 pr_err("couldn't register IB client\n");
3598 goto unreg_sa;
3599 }
3600
3601 out:
3602 return ret;
3603
3604 unreg_sa:
3605 ib_sa_unregister_client(&srp_sa_client);
3606 class_unregister(&srp_class);
3607
3608 release_tr:
3609 srp_release_transport(ib_srp_transport_template);
3610
3611 destroy_wq:
3612 destroy_workqueue(srp_remove_wq);
3613 goto out;
3614 }
3615
3616 static void __exit srp_cleanup_module(void)
3617 {
3618 ib_unregister_client(&srp_client);
3619 ib_sa_unregister_client(&srp_sa_client);
3620 class_unregister(&srp_class);
3621 srp_release_transport(ib_srp_transport_template);
3622 destroy_workqueue(srp_remove_wq);
3623 }
3624
3625 module_init(srp_init_module);
3626 module_exit(srp_cleanup_module);