IB/srp: Introduce srp_alloc_req_data()
drivers/infiniband/ulp/srp/ib_srp.c
1 /*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #define pr_fmt(fmt) PFX fmt
34
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43
44 #include <linux/atomic.h>
45
46 #include <scsi/scsi.h>
47 #include <scsi/scsi_device.h>
48 #include <scsi/scsi_dbg.h>
49 #include <scsi/srp.h>
50 #include <scsi/scsi_transport_srp.h>
51
52 #include "ib_srp.h"
53
54 #define DRV_NAME "ib_srp"
55 #define PFX DRV_NAME ": "
56 #define DRV_VERSION "1.0"
57 #define DRV_RELDATE "July 1, 2013"
58
59 MODULE_AUTHOR("Roland Dreier");
60 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
61 "v" DRV_VERSION " (" DRV_RELDATE ")");
62 MODULE_LICENSE("Dual BSD/GPL");
63
64 static unsigned int srp_sg_tablesize;
65 static unsigned int cmd_sg_entries;
66 static unsigned int indirect_sg_entries;
67 static bool allow_ext_sg;
68 static int topspin_workarounds = 1;
69
70 module_param(srp_sg_tablesize, uint, 0444);
71 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
72
73 module_param(cmd_sg_entries, uint, 0444);
74 MODULE_PARM_DESC(cmd_sg_entries,
75 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
76
77 module_param(indirect_sg_entries, uint, 0444);
78 MODULE_PARM_DESC(indirect_sg_entries,
79 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
80
81 module_param(allow_ext_sg, bool, 0444);
82 MODULE_PARM_DESC(allow_ext_sg,
83 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
84
85 module_param(topspin_workarounds, int, 0444);
86 MODULE_PARM_DESC(topspin_workarounds,
87 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
88
89 static struct kernel_param_ops srp_tmo_ops;
90
91 static int srp_reconnect_delay = 10;
92 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
93 S_IRUGO | S_IWUSR);
94 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
95
96 static int srp_fast_io_fail_tmo = 15;
97 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
98 S_IRUGO | S_IWUSR);
99 MODULE_PARM_DESC(fast_io_fail_tmo,
100 "Number of seconds between the observation of a transport"
101 " layer error and failing all I/O. \"off\" means that this"
102 " functionality is disabled.");
103
104 static int srp_dev_loss_tmo = 600;
105 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
106 S_IRUGO | S_IWUSR);
107 MODULE_PARM_DESC(dev_loss_tmo,
108 "Maximum number of seconds that the SRP transport should"
109 " insulate transport layer errors. After this time has been"
110 " exceeded the SCSI host is removed. Should be"
111 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
112 " if fast_io_fail_tmo has not been set. \"off\" means that"
113 " this functionality is disabled.");
114
115 static void srp_add_one(struct ib_device *device);
116 static void srp_remove_one(struct ib_device *device);
117 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
118 static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
119 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
120
121 static struct scsi_transport_template *ib_srp_transport_template;
122
123 static struct ib_client srp_client = {
124 .name = "srp",
125 .add = srp_add_one,
126 .remove = srp_remove_one
127 };
128
129 static struct ib_sa_client srp_sa_client;
130
131 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
132 {
133 int tmo = *(int *)kp->arg;
134
135 if (tmo >= 0)
136 return sprintf(buffer, "%d", tmo);
137 else
138 return sprintf(buffer, "off");
139 }
140
141 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
142 {
143 int tmo, res;
144
145 if (strncmp(val, "off", 3) != 0) {
146 res = kstrtoint(val, 0, &tmo);
147 if (res)
148 goto out;
149 } else {
150 tmo = -1;
151 }
152 if (kp->arg == &srp_reconnect_delay)
153 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
154 srp_dev_loss_tmo);
155 else if (kp->arg == &srp_fast_io_fail_tmo)
156 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
157 else
158 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
159 tmo);
160 if (res)
161 goto out;
162 *(int *)kp->arg = tmo;
163
164 out:
165 return res;
166 }
167
168 static struct kernel_param_ops srp_tmo_ops = {
169 .get = srp_tmo_get,
170 .set = srp_tmo_set,
171 };
172
173 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
174 {
175 return (struct srp_target_port *) host->hostdata;
176 }
177
178 static const char *srp_target_info(struct Scsi_Host *host)
179 {
180 return host_to_target(host)->target_name;
181 }
182
183 static int srp_target_is_topspin(struct srp_target_port *target)
184 {
185 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
186 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
187
188 return topspin_workarounds &&
189 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
190 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
191 }
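/*
 * Note: the memcmp() calls above rely on the OUI occupying the first
 * three bytes of the (big-endian) ioc_guid, which follows from the
 * EUI-64 layout of IB GUIDs: a 24-bit vendor OUI followed by a 40-bit
 * vendor-assigned extension.
 */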
192
193 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
194 gfp_t gfp_mask,
195 enum dma_data_direction direction)
196 {
197 struct srp_iu *iu;
198
199 iu = kmalloc(sizeof *iu, gfp_mask);
200 if (!iu)
201 goto out;
202
203 iu->buf = kzalloc(size, gfp_mask);
204 if (!iu->buf)
205 goto out_free_iu;
206
207 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
208 direction);
209 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
210 goto out_free_buf;
211
212 iu->size = size;
213 iu->direction = direction;
214
215 return iu;
216
217 out_free_buf:
218 kfree(iu->buf);
219 out_free_iu:
220 kfree(iu);
221 out:
222 return NULL;
223 }
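/*
 * srp_alloc_iu() and srp_free_iu() are strict counterparts: everything
 * that is allocated and DMA-mapped here is unmapped and freed there in
 * reverse order. Minimal usage sketch (error handling elided):
 *
 *   iu = srp_alloc_iu(host, size, GFP_KERNEL, DMA_TO_DEVICE);
 *   ...fill iu->buf and post iu->dma...
 *   srp_free_iu(host, iu);
 */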
224
225 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
226 {
227 if (!iu)
228 return;
229
230 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
231 iu->direction);
232 kfree(iu->buf);
233 kfree(iu);
234 }
235
236 static void srp_qp_event(struct ib_event *event, void *context)
237 {
238 pr_debug("QP event %d\n", event->event);
239 }
240
241 static int srp_init_qp(struct srp_target_port *target,
242 struct ib_qp *qp)
243 {
244 struct ib_qp_attr *attr;
245 int ret;
246
247 attr = kmalloc(sizeof *attr, GFP_KERNEL);
248 if (!attr)
249 return -ENOMEM;
250
251 ret = ib_find_pkey(target->srp_host->srp_dev->dev,
252 target->srp_host->port,
253 be16_to_cpu(target->path.pkey),
254 &attr->pkey_index);
255 if (ret)
256 goto out;
257
258 attr->qp_state = IB_QPS_INIT;
259 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
260 IB_ACCESS_REMOTE_WRITE);
261 attr->port_num = target->srp_host->port;
262
263 ret = ib_modify_qp(qp, attr,
264 IB_QP_STATE |
265 IB_QP_PKEY_INDEX |
266 IB_QP_ACCESS_FLAGS |
267 IB_QP_PORT);
268
269 out:
270 kfree(attr);
271 return ret;
272 }
273
274 static int srp_new_cm_id(struct srp_target_port *target)
275 {
276 struct ib_cm_id *new_cm_id;
277
278 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
279 srp_cm_handler, target);
280 if (IS_ERR(new_cm_id))
281 return PTR_ERR(new_cm_id);
282
283 if (target->cm_id)
284 ib_destroy_cm_id(target->cm_id);
285 target->cm_id = new_cm_id;
286
287 return 0;
288 }
289
290 static int srp_create_target_ib(struct srp_target_port *target)
291 {
292 struct ib_qp_init_attr *init_attr;
293 struct ib_cq *recv_cq, *send_cq;
294 struct ib_qp *qp;
295 int ret;
296
297 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
298 if (!init_attr)
299 return -ENOMEM;
300
301 recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
302 srp_recv_completion, NULL, target, SRP_RQ_SIZE,
303 target->comp_vector);
304 if (IS_ERR(recv_cq)) {
305 ret = PTR_ERR(recv_cq);
306 goto err;
307 }
308
309 send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
310 srp_send_completion, NULL, target, SRP_SQ_SIZE,
311 target->comp_vector);
312 if (IS_ERR(send_cq)) {
313 ret = PTR_ERR(send_cq);
314 goto err_recv_cq;
315 }
316
317 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
318
319 init_attr->event_handler = srp_qp_event;
320 init_attr->cap.max_send_wr = SRP_SQ_SIZE;
321 init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
322 init_attr->cap.max_recv_sge = 1;
323 init_attr->cap.max_send_sge = 1;
324 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
325 init_attr->qp_type = IB_QPT_RC;
326 init_attr->send_cq = send_cq;
327 init_attr->recv_cq = recv_cq;
328
329 qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
330 if (IS_ERR(qp)) {
331 ret = PTR_ERR(qp);
332 goto err_send_cq;
333 }
334
335 ret = srp_init_qp(target, qp);
336 if (ret)
337 goto err_qp;
338
339 if (target->qp)
340 ib_destroy_qp(target->qp);
341 if (target->recv_cq)
342 ib_destroy_cq(target->recv_cq);
343 if (target->send_cq)
344 ib_destroy_cq(target->send_cq);
345
346 target->qp = qp;
347 target->recv_cq = recv_cq;
348 target->send_cq = send_cq;
349
350 kfree(init_attr);
351 return 0;
352
353 err_qp:
354 ib_destroy_qp(qp);
355
356 err_send_cq:
357 ib_destroy_cq(send_cq);
358
359 err_recv_cq:
360 ib_destroy_cq(recv_cq);
361
362 err:
363 kfree(init_attr);
364 return ret;
365 }
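/*
 * Note that srp_create_target_ib() only destroys a pre-existing QP and
 * CQs after the new ones have been created and initialized successfully,
 * so srp_rport_reconnect() can call it on a live target: if allocation
 * fails, the old resources are left untouched.
 */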
366
367 static void srp_free_target_ib(struct srp_target_port *target)
368 {
369 int i;
370
371 ib_destroy_qp(target->qp);
372 ib_destroy_cq(target->send_cq);
373 ib_destroy_cq(target->recv_cq);
374
375 target->qp = NULL;
376 target->send_cq = target->recv_cq = NULL;
377
378 for (i = 0; i < SRP_RQ_SIZE; ++i)
379 srp_free_iu(target->srp_host, target->rx_ring[i]);
380 for (i = 0; i < SRP_SQ_SIZE; ++i)
381 srp_free_iu(target->srp_host, target->tx_ring[i]);
382 }
383
384 static void srp_path_rec_completion(int status,
385 struct ib_sa_path_rec *pathrec,
386 void *target_ptr)
387 {
388 struct srp_target_port *target = target_ptr;
389
390 target->status = status;
391 if (status)
392 shost_printk(KERN_ERR, target->scsi_host,
393 PFX "Got failed path rec status %d\n", status);
394 else
395 target->path = *pathrec;
396 complete(&target->done);
397 }
398
399 static int srp_lookup_path(struct srp_target_port *target)
400 {
401 target->path.numb_path = 1;
402
403 init_completion(&target->done);
404
405 target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
406 target->srp_host->srp_dev->dev,
407 target->srp_host->port,
408 &target->path,
409 IB_SA_PATH_REC_SERVICE_ID |
410 IB_SA_PATH_REC_DGID |
411 IB_SA_PATH_REC_SGID |
412 IB_SA_PATH_REC_NUMB_PATH |
413 IB_SA_PATH_REC_PKEY,
414 SRP_PATH_REC_TIMEOUT_MS,
415 GFP_KERNEL,
416 srp_path_rec_completion,
417 target, &target->path_query);
418 if (target->path_query_id < 0)
419 return target->path_query_id;
420
421 wait_for_completion(&target->done);
422
423 if (target->status < 0)
424 shost_printk(KERN_WARNING, target->scsi_host,
425 PFX "Path record query failed\n");
426
427 return target->status;
428 }
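/*
 * srp_lookup_path() issues an asynchronous SA path record query and then
 * blocks on target->done; srp_path_rec_completion() stores the result in
 * target->path and target->status before signalling the completion. The
 * same completion/status pattern is reused by the CM callbacks below.
 */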
429
430 static int srp_send_req(struct srp_target_port *target)
431 {
432 struct {
433 struct ib_cm_req_param param;
434 struct srp_login_req priv;
435 } *req = NULL;
436 int status;
437
438 req = kzalloc(sizeof *req, GFP_KERNEL);
439 if (!req)
440 return -ENOMEM;
441
442 req->param.primary_path = &target->path;
443 req->param.alternate_path = NULL;
444 req->param.service_id = target->service_id;
445 req->param.qp_num = target->qp->qp_num;
446 req->param.qp_type = target->qp->qp_type;
447 req->param.private_data = &req->priv;
448 req->param.private_data_len = sizeof req->priv;
449 req->param.flow_control = 1;
450
451 get_random_bytes(&req->param.starting_psn, 4);
452 req->param.starting_psn &= 0xffffff;
453
454 /*
455 * Pick some arbitrary defaults here; we could make these
456 * module parameters if anyone cared about setting them.
457 */
458 req->param.responder_resources = 4;
459 req->param.remote_cm_response_timeout = 20;
460 req->param.local_cm_response_timeout = 20;
461 req->param.retry_count = target->tl_retry_count;
462 req->param.rnr_retry_count = 7;
463 req->param.max_cm_retries = 15;
464
465 req->priv.opcode = SRP_LOGIN_REQ;
466 req->priv.tag = 0;
467 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
468 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
469 SRP_BUF_FORMAT_INDIRECT);
470 /*
471 * In the published SRP specification (draft rev. 16a), the
472 * port identifier format is 8 bytes of ID extension followed
473 * by 8 bytes of GUID. Older drafts put the two halves in the
474 * opposite order, so that the GUID comes first.
475 *
476 * Targets conforming to these obsolete drafts can be
477 * recognized by the I/O Class they report.
478 */
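/*
 * Resulting target port identifier layouts (illustrative):
 *
 *   rev. 16a:      [ id_ext (8 bytes)   | ioc_guid (8 bytes) ]
 *   older drafts:  [ ioc_guid (8 bytes) | id_ext (8 bytes)   ]
 *
 * The initiator port ID is built from initiator_ext and the source GID's
 * interface ID in the corresponding order.
 */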
479 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
480 memcpy(req->priv.initiator_port_id,
481 &target->path.sgid.global.interface_id, 8);
482 memcpy(req->priv.initiator_port_id + 8,
483 &target->initiator_ext, 8);
484 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
485 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
486 } else {
487 memcpy(req->priv.initiator_port_id,
488 &target->initiator_ext, 8);
489 memcpy(req->priv.initiator_port_id + 8,
490 &target->path.sgid.global.interface_id, 8);
491 memcpy(req->priv.target_port_id, &target->id_ext, 8);
492 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
493 }
494
495 /*
496 * Topspin/Cisco SRP targets will reject our login unless we
497 * zero out the first 8 bytes of our initiator port ID and set
498 * the second 8 bytes to the local node GUID.
499 */
500 if (srp_target_is_topspin(target)) {
501 shost_printk(KERN_DEBUG, target->scsi_host,
502 PFX "Topspin/Cisco initiator port ID workaround "
503 "activated for target GUID %016llx\n",
504 (unsigned long long) be64_to_cpu(target->ioc_guid));
505 memset(req->priv.initiator_port_id, 0, 8);
506 memcpy(req->priv.initiator_port_id + 8,
507 &target->srp_host->srp_dev->dev->node_guid, 8);
508 }
509
510 status = ib_send_cm_req(target->cm_id, &req->param);
511
512 kfree(req);
513
514 return status;
515 }
516
517 static bool srp_queue_remove_work(struct srp_target_port *target)
518 {
519 bool changed = false;
520
521 spin_lock_irq(&target->lock);
522 if (target->state != SRP_TARGET_REMOVED) {
523 target->state = SRP_TARGET_REMOVED;
524 changed = true;
525 }
526 spin_unlock_irq(&target->lock);
527
528 if (changed)
529 queue_work(system_long_wq, &target->remove_work);
530
531 return changed;
532 }
533
534 static bool srp_change_conn_state(struct srp_target_port *target,
535 bool connected)
536 {
537 bool changed = false;
538
539 spin_lock_irq(&target->lock);
540 if (target->connected != connected) {
541 target->connected = connected;
542 changed = true;
543 }
544 spin_unlock_irq(&target->lock);
545
546 return changed;
547 }
548
549 static void srp_disconnect_target(struct srp_target_port *target)
550 {
551 if (srp_change_conn_state(target, false)) {
552 /* XXX should send SRP_I_LOGOUT request */
553
554 if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
555 shost_printk(KERN_DEBUG, target->scsi_host,
556 PFX "Sending CM DREQ failed\n");
557 }
558 }
559 }
560
561 static void srp_free_req_data(struct srp_target_port *target)
562 {
563 struct ib_device *ibdev = target->srp_host->srp_dev->dev;
564 struct srp_request *req;
565 int i;
566
567 for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
568 kfree(req->fmr_list);
569 kfree(req->map_page);
570 if (req->indirect_dma_addr) {
571 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
572 target->indirect_size,
573 DMA_TO_DEVICE);
574 }
575 kfree(req->indirect_desc);
576 }
577 }
578
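/**
 * srp_alloc_req_data() - Allocate and initialize the request ring.
 * @target: SRP target port.
 *
 * For each of the SRP_CMD_SQ_SIZE slots this allocates the FMR pointer
 * list, the FMR page list and the indirect descriptor table, DMA-maps the
 * latter and adds the request to target->free_reqs. On failure, partially
 * initialized entries are expected to be cleaned up by
 * srp_free_req_data(), which tolerates NULL pointers and unmapped
 * descriptors.
 */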
579 static int srp_alloc_req_data(struct srp_target_port *target)
580 {
581 struct srp_device *srp_dev = target->srp_host->srp_dev;
582 struct ib_device *ibdev = srp_dev->dev;
583 struct srp_request *req;
584 dma_addr_t dma_addr;
585 int i, ret = -ENOMEM;
586
587 INIT_LIST_HEAD(&target->free_reqs);
588
589 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
590 req = &target->req_ring[i];
591 req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
592 GFP_KERNEL);
593 req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
594 GFP_KERNEL);
595 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
596 if (!req->fmr_list || !req->map_page || !req->indirect_desc)
597 goto out;
598
599 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
600 target->indirect_size,
601 DMA_TO_DEVICE);
602 if (ib_dma_mapping_error(ibdev, dma_addr))
603 goto out;
604
605 req->indirect_dma_addr = dma_addr;
606 req->index = i;
607 list_add_tail(&req->list, &target->free_reqs);
608 }
609 ret = 0;
610
611 out:
612 return ret;
613 }
614
615 /**
616 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
617 * @shost: SCSI host whose attributes to remove from sysfs.
618 *
619 * Note: Any attributes defined in the host template that did not exist
620 * before this function was invoked will be ignored.
621 */
622 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
623 {
624 struct device_attribute **attr;
625
626 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
627 device_remove_file(&shost->shost_dev, *attr);
628 }
629
630 static void srp_remove_target(struct srp_target_port *target)
631 {
632 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
633
634 srp_del_scsi_host_attr(target->scsi_host);
635 srp_rport_get(target->rport);
636 srp_remove_host(target->scsi_host);
637 scsi_remove_host(target->scsi_host);
638 srp_disconnect_target(target);
639 ib_destroy_cm_id(target->cm_id);
640 srp_free_target_ib(target);
641 cancel_work_sync(&target->tl_err_work);
642 srp_rport_put(target->rport);
643 srp_free_req_data(target);
644 scsi_host_put(target->scsi_host);
645 }
646
647 static void srp_remove_work(struct work_struct *work)
648 {
649 struct srp_target_port *target =
650 container_of(work, struct srp_target_port, remove_work);
651
652 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
653
654 srp_remove_target(target);
655
656 spin_lock(&target->srp_host->target_lock);
657 list_del(&target->list);
658 spin_unlock(&target->srp_host->target_lock);
659 }
660
661 static void srp_rport_delete(struct srp_rport *rport)
662 {
663 struct srp_target_port *target = rport->lld_data;
664
665 srp_queue_remove_work(target);
666 }
667
668 static int srp_connect_target(struct srp_target_port *target)
669 {
670 int retries = 3;
671 int ret;
672
673 WARN_ON_ONCE(target->connected);
674
675 target->qp_in_error = false;
676
677 ret = srp_lookup_path(target);
678 if (ret)
679 return ret;
680
681 while (1) {
682 init_completion(&target->done);
683 ret = srp_send_req(target);
684 if (ret)
685 return ret;
686 wait_for_completion(&target->done);
687
688 /*
689 * The CM event handling code will set status to
690 * SRP_PORT_REDIRECT if we get a port redirect REJ
691 * back, or SRP_DLID_REDIRECT if we get a lid/qp
692 * redirect REJ back.
693 */
694 switch (target->status) {
695 case 0:
696 srp_change_conn_state(target, true);
697 return 0;
698
699 case SRP_PORT_REDIRECT:
700 ret = srp_lookup_path(target);
701 if (ret)
702 return ret;
703 break;
704
705 case SRP_DLID_REDIRECT:
706 break;
707
708 case SRP_STALE_CONN:
709 /* Our current CM id was stale, and is now in timewait.
710 * Try to reconnect with a new one.
711 */
712 if (!retries-- || srp_new_cm_id(target)) {
713 shost_printk(KERN_ERR, target->scsi_host, PFX
714 "giving up on stale connection\n");
715 target->status = -ECONNRESET;
716 return target->status;
717 }
718
719 shost_printk(KERN_ERR, target->scsi_host, PFX
720 "retrying stale connection\n");
721 break;
722
723 default:
724 return target->status;
725 }
726 }
727 }
728
729 static void srp_unmap_data(struct scsi_cmnd *scmnd,
730 struct srp_target_port *target,
731 struct srp_request *req)
732 {
733 struct ib_device *ibdev = target->srp_host->srp_dev->dev;
734 struct ib_pool_fmr **pfmr;
735
736 if (!scsi_sglist(scmnd) ||
737 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
738 scmnd->sc_data_direction != DMA_FROM_DEVICE))
739 return;
740
741 pfmr = req->fmr_list;
742 while (req->nfmr--)
743 ib_fmr_pool_unmap(*pfmr++);
744
745 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
746 scmnd->sc_data_direction);
747 }
748
749 /**
750 * srp_claim_req - Take ownership of the scmnd associated with a request.
751 * @target: SRP target port.
752 * @req: SRP request.
753 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
754 * ownership of @req->scmnd if it equals @scmnd.
755 *
756 * Return value:
757 * Either NULL or a pointer to the SCSI command the caller became owner of.
758 */
759 static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
760 struct srp_request *req,
761 struct scsi_cmnd *scmnd)
762 {
763 unsigned long flags;
764
765 spin_lock_irqsave(&target->lock, flags);
766 if (!scmnd) {
767 scmnd = req->scmnd;
768 req->scmnd = NULL;
769 } else if (req->scmnd == scmnd) {
770 req->scmnd = NULL;
771 } else {
772 scmnd = NULL;
773 }
774 spin_unlock_irqrestore(&target->lock, flags);
775
776 return scmnd;
777 }
778
779 /**
780 * srp_free_req() - Unmap data and add request to the free request list.
781 */
782 static void srp_free_req(struct srp_target_port *target,
783 struct srp_request *req, struct scsi_cmnd *scmnd,
784 s32 req_lim_delta)
785 {
786 unsigned long flags;
787
788 srp_unmap_data(scmnd, target, req);
789
790 spin_lock_irqsave(&target->lock, flags);
791 target->req_lim += req_lim_delta;
792 list_add_tail(&req->list, &target->free_reqs);
793 spin_unlock_irqrestore(&target->lock, flags);
794 }
795
796 static void srp_finish_req(struct srp_target_port *target,
797 struct srp_request *req, int result)
798 {
799 struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
800
801 if (scmnd) {
802 srp_free_req(target, req, scmnd, 0);
803 scmnd->result = result;
804 scmnd->scsi_done(scmnd);
805 }
806 }
807
808 static void srp_terminate_io(struct srp_rport *rport)
809 {
810 struct srp_target_port *target = rport->lld_data;
811 int i;
812
813 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
814 struct srp_request *req = &target->req_ring[i];
815 srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
816 }
817 }
818
819 /*
820 * It is up to the caller to ensure that srp_rport_reconnect() calls are
821 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
822 * srp_reset_device() or srp_reset_host() calls will occur while this function
823 * is in progress. One way to realize that is not to call this function
824 * directly but to call srp_reconnect_rport() instead since that last function
825 * serializes calls of this function via rport->mutex and also blocks
826 * srp_queuecommand() calls before invoking this function.
827 */
828 static int srp_rport_reconnect(struct srp_rport *rport)
829 {
830 struct srp_target_port *target = rport->lld_data;
831 int i, ret;
832
833 srp_disconnect_target(target);
834 /*
835 * Now get a new local CM ID so that we avoid confusing the target in
836 * case things are really fouled up. Doing so also ensures that all CM
837 * callbacks will have finished before a new QP is allocated.
838 */
839 ret = srp_new_cm_id(target);
840 /*
841 * Whether or not creating a new CM ID succeeded, create a new
842 * QP. This guarantees that all completion callback function
843 * invocations have finished before request resetting starts.
844 */
845 if (ret == 0)
846 ret = srp_create_target_ib(target);
847 else
848 srp_create_target_ib(target);
849
850 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
851 struct srp_request *req = &target->req_ring[i];
852 srp_finish_req(target, req, DID_RESET << 16);
853 }
854
855 INIT_LIST_HEAD(&target->free_tx);
856 for (i = 0; i < SRP_SQ_SIZE; ++i)
857 list_add(&target->tx_ring[i]->list, &target->free_tx);
858
859 if (ret == 0)
860 ret = srp_connect_target(target);
861
862 if (ret == 0)
863 shost_printk(KERN_INFO, target->scsi_host,
864 PFX "reconnect succeeded\n");
865
866 return ret;
867 }
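/*
 * The reconnect sequence above is: disconnect, allocate a new CM ID,
 * recreate the QP and CQs, fail all outstanding requests with DID_RESET,
 * rebuild the free TX IU list and finally log in again through
 * srp_connect_target().
 */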
868
869 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
870 unsigned int dma_len, u32 rkey)
871 {
872 struct srp_direct_buf *desc = state->desc;
873
874 desc->va = cpu_to_be64(dma_addr);
875 desc->key = cpu_to_be32(rkey);
876 desc->len = cpu_to_be32(dma_len);
877
878 state->total_len += dma_len;
879 state->desc++;
880 state->ndesc++;
881 }
882
883 static int srp_map_finish_fmr(struct srp_map_state *state,
884 struct srp_target_port *target)
885 {
886 struct srp_device *dev = target->srp_host->srp_dev;
887 struct ib_pool_fmr *fmr;
888 u64 io_addr = 0;
889
890 if (!state->npages)
891 return 0;
892
893 if (state->npages == 1) {
894 srp_map_desc(state, state->base_dma_addr, state->fmr_len,
895 target->rkey);
896 state->npages = state->fmr_len = 0;
897 return 0;
898 }
899
900 fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
901 state->npages, io_addr);
902 if (IS_ERR(fmr))
903 return PTR_ERR(fmr);
904
905 *state->next_fmr++ = fmr;
906 state->nfmr++;
907
908 srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
909 state->npages = state->fmr_len = 0;
910 return 0;
911 }
912
913 static void srp_map_update_start(struct srp_map_state *state,
914 struct scatterlist *sg, int sg_index,
915 dma_addr_t dma_addr)
916 {
917 state->unmapped_sg = sg;
918 state->unmapped_index = sg_index;
919 state->unmapped_addr = dma_addr;
920 }
921
922 static int srp_map_sg_entry(struct srp_map_state *state,
923 struct srp_target_port *target,
924 struct scatterlist *sg, int sg_index,
925 int use_fmr)
926 {
927 struct srp_device *dev = target->srp_host->srp_dev;
928 struct ib_device *ibdev = dev->dev;
929 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
930 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
931 unsigned int len;
932 int ret;
933
934 if (!dma_len)
935 return 0;
936
937 if (use_fmr == SRP_MAP_NO_FMR) {
938 /* Once we're in direct map mode for a request, we don't
939 * go back to FMR mode, so no need to update anything
940 * other than the descriptor.
941 */
942 srp_map_desc(state, dma_addr, dma_len, target->rkey);
943 return 0;
944 }
945
946 /* If we start at an offset into the FMR page, don't merge into
947 * the current FMR. Finish it out, and use the kernel's MR for this
948 * sg entry. This is to avoid potential bugs on some SRP targets
949 * that were never quite defined, but went away when the initiator
950 * avoided using FMR on such page fragments.
951 */
952 if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
953 ret = srp_map_finish_fmr(state, target);
954 if (ret)
955 return ret;
956
957 srp_map_desc(state, dma_addr, dma_len, target->rkey);
958 srp_map_update_start(state, NULL, 0, 0);
959 return 0;
960 }
961
962 /* If this is the first sg to go into the FMR, save our position.
963 * We need to know the first unmapped entry, its index, and the
964 * first unmapped address within that entry to be able to restart
965 * mapping after an error.
966 */
967 if (!state->unmapped_sg)
968 srp_map_update_start(state, sg, sg_index, dma_addr);
969
970 while (dma_len) {
971 if (state->npages == SRP_FMR_SIZE) {
972 ret = srp_map_finish_fmr(state, target);
973 if (ret)
974 return ret;
975
976 srp_map_update_start(state, sg, sg_index, dma_addr);
977 }
978
979 len = min_t(unsigned int, dma_len, dev->fmr_page_size);
980
981 if (!state->npages)
982 state->base_dma_addr = dma_addr;
983 state->pages[state->npages++] = dma_addr;
984 state->fmr_len += len;
985 dma_addr += len;
986 dma_len -= len;
987 }
988
989 /* If the last entry of the FMR wasn't a full page, then we need to
990 * close it out and start a new one -- we can only merge at page
991 * boundaries.
992 */
993 ret = 0;
994 if (len != dev->fmr_page_size) {
995 ret = srp_map_finish_fmr(state, target);
996 if (!ret)
997 srp_map_update_start(state, NULL, 0, 0);
998 }
999 return ret;
1000 }
1001
1002 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
1003 struct srp_request *req)
1004 {
1005 struct scatterlist *scat, *sg;
1006 struct srp_cmd *cmd = req->cmd->buf;
1007 int i, len, nents, count, use_fmr;
1008 struct srp_device *dev;
1009 struct ib_device *ibdev;
1010 struct srp_map_state state;
1011 struct srp_indirect_buf *indirect_hdr;
1012 u32 table_len;
1013 u8 fmt;
1014
1015 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1016 return sizeof (struct srp_cmd);
1017
1018 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1019 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1020 shost_printk(KERN_WARNING, target->scsi_host,
1021 PFX "Unhandled data direction %d\n",
1022 scmnd->sc_data_direction);
1023 return -EINVAL;
1024 }
1025
1026 nents = scsi_sg_count(scmnd);
1027 scat = scsi_sglist(scmnd);
1028
1029 dev = target->srp_host->srp_dev;
1030 ibdev = dev->dev;
1031
1032 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1033 if (unlikely(count == 0))
1034 return -EIO;
1035
1036 fmt = SRP_DATA_DESC_DIRECT;
1037 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1038
1039 if (count == 1) {
1040 /*
1041 * The midlayer only generated a single gather/scatter
1042 * entry, or DMA mapping coalesced everything to a
1043 * single entry. So a direct descriptor along with
1044 * the DMA MR suffices.
1045 */
1046 struct srp_direct_buf *buf = (void *) cmd->add_data;
1047
1048 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1049 buf->key = cpu_to_be32(target->rkey);
1050 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1051
1052 req->nfmr = 0;
1053 goto map_complete;
1054 }
1055
1056 /* We have more than one scatter/gather entry, so build our indirect
1057 * descriptor table, trying to merge as many entries with FMR as we
1058 * can.
1059 */
1060 indirect_hdr = (void *) cmd->add_data;
1061
1062 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1063 target->indirect_size, DMA_TO_DEVICE);
1064
1065 memset(&state, 0, sizeof(state));
1066 state.desc = req->indirect_desc;
1067 state.pages = req->map_page;
1068 state.next_fmr = req->fmr_list;
1069
1070 use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
1071
1072 for_each_sg(scat, sg, count, i) {
1073 if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
1074 /* FMR mapping failed, so backtrack to the first
1075 * unmapped entry and continue on without using FMR.
1076 */
1077 dma_addr_t dma_addr;
1078 unsigned int dma_len;
1079
1080 backtrack:
1081 sg = state.unmapped_sg;
1082 i = state.unmapped_index;
1083
1084 dma_addr = ib_sg_dma_address(ibdev, sg);
1085 dma_len = ib_sg_dma_len(ibdev, sg);
1086 dma_len -= (state.unmapped_addr - dma_addr);
1087 dma_addr = state.unmapped_addr;
1088 use_fmr = SRP_MAP_NO_FMR;
1089 srp_map_desc(&state, dma_addr, dma_len, target->rkey);
1090 }
1091 }
1092
1093 if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
1094 goto backtrack;
1095
1096 /* We've mapped the request, now pull as much of the indirect
1097 * descriptor table as we can into the command buffer. If this
1098 * target is not using an external indirect table, we are
1099 * guaranteed to fit into the command, as the SCSI layer won't
1100 * give us more S/G entries than we allow.
1101 */
1102 req->nfmr = state.nfmr;
1103 if (state.ndesc == 1) {
1104 /* FMR mapping was able to collapse this to one entry,
1105 * so use a direct descriptor.
1106 */
1107 struct srp_direct_buf *buf = (void *) cmd->add_data;
1108
1109 *buf = req->indirect_desc[0];
1110 goto map_complete;
1111 }
1112
1113 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1114 !target->allow_ext_sg)) {
1115 shost_printk(KERN_ERR, target->scsi_host,
1116 "Could not fit S/G list into SRP_CMD\n");
1117 return -EIO;
1118 }
1119
1120 count = min(state.ndesc, target->cmd_sg_cnt);
1121 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1122
1123 fmt = SRP_DATA_DESC_INDIRECT;
1124 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1125 len += count * sizeof (struct srp_direct_buf);
1126
1127 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1128 count * sizeof (struct srp_direct_buf));
1129
1130 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1131 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1132 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1133 indirect_hdr->len = cpu_to_be32(state.total_len);
1134
1135 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1136 cmd->data_out_desc_cnt = count;
1137 else
1138 cmd->data_in_desc_cnt = count;
1139
1140 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1141 DMA_TO_DEVICE);
1142
1143 map_complete:
1144 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1145 cmd->buf_fmt = fmt << 4;
1146 else
1147 cmd->buf_fmt = fmt;
1148
1149 return len;
1150 }
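/*
 * srp_map_data() thus produces one of three layouts in the SRP_CMD IU:
 * no data descriptor (len == sizeof(struct srp_cmd)), a single direct
 * descriptor, or an indirect descriptor table whose first "count" entries
 * are inlined in the command and whose complete table is referenced
 * through req->indirect_dma_addr.
 */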
1151
1152 /*
1153 * Return an IU and possible credit to the free pool
1154 */
1155 static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
1156 enum srp_iu_type iu_type)
1157 {
1158 unsigned long flags;
1159
1160 spin_lock_irqsave(&target->lock, flags);
1161 list_add(&iu->list, &target->free_tx);
1162 if (iu_type != SRP_IU_RSP)
1163 ++target->req_lim;
1164 spin_unlock_irqrestore(&target->lock, flags);
1165 }
1166
1167 /*
1168 * Must be called with target->lock held to protect req_lim and free_tx.
1169 * If IU is not sent, it must be returned using srp_put_tx_iu().
1170 *
1171 * Note:
1172 * An upper limit for the number of allocated information units for each
1173 * request type is:
1174 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1175 * more than Scsi_Host.can_queue requests.
1176 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1177 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1178 * one unanswered SRP request to an initiator.
1179 */
1180 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
1181 enum srp_iu_type iu_type)
1182 {
1183 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1184 struct srp_iu *iu;
1185
1186 srp_send_completion(target->send_cq, target);
1187
1188 if (list_empty(&target->free_tx))
1189 return NULL;
1190
1191 /* Initiator responses to target requests do not consume credits */
1192 if (iu_type != SRP_IU_RSP) {
1193 if (target->req_lim <= rsv) {
1194 ++target->zero_req_lim;
1195 return NULL;
1196 }
1197
1198 --target->req_lim;
1199 }
1200
1201 iu = list_first_entry(&target->free_tx, struct srp_iu, list);
1202 list_del(&iu->list);
1203 return iu;
1204 }
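/*
 * The rsv value above keeps SRP_TSK_MGMT_SQ_SIZE req_lim credits in
 * reserve for everything that is not a task management IU, so that
 * aborts and LUN resets can still be sent after ordinary commands have
 * consumed the rest of the credits granted by the target.
 */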
1205
1206 static int srp_post_send(struct srp_target_port *target,
1207 struct srp_iu *iu, int len)
1208 {
1209 struct ib_sge list;
1210 struct ib_send_wr wr, *bad_wr;
1211
1212 list.addr = iu->dma;
1213 list.length = len;
1214 list.lkey = target->lkey;
1215
1216 wr.next = NULL;
1217 wr.wr_id = (uintptr_t) iu;
1218 wr.sg_list = &list;
1219 wr.num_sge = 1;
1220 wr.opcode = IB_WR_SEND;
1221 wr.send_flags = IB_SEND_SIGNALED;
1222
1223 return ib_post_send(target->qp, &wr, &bad_wr);
1224 }
1225
1226 static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
1227 {
1228 struct ib_recv_wr wr, *bad_wr;
1229 struct ib_sge list;
1230
1231 list.addr = iu->dma;
1232 list.length = iu->size;
1233 list.lkey = target->lkey;
1234
1235 wr.next = NULL;
1236 wr.wr_id = (uintptr_t) iu;
1237 wr.sg_list = &list;
1238 wr.num_sge = 1;
1239
1240 return ib_post_recv(target->qp, &wr, &bad_wr);
1241 }
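/*
 * Both post functions store the IU pointer in wr_id so that the
 * completion handlers can recover it with a cast; see srp_handle_recv()
 * and srp_send_completion(). Sends are posted IB_SEND_SIGNALED, so every
 * TX IU eventually comes back through the send CQ.
 */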
1242
1243 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
1244 {
1245 struct srp_request *req;
1246 struct scsi_cmnd *scmnd;
1247 unsigned long flags;
1248
1249 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1250 spin_lock_irqsave(&target->lock, flags);
1251 target->req_lim += be32_to_cpu(rsp->req_lim_delta);
1252 spin_unlock_irqrestore(&target->lock, flags);
1253
1254 target->tsk_mgmt_status = -1;
1255 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1256 target->tsk_mgmt_status = rsp->data[3];
1257 complete(&target->tsk_mgmt_done);
1258 } else {
1259 req = &target->req_ring[rsp->tag];
1260 scmnd = srp_claim_req(target, req, NULL);
1261 if (!scmnd) {
1262 shost_printk(KERN_ERR, target->scsi_host,
1263 "Null scmnd for RSP w/tag %016llx\n",
1264 (unsigned long long) rsp->tag);
1265
1266 spin_lock_irqsave(&target->lock, flags);
1267 target->req_lim += be32_to_cpu(rsp->req_lim_delta);
1268 spin_unlock_irqrestore(&target->lock, flags);
1269
1270 return;
1271 }
1272 scmnd->result = rsp->status;
1273
1274 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1275 memcpy(scmnd->sense_buffer, rsp->data +
1276 be32_to_cpu(rsp->resp_data_len),
1277 min_t(int, be32_to_cpu(rsp->sense_data_len),
1278 SCSI_SENSE_BUFFERSIZE));
1279 }
1280
1281 if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
1282 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1283 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
1284 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1285
1286 srp_free_req(target, req, scmnd,
1287 be32_to_cpu(rsp->req_lim_delta));
1288
1289 scmnd->host_scribble = NULL;
1290 scmnd->scsi_done(scmnd);
1291 }
1292 }
1293
1294 static int srp_response_common(struct srp_target_port *target, s32 req_delta,
1295 void *rsp, int len)
1296 {
1297 struct ib_device *dev = target->srp_host->srp_dev->dev;
1298 unsigned long flags;
1299 struct srp_iu *iu;
1300 int err;
1301
1302 spin_lock_irqsave(&target->lock, flags);
1303 target->req_lim += req_delta;
1304 iu = __srp_get_tx_iu(target, SRP_IU_RSP);
1305 spin_unlock_irqrestore(&target->lock, flags);
1306
1307 if (!iu) {
1308 shost_printk(KERN_ERR, target->scsi_host, PFX
1309 "no IU available to send response\n");
1310 return 1;
1311 }
1312
1313 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1314 memcpy(iu->buf, rsp, len);
1315 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1316
1317 err = srp_post_send(target, iu, len);
1318 if (err) {
1319 shost_printk(KERN_ERR, target->scsi_host, PFX
1320 "unable to post response: %d\n", err);
1321 srp_put_tx_iu(target, iu, SRP_IU_RSP);
1322 }
1323
1324 return err;
1325 }
1326
1327 static void srp_process_cred_req(struct srp_target_port *target,
1328 struct srp_cred_req *req)
1329 {
1330 struct srp_cred_rsp rsp = {
1331 .opcode = SRP_CRED_RSP,
1332 .tag = req->tag,
1333 };
1334 s32 delta = be32_to_cpu(req->req_lim_delta);
1335
1336 if (srp_response_common(target, delta, &rsp, sizeof rsp))
1337 shost_printk(KERN_ERR, target->scsi_host, PFX
1338 "problems processing SRP_CRED_REQ\n");
1339 }
1340
1341 static void srp_process_aer_req(struct srp_target_port *target,
1342 struct srp_aer_req *req)
1343 {
1344 struct srp_aer_rsp rsp = {
1345 .opcode = SRP_AER_RSP,
1346 .tag = req->tag,
1347 };
1348 s32 delta = be32_to_cpu(req->req_lim_delta);
1349
1350 shost_printk(KERN_ERR, target->scsi_host, PFX
1351 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1352
1353 if (srp_response_common(target, delta, &rsp, sizeof rsp))
1354 shost_printk(KERN_ERR, target->scsi_host, PFX
1355 "problems processing SRP_AER_REQ\n");
1356 }
1357
1358 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1359 {
1360 struct ib_device *dev = target->srp_host->srp_dev->dev;
1361 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1362 int res;
1363 u8 opcode;
1364
1365 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1366 DMA_FROM_DEVICE);
1367
1368 opcode = *(u8 *) iu->buf;
1369
1370 if (0) {
1371 shost_printk(KERN_ERR, target->scsi_host,
1372 PFX "recv completion, opcode 0x%02x\n", opcode);
1373 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1374 iu->buf, wc->byte_len, true);
1375 }
1376
1377 switch (opcode) {
1378 case SRP_RSP:
1379 srp_process_rsp(target, iu->buf);
1380 break;
1381
1382 case SRP_CRED_REQ:
1383 srp_process_cred_req(target, iu->buf);
1384 break;
1385
1386 case SRP_AER_REQ:
1387 srp_process_aer_req(target, iu->buf);
1388 break;
1389
1390 case SRP_T_LOGOUT:
1391 /* XXX Handle target logout */
1392 shost_printk(KERN_WARNING, target->scsi_host,
1393 PFX "Got target logout request\n");
1394 break;
1395
1396 default:
1397 shost_printk(KERN_WARNING, target->scsi_host,
1398 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1399 break;
1400 }
1401
1402 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1403 DMA_FROM_DEVICE);
1404
1405 res = srp_post_recv(target, iu);
1406 if (res != 0)
1407 shost_printk(KERN_ERR, target->scsi_host,
1408 PFX "Recv failed with error code %d\n", res);
1409 }
1410
1411 /**
1412 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
1413 *
1414 * Note: This function may get invoked before the rport has been created,
1415 * hence the target->rport test.
1416 */
1417 static void srp_tl_err_work(struct work_struct *work)
1418 {
1419 struct srp_target_port *target;
1420
1421 target = container_of(work, struct srp_target_port, tl_err_work);
1422 if (target->rport)
1423 srp_start_tl_fail_timers(target->rport);
1424 }
1425
1426 static void srp_handle_qp_err(enum ib_wc_status wc_status,
1427 enum ib_wc_opcode wc_opcode,
1428 struct srp_target_port *target)
1429 {
1430 if (target->connected && !target->qp_in_error) {
1431 shost_printk(KERN_ERR, target->scsi_host,
1432 PFX "failed %s status %d\n",
1433 wc_opcode & IB_WC_RECV ? "receive" : "send",
1434 wc_status);
1435 queue_work(system_long_wq, &target->tl_err_work);
1436 }
1437 target->qp_in_error = true;
1438 }
1439
1440 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
1441 {
1442 struct srp_target_port *target = target_ptr;
1443 struct ib_wc wc;
1444
1445 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1446 while (ib_poll_cq(cq, 1, &wc) > 0) {
1447 if (likely(wc.status == IB_WC_SUCCESS)) {
1448 srp_handle_recv(target, &wc);
1449 } else {
1450 srp_handle_qp_err(wc.status, wc.opcode, target);
1451 }
1452 }
1453 }
1454
1455 static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1456 {
1457 struct srp_target_port *target = target_ptr;
1458 struct ib_wc wc;
1459 struct srp_iu *iu;
1460
1461 while (ib_poll_cq(cq, 1, &wc) > 0) {
1462 if (likely(wc.status == IB_WC_SUCCESS)) {
1463 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1464 list_add(&iu->list, &target->free_tx);
1465 } else {
1466 srp_handle_qp_err(wc.status, wc.opcode, target);
1467 }
1468 }
1469 }
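/*
 * Note: no completion notification is requested for the send CQ. Send
 * completions are reaped opportunistically instead: __srp_get_tx_iu()
 * calls srp_send_completion() before trying to take a TX IU, which is
 * sufficient because TX IUs are only handed out through that function.
 */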
1470
1471 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1472 {
1473 struct srp_target_port *target = host_to_target(shost);
1474 struct srp_rport *rport = target->rport;
1475 struct srp_request *req;
1476 struct srp_iu *iu;
1477 struct srp_cmd *cmd;
1478 struct ib_device *dev;
1479 unsigned long flags;
1480 int len, result;
1481 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1482
1483 /*
1484 * The SCSI EH thread is the only context from which srp_queuecommand()
1485 * can get invoked for blocked devices (SDEV_BLOCK /
1486 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1487 * locking the rport mutex if invoked from inside the SCSI EH.
1488 */
1489 if (in_scsi_eh)
1490 mutex_lock(&rport->mutex);
1491
1492 result = srp_chkready(target->rport);
1493 if (unlikely(result)) {
1494 scmnd->result = result;
1495 scmnd->scsi_done(scmnd);
1496 goto unlock_rport;
1497 }
1498
1499 spin_lock_irqsave(&target->lock, flags);
1500 iu = __srp_get_tx_iu(target, SRP_IU_CMD);
1501 if (!iu)
1502 goto err_unlock;
1503
1504 req = list_first_entry(&target->free_reqs, struct srp_request, list);
1505 list_del(&req->list);
1506 spin_unlock_irqrestore(&target->lock, flags);
1507
1508 dev = target->srp_host->srp_dev->dev;
1509 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
1510 DMA_TO_DEVICE);
1511
1512 scmnd->result = 0;
1513 scmnd->host_scribble = (void *) req;
1514
1515 cmd = iu->buf;
1516 memset(cmd, 0, sizeof *cmd);
1517
1518 cmd->opcode = SRP_CMD;
1519 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
1520 cmd->tag = req->index;
1521 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1522
1523 req->scmnd = scmnd;
1524 req->cmd = iu;
1525
1526 len = srp_map_data(scmnd, target, req);
1527 if (len < 0) {
1528 shost_printk(KERN_ERR, target->scsi_host,
1529 PFX "Failed to map data\n");
1530 goto err_iu;
1531 }
1532
1533 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
1534 DMA_TO_DEVICE);
1535
1536 if (srp_post_send(target, iu, len)) {
1537 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
1538 goto err_unmap;
1539 }
1540
1541 unlock_rport:
1542 if (in_scsi_eh)
1543 mutex_unlock(&rport->mutex);
1544
1545 return 0;
1546
1547 err_unmap:
1548 srp_unmap_data(scmnd, target, req);
1549
1550 err_iu:
1551 srp_put_tx_iu(target, iu, SRP_IU_CMD);
1552
1553 spin_lock_irqsave(&target->lock, flags);
1554 list_add(&req->list, &target->free_reqs);
1555
1556 err_unlock:
1557 spin_unlock_irqrestore(&target->lock, flags);
1558
1559 if (in_scsi_eh)
1560 mutex_unlock(&rport->mutex);
1561
1562 return SCSI_MLQUEUE_HOST_BUSY;
1563 }
1564
1565 static int srp_alloc_iu_bufs(struct srp_target_port *target)
1566 {
1567 int i;
1568
1569 for (i = 0; i < SRP_RQ_SIZE; ++i) {
1570 target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1571 target->max_ti_iu_len,
1572 GFP_KERNEL, DMA_FROM_DEVICE);
1573 if (!target->rx_ring[i])
1574 goto err;
1575 }
1576
1577 for (i = 0; i < SRP_SQ_SIZE; ++i) {
1578 target->tx_ring[i] = srp_alloc_iu(target->srp_host,
1579 target->max_iu_len,
1580 GFP_KERNEL, DMA_TO_DEVICE);
1581 if (!target->tx_ring[i])
1582 goto err;
1583
1584 list_add(&target->tx_ring[i]->list, &target->free_tx);
1585 }
1586
1587 return 0;
1588
1589 err:
1590 for (i = 0; i < SRP_RQ_SIZE; ++i) {
1591 srp_free_iu(target->srp_host, target->rx_ring[i]);
1592 target->rx_ring[i] = NULL;
1593 }
1594
1595 for (i = 0; i < SRP_SQ_SIZE; ++i) {
1596 srp_free_iu(target->srp_host, target->tx_ring[i]);
1597 target->tx_ring[i] = NULL;
1598 }
1599
1600 return -ENOMEM;
1601 }
1602
1603 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
1604 {
1605 uint64_t T_tr_ns, max_compl_time_ms;
1606 uint32_t rq_tmo_jiffies;
1607
1608 /*
1609 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
1610 * table 91), both the QP timeout and the retry count have to be set
1611 * for RC QPs during the RTR to RTS transition.
1612 */
1613 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
1614 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
1615
1616 /*
1617 * Set target->rq_tmo_jiffies to one second more than the largest time
1618 * it can take before an error completion is generated. See also
1619 * C9-140..142 in the IBTA spec for more information about how to
1620 * convert the QP Local ACK Timeout value to nanoseconds.
1621 */
1622 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
1623 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
1624 do_div(max_compl_time_ms, NSEC_PER_MSEC);
1625 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
1626
1627 return rq_tmo_jiffies;
1628 }
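/*
 * Worked example: with qp_attr->timeout == 14 and retry_cnt == 7,
 * T_tr = 4096 ns * 2^14 ~= 67.1 ms, the worst-case completion time is
 * about 7 * 4 * 67.1 ms ~= 1879 ms, and rq_tmo_jiffies corresponds to
 * roughly 2.9 seconds.
 */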
1629
1630 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1631 struct srp_login_rsp *lrsp,
1632 struct srp_target_port *target)
1633 {
1634 struct ib_qp_attr *qp_attr = NULL;
1635 int attr_mask = 0;
1636 int ret;
1637 int i;
1638
1639 if (lrsp->opcode == SRP_LOGIN_RSP) {
1640 target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1641 target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
1642
1643 /*
1644 * Reserve credits for task management so we don't
1645 * bounce requests back to the SCSI mid-layer.
1646 */
1647 target->scsi_host->can_queue
1648 = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1649 target->scsi_host->can_queue);
1650 } else {
1651 shost_printk(KERN_WARNING, target->scsi_host,
1652 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1653 ret = -ECONNRESET;
1654 goto error;
1655 }
1656
1657 if (!target->rx_ring[0]) {
1658 ret = srp_alloc_iu_bufs(target);
1659 if (ret)
1660 goto error;
1661 }
1662
1663 ret = -ENOMEM;
1664 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1665 if (!qp_attr)
1666 goto error;
1667
1668 qp_attr->qp_state = IB_QPS_RTR;
1669 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1670 if (ret)
1671 goto error_free;
1672
1673 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1674 if (ret)
1675 goto error_free;
1676
1677 for (i = 0; i < SRP_RQ_SIZE; i++) {
1678 struct srp_iu *iu = target->rx_ring[i];
1679 ret = srp_post_recv(target, iu);
1680 if (ret)
1681 goto error_free;
1682 }
1683
1684 qp_attr->qp_state = IB_QPS_RTS;
1685 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1686 if (ret)
1687 goto error_free;
1688
1689 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
1690
1691 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1692 if (ret)
1693 goto error_free;
1694
1695 ret = ib_send_cm_rtu(cm_id, NULL, 0);
1696
1697 error_free:
1698 kfree(qp_attr);
1699
1700 error:
1701 target->status = ret;
1702 }
1703
1704 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1705 struct ib_cm_event *event,
1706 struct srp_target_port *target)
1707 {
1708 struct Scsi_Host *shost = target->scsi_host;
1709 struct ib_class_port_info *cpi;
1710 int opcode;
1711
1712 switch (event->param.rej_rcvd.reason) {
1713 case IB_CM_REJ_PORT_CM_REDIRECT:
1714 cpi = event->param.rej_rcvd.ari;
1715 target->path.dlid = cpi->redirect_lid;
1716 target->path.pkey = cpi->redirect_pkey;
1717 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1718 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1719
1720 target->status = target->path.dlid ?
1721 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1722 break;
1723
1724 case IB_CM_REJ_PORT_REDIRECT:
1725 if (srp_target_is_topspin(target)) {
1726 /*
1727 * Topspin/Cisco SRP gateways incorrectly send
1728 * reject reason code 25 when they mean 24
1729 * (port redirect).
1730 */
1731 memcpy(target->path.dgid.raw,
1732 event->param.rej_rcvd.ari, 16);
1733
1734 shost_printk(KERN_DEBUG, shost,
1735 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1736 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1737 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
1738
1739 target->status = SRP_PORT_REDIRECT;
1740 } else {
1741 shost_printk(KERN_WARNING, shost,
1742 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
1743 target->status = -ECONNRESET;
1744 }
1745 break;
1746
1747 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
1748 shost_printk(KERN_WARNING, shost,
1749 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
1750 target->status = -ECONNRESET;
1751 break;
1752
1753 case IB_CM_REJ_CONSUMER_DEFINED:
1754 opcode = *(u8 *) event->private_data;
1755 if (opcode == SRP_LOGIN_REJ) {
1756 struct srp_login_rej *rej = event->private_data;
1757 u32 reason = be32_to_cpu(rej->reason);
1758
1759 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
1760 shost_printk(KERN_WARNING, shost,
1761 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
1762 else
1763 shost_printk(KERN_WARNING, shost,
1764 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
1765 } else
1766 shost_printk(KERN_WARNING, shost,
1767 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1768 " opcode 0x%02x\n", opcode);
1769 target->status = -ECONNRESET;
1770 break;
1771
1772 case IB_CM_REJ_STALE_CONN:
1773 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
1774 target->status = SRP_STALE_CONN;
1775 break;
1776
1777 default:
1778 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
1779 event->param.rej_rcvd.reason);
1780 target->status = -ECONNRESET;
1781 }
1782 }
1783
1784 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1785 {
1786 struct srp_target_port *target = cm_id->context;
1787 int comp = 0;
1788
1789 switch (event->event) {
1790 case IB_CM_REQ_ERROR:
1791 shost_printk(KERN_DEBUG, target->scsi_host,
1792 PFX "Sending CM REQ failed\n");
1793 comp = 1;
1794 target->status = -ECONNRESET;
1795 break;
1796
1797 case IB_CM_REP_RECEIVED:
1798 comp = 1;
1799 srp_cm_rep_handler(cm_id, event->private_data, target);
1800 break;
1801
1802 case IB_CM_REJ_RECEIVED:
1803 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
1804 comp = 1;
1805
1806 srp_cm_rej_handler(cm_id, event, target);
1807 break;
1808
1809 case IB_CM_DREQ_RECEIVED:
1810 shost_printk(KERN_WARNING, target->scsi_host,
1811 PFX "DREQ received - connection closed\n");
1812 srp_change_conn_state(target, false);
1813 if (ib_send_cm_drep(cm_id, NULL, 0))
1814 shost_printk(KERN_ERR, target->scsi_host,
1815 PFX "Sending CM DREP failed\n");
1816 queue_work(system_long_wq, &target->tl_err_work);
1817 break;
1818
1819 case IB_CM_TIMEWAIT_EXIT:
1820 shost_printk(KERN_ERR, target->scsi_host,
1821 PFX "connection closed\n");
1822
1823 target->status = 0;
1824 break;
1825
1826 case IB_CM_MRA_RECEIVED:
1827 case IB_CM_DREQ_ERROR:
1828 case IB_CM_DREP_RECEIVED:
1829 break;
1830
1831 default:
1832 shost_printk(KERN_WARNING, target->scsi_host,
1833 PFX "Unhandled CM event %d\n", event->event);
1834 break;
1835 }
1836
1837 if (comp)
1838 complete(&target->done);
1839
1840 return 0;
1841 }
1842
1843 static int srp_send_tsk_mgmt(struct srp_target_port *target,
1844 u64 req_tag, unsigned int lun, u8 func)
1845 {
1846 struct srp_rport *rport = target->rport;
1847 struct ib_device *dev = target->srp_host->srp_dev->dev;
1848 struct srp_iu *iu;
1849 struct srp_tsk_mgmt *tsk_mgmt;
1850
1851 if (!target->connected || target->qp_in_error)
1852 return -1;
1853
1854 init_completion(&target->tsk_mgmt_done);
1855
1856 /*
1857 * Lock the rport mutex to prevent srp_create_target_ib() from being
1858 * invoked while a task management function is being sent.
1859 */
1860 mutex_lock(&rport->mutex);
1861 spin_lock_irq(&target->lock);
1862 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
1863 spin_unlock_irq(&target->lock);
1864
1865 if (!iu) {
1866 mutex_unlock(&rport->mutex);
1867
1868 return -1;
1869 }
1870
1871 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1872 DMA_TO_DEVICE);
1873 tsk_mgmt = iu->buf;
1874 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1875
1876 tsk_mgmt->opcode = SRP_TSK_MGMT;
1877 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
1878 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
1879 tsk_mgmt->tsk_mgmt_func = func;
1880 tsk_mgmt->task_tag = req_tag;
1881
1882 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1883 DMA_TO_DEVICE);
1884 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
1885 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
1886 mutex_unlock(&rport->mutex);
1887
1888 return -1;
1889 }
1890 mutex_unlock(&rport->mutex);
1891
1892 if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
1893 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1894 return -1;
1895
1896 return 0;
1897 }
1898
1899 static int srp_abort(struct scsi_cmnd *scmnd)
1900 {
1901 struct srp_target_port *target = host_to_target(scmnd->device->host);
1902 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
1903 int ret;
1904
1905 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
1906
1907 if (!req || !srp_claim_req(target, req, scmnd))
1908 return FAILED;
1909 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
1910 SRP_TSK_ABORT_TASK) == 0)
1911 ret = SUCCESS;
1912 else if (target->rport->state == SRP_RPORT_LOST)
1913 ret = FAST_IO_FAIL;
1914 else
1915 ret = FAILED;
1916 srp_free_req(target, req, scmnd, 0);
1917 scmnd->result = DID_ABORT << 16;
1918 scmnd->scsi_done(scmnd);
1919
1920 return ret;
1921 }
1922
1923 static int srp_reset_device(struct scsi_cmnd *scmnd)
1924 {
1925 struct srp_target_port *target = host_to_target(scmnd->device->host);
1926 int i;
1927
1928 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
1929
1930 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
1931 SRP_TSK_LUN_RESET))
1932 return FAILED;
1933 if (target->tsk_mgmt_status)
1934 return FAILED;
1935
1936 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
1937 struct srp_request *req = &target->req_ring[i];
1938 if (req->scmnd && req->scmnd->device == scmnd->device)
1939 srp_finish_req(target, req, DID_RESET << 16);
1940 }
1941
1942 return SUCCESS;
1943 }
1944
1945 static int srp_reset_host(struct scsi_cmnd *scmnd)
1946 {
1947 struct srp_target_port *target = host_to_target(scmnd->device->host);
1948
1949 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
1950
1951 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
1952 }
1953
1954 static int srp_slave_configure(struct scsi_device *sdev)
1955 {
1956 struct Scsi_Host *shost = sdev->host;
1957 struct srp_target_port *target = host_to_target(shost);
1958 struct request_queue *q = sdev->request_queue;
1959 unsigned long timeout;
1960
1961 if (sdev->type == TYPE_DISK) {
1962 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
1963 blk_queue_rq_timeout(q, timeout);
1964 }
1965
1966 return 0;
1967 }
1968
1969 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
1970 char *buf)
1971 {
1972 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1973
1974 return sprintf(buf, "0x%016llx\n",
1975 (unsigned long long) be64_to_cpu(target->id_ext));
1976 }
1977
1978 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
1979 char *buf)
1980 {
1981 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1982
1983 return sprintf(buf, "0x%016llx\n",
1984 (unsigned long long) be64_to_cpu(target->ioc_guid));
1985 }
1986
1987 static ssize_t show_service_id(struct device *dev,
1988 struct device_attribute *attr, char *buf)
1989 {
1990 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1991
1992 return sprintf(buf, "0x%016llx\n",
1993 (unsigned long long) be64_to_cpu(target->service_id));
1994 }
1995
1996 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
1997 char *buf)
1998 {
1999 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2000
2001 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
2002 }
2003
2004 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2005 char *buf)
2006 {
2007 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2008
2009 return sprintf(buf, "%pI6\n", target->path.sgid.raw);
2010 }
2011
2012 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2013 char *buf)
2014 {
2015 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2016
2017 return sprintf(buf, "%pI6\n", target->path.dgid.raw);
2018 }
2019
2020 static ssize_t show_orig_dgid(struct device *dev,
2021 struct device_attribute *attr, char *buf)
2022 {
2023 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2024
2025 return sprintf(buf, "%pI6\n", target->orig_dgid);
2026 }
2027
2028 static ssize_t show_req_lim(struct device *dev,
2029 struct device_attribute *attr, char *buf)
2030 {
2031 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2032
2033 return sprintf(buf, "%d\n", target->req_lim);
2034 }
2035
2036 static ssize_t show_zero_req_lim(struct device *dev,
2037 struct device_attribute *attr, char *buf)
2038 {
2039 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2040
2041 return sprintf(buf, "%d\n", target->zero_req_lim);
2042 }
2043
2044 static ssize_t show_local_ib_port(struct device *dev,
2045 struct device_attribute *attr, char *buf)
2046 {
2047 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2048
2049 return sprintf(buf, "%d\n", target->srp_host->port);
2050 }
2051
2052 static ssize_t show_local_ib_device(struct device *dev,
2053 struct device_attribute *attr, char *buf)
2054 {
2055 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2056
2057 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2058 }
2059
2060 static ssize_t show_comp_vector(struct device *dev,
2061 struct device_attribute *attr, char *buf)
2062 {
2063 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2064
2065 return sprintf(buf, "%d\n", target->comp_vector);
2066 }
2067
2068 static ssize_t show_tl_retry_count(struct device *dev,
2069 struct device_attribute *attr, char *buf)
2070 {
2071 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2072
2073 return sprintf(buf, "%d\n", target->tl_retry_count);
2074 }
2075
2076 static ssize_t show_cmd_sg_entries(struct device *dev,
2077 struct device_attribute *attr, char *buf)
2078 {
2079 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2080
2081 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2082 }
2083
2084 static ssize_t show_allow_ext_sg(struct device *dev,
2085 struct device_attribute *attr, char *buf)
2086 {
2087 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2088
2089 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2090 }
2091
2092 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2093 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2094 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2095 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2096 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2097 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2098 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2099 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2100 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2101 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2102 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2103 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2104 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2105 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2106 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2107
2108 static struct device_attribute *srp_host_attrs[] = {
2109 &dev_attr_id_ext,
2110 &dev_attr_ioc_guid,
2111 &dev_attr_service_id,
2112 &dev_attr_pkey,
2113 &dev_attr_sgid,
2114 &dev_attr_dgid,
2115 &dev_attr_orig_dgid,
2116 &dev_attr_req_lim,
2117 &dev_attr_zero_req_lim,
2118 &dev_attr_local_ib_port,
2119 &dev_attr_local_ib_device,
2120 &dev_attr_comp_vector,
2121 &dev_attr_tl_retry_count,
2122 &dev_attr_cmd_sg_entries,
2123 &dev_attr_allow_ext_sg,
2124 NULL
2125 };
2126
2127 static struct scsi_host_template srp_template = {
2128 .module = THIS_MODULE,
2129 .name = "InfiniBand SRP initiator",
2130 .proc_name = DRV_NAME,
2131 .slave_configure = srp_slave_configure,
2132 .info = srp_target_info,
2133 .queuecommand = srp_queuecommand,
2134 .eh_abort_handler = srp_abort,
2135 .eh_device_reset_handler = srp_reset_device,
2136 .eh_host_reset_handler = srp_reset_host,
2137 .skip_settle_delay = true,
2138 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2139 .can_queue = SRP_CMD_SQ_SIZE,
2140 .this_id = -1,
2141 .cmd_per_lun = SRP_CMD_SQ_SIZE,
2142 .use_clustering = ENABLE_CLUSTERING,
2143 .shost_attrs = srp_host_attrs
2144 };
2145
2146 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2147 {
2148 struct srp_rport_identifiers ids;
2149 struct srp_rport *rport;
2150
2151 sprintf(target->target_name, "SRP.T10:%016llX",
2152 (unsigned long long) be64_to_cpu(target->id_ext));
2153
2154 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2155 return -ENODEV;
2156
2157 memcpy(ids.port_id, &target->id_ext, 8);
2158 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2159 ids.roles = SRP_RPORT_ROLE_TARGET;
2160 rport = srp_rport_add(target->scsi_host, &ids);
2161 if (IS_ERR(rport)) {
2162 scsi_remove_host(target->scsi_host);
2163 return PTR_ERR(rport);
2164 }
2165
2166 rport->lld_data = target;
2167 target->rport = rport;
2168
2169 spin_lock(&host->target_lock);
2170 list_add_tail(&target->list, &host->target_list);
2171 spin_unlock(&host->target_lock);
2172
2173 target->state = SRP_TARGET_LIVE;
2174
2175 scsi_scan_target(&target->scsi_host->shost_gendev,
2176 0, target->scsi_id, SCAN_WILD_CARD, 0);
2177
2178 return 0;
2179 }
2180
2181 static void srp_release_dev(struct device *dev)
2182 {
2183 struct srp_host *host =
2184 container_of(dev, struct srp_host, dev);
2185
2186 complete(&host->released);
2187 }
2188
2189 static struct class srp_class = {
2190 .name = "infiniband_srp",
2191 .dev_release = srp_release_dev
2192 };
2193
2194 /**
2195 * srp_conn_unique() - check whether the connection to a target is unique
2196 */
2197 static bool srp_conn_unique(struct srp_host *host,
2198 struct srp_target_port *target)
2199 {
2200 struct srp_target_port *t;
2201 bool ret = false;
2202
2203 if (target->state == SRP_TARGET_REMOVED)
2204 goto out;
2205
2206 ret = true;
2207
2208 spin_lock(&host->target_lock);
2209 list_for_each_entry(t, &host->target_list, list) {
2210 if (t != target &&
2211 target->id_ext == t->id_ext &&
2212 target->ioc_guid == t->ioc_guid &&
2213 target->initiator_ext == t->initiator_ext) {
2214 ret = false;
2215 break;
2216 }
2217 }
2218 spin_unlock(&host->target_lock);
2219
2220 out:
2221 return ret;
2222 }
2223
2224 /*
2225 * Target ports are added by writing
2226 *
2227 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2228 * pkey=<P_Key>,service_id=<service ID>
2229 *
2230 * to the add_target sysfs attribute.
2231 */
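/*
 * For illustration only -- the identifiers and HCA name below are made-up
 * placeholders rather than values taken from this file. A target port could
 * be added with e.g.:
 *
 *   echo id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4 > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * The optional parameters recognized below (max_sect, max_cmd_per_lun,
 * io_class, initiator_ext, cmd_sg_entries, allow_ext_sg, sg_tablesize,
 * comp_vector and tl_retry_count) may be appended to the same
 * comma-separated list.
 */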
2232 enum {
2233 SRP_OPT_ERR = 0,
2234 SRP_OPT_ID_EXT = 1 << 0,
2235 SRP_OPT_IOC_GUID = 1 << 1,
2236 SRP_OPT_DGID = 1 << 2,
2237 SRP_OPT_PKEY = 1 << 3,
2238 SRP_OPT_SERVICE_ID = 1 << 4,
2239 SRP_OPT_MAX_SECT = 1 << 5,
2240 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2241 SRP_OPT_IO_CLASS = 1 << 7,
2242 SRP_OPT_INITIATOR_EXT = 1 << 8,
2243 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
2244 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2245 SRP_OPT_SG_TABLESIZE = 1 << 11,
2246 SRP_OPT_COMP_VECTOR = 1 << 12,
2247 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
2248 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2249 SRP_OPT_IOC_GUID |
2250 SRP_OPT_DGID |
2251 SRP_OPT_PKEY |
2252 SRP_OPT_SERVICE_ID),
2253 };
2254
2255 static const match_table_t srp_opt_tokens = {
2256 { SRP_OPT_ID_EXT, "id_ext=%s" },
2257 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2258 { SRP_OPT_DGID, "dgid=%s" },
2259 { SRP_OPT_PKEY, "pkey=%x" },
2260 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2261 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2262 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
2263 { SRP_OPT_IO_CLASS, "io_class=%x" },
2264 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
2265 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
2266 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2267 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
2268 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
2269 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
2270 { SRP_OPT_ERR, NULL }
2271 };
2272
2273 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2274 {
2275 char *options, *sep_opt;
2276 char *p;
2277 char dgid[3];
2278 substring_t args[MAX_OPT_ARGS];
2279 int opt_mask = 0;
2280 int token;
2281 int ret = -EINVAL;
2282 int i;
2283
2284 options = kstrdup(buf, GFP_KERNEL);
2285 if (!options)
2286 return -ENOMEM;
2287
2288 sep_opt = options;
2289 while ((p = strsep(&sep_opt, ",")) != NULL) {
2290 if (!*p)
2291 continue;
2292
2293 token = match_token(p, srp_opt_tokens, args);
2294 opt_mask |= token;
2295
2296 switch (token) {
2297 case SRP_OPT_ID_EXT:
2298 p = match_strdup(args);
2299 if (!p) {
2300 ret = -ENOMEM;
2301 goto out;
2302 }
2303 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2304 kfree(p);
2305 break;
2306
2307 case SRP_OPT_IOC_GUID:
2308 p = match_strdup(args);
2309 if (!p) {
2310 ret = -ENOMEM;
2311 goto out;
2312 }
2313 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2314 kfree(p);
2315 break;
2316
2317 case SRP_OPT_DGID:
2318 p = match_strdup(args);
2319 if (!p) {
2320 ret = -ENOMEM;
2321 goto out;
2322 }
2323 if (strlen(p) != 32) {
2324 pr_warn("bad dest GID parameter '%s'\n", p);
2325 kfree(p);
2326 goto out;
2327 }
2328
2329 for (i = 0; i < 16; ++i) {
2330 strlcpy(dgid, p + i * 2, 3);
2331 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2332 }
2333 kfree(p);
2334 memcpy(target->orig_dgid, target->path.dgid.raw, 16);
2335 break;
2336
2337 case SRP_OPT_PKEY:
2338 if (match_hex(args, &token)) {
2339 pr_warn("bad P_Key parameter '%s'\n", p);
2340 goto out;
2341 }
2342 target->path.pkey = cpu_to_be16(token);
2343 break;
2344
2345 case SRP_OPT_SERVICE_ID:
2346 p = match_strdup(args);
2347 if (!p) {
2348 ret = -ENOMEM;
2349 goto out;
2350 }
2351 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2352 target->path.service_id = target->service_id;
2353 kfree(p);
2354 break;
2355
2356 case SRP_OPT_MAX_SECT:
2357 if (match_int(args, &token)) {
2358 pr_warn("bad max sect parameter '%s'\n", p);
2359 goto out;
2360 }
2361 target->scsi_host->max_sectors = token;
2362 break;
2363
2364 case SRP_OPT_MAX_CMD_PER_LUN:
2365 if (match_int(args, &token)) {
2366 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2367 p);
2368 goto out;
2369 }
2370 target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
2371 break;
2372
2373 case SRP_OPT_IO_CLASS:
2374 if (match_hex(args, &token)) {
2375 pr_warn("bad IO class parameter '%s'\n", p);
2376 goto out;
2377 }
2378 if (token != SRP_REV10_IB_IO_CLASS &&
2379 token != SRP_REV16A_IB_IO_CLASS) {
2380 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2381 token, SRP_REV10_IB_IO_CLASS,
2382 SRP_REV16A_IB_IO_CLASS);
2383 goto out;
2384 }
2385 target->io_class = token;
2386 break;
2387
2388 case SRP_OPT_INITIATOR_EXT:
2389 p = match_strdup(args);
2390 if (!p) {
2391 ret = -ENOMEM;
2392 goto out;
2393 }
2394 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2395 kfree(p);
2396 break;
2397
2398 case SRP_OPT_CMD_SG_ENTRIES:
2399 if (match_int(args, &token) || token < 1 || token > 255) {
2400 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2401 p);
2402 goto out;
2403 }
2404 target->cmd_sg_cnt = token;
2405 break;
2406
2407 case SRP_OPT_ALLOW_EXT_SG:
2408 if (match_int(args, &token)) {
2409 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
2410 goto out;
2411 }
2412 target->allow_ext_sg = !!token;
2413 break;
2414
2415 case SRP_OPT_SG_TABLESIZE:
2416 if (match_int(args, &token) || token < 1 ||
2417 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
2418 pr_warn("bad max sg_tablesize parameter '%s'\n",
2419 p);
2420 goto out;
2421 }
2422 target->sg_tablesize = token;
2423 break;
2424
2425 case SRP_OPT_COMP_VECTOR:
2426 if (match_int(args, &token) || token < 0) {
2427 pr_warn("bad comp_vector parameter '%s'\n", p);
2428 goto out;
2429 }
2430 target->comp_vector = token;
2431 break;
2432
2433 case SRP_OPT_TL_RETRY_COUNT:
2434 if (match_int(args, &token) || token < 2 || token > 7) {
2435 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
2436 p);
2437 goto out;
2438 }
2439 target->tl_retry_count = token;
2440 break;
2441
2442 default:
2443 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2444 p);
2445 goto out;
2446 }
2447 }
2448
2449 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2450 ret = 0;
2451 else
2452 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2453 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2454 !(srp_opt_tokens[i].token & opt_mask))
2455 pr_warn("target creation request is missing parameter '%s'\n",
2456 srp_opt_tokens[i].pattern);
2457
2458 out:
2459 kfree(options);
2460 return ret;
2461 }
2462
2463 static ssize_t srp_create_target(struct device *dev,
2464 struct device_attribute *attr,
2465 const char *buf, size_t count)
2466 {
2467 struct srp_host *host =
2468 container_of(dev, struct srp_host, dev);
2469 struct Scsi_Host *target_host;
2470 struct srp_target_port *target;
2471 struct ib_device *ibdev = host->srp_dev->dev;
2472 int ret;
2473
2474 target_host = scsi_host_alloc(&srp_template,
2475 sizeof (struct srp_target_port));
2476 if (!target_host)
2477 return -ENOMEM;
2478
2479 target_host->transportt = ib_srp_transport_template;
2480 target_host->max_channel = 0;
2481 target_host->max_id = 1;
2482 target_host->max_lun = SRP_MAX_LUN;
2483 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
2484
2485 target = host_to_target(target_host);
2486
2487 target->io_class = SRP_REV16A_IB_IO_CLASS;
2488 target->scsi_host = target_host;
2489 target->srp_host = host;
2490 target->lkey = host->srp_dev->mr->lkey;
2491 target->rkey = host->srp_dev->mr->rkey;
2492 target->cmd_sg_cnt = cmd_sg_entries;
2493 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
2494 target->allow_ext_sg = allow_ext_sg;
2495 target->tl_retry_count = 7;
2496
2497 ret = srp_parse_options(buf, target);
2498 if (ret)
2499 goto err;
2500
2501 if (!srp_conn_unique(target->srp_host, target)) {
2502 shost_printk(KERN_INFO, target->scsi_host,
2503 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
2504 be64_to_cpu(target->id_ext),
2505 be64_to_cpu(target->ioc_guid),
2506 be64_to_cpu(target->initiator_ext));
2507 ret = -EEXIST;
2508 goto err;
2509 }
2510
2511 if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
2512 target->cmd_sg_cnt < target->sg_tablesize) {
2513 pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
2514 target->sg_tablesize = target->cmd_sg_cnt;
2515 }
2516
2517 target_host->sg_tablesize = target->sg_tablesize;
2518 target->indirect_size = target->sg_tablesize *
2519 sizeof (struct srp_direct_buf);
2520 target->max_iu_len = sizeof (struct srp_cmd) +
2521 sizeof (struct srp_indirect_buf) +
2522 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
2523
2524 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
2525 INIT_WORK(&target->remove_work, srp_remove_work);
2526 spin_lock_init(&target->lock);
2527 INIT_LIST_HEAD(&target->free_tx);
2528 ret = srp_alloc_req_data(target);
2529 if (ret)
2530 goto err_free_mem;
2531
2532 ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
2533
2534 shost_printk(KERN_DEBUG, target->scsi_host, PFX
2535 "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
2536 "service_id %016llx dgid %pI6\n",
2537 (unsigned long long) be64_to_cpu(target->id_ext),
2538 (unsigned long long) be64_to_cpu(target->ioc_guid),
2539 be16_to_cpu(target->path.pkey),
2540 (unsigned long long) be64_to_cpu(target->service_id),
2541 target->path.dgid.raw);
2542
2543 ret = srp_create_target_ib(target);
2544 if (ret)
2545 goto err_free_mem;
2546
2547 ret = srp_new_cm_id(target);
2548 if (ret)
2549 goto err_free_ib;
2550
2551 ret = srp_connect_target(target);
2552 if (ret) {
2553 shost_printk(KERN_ERR, target->scsi_host,
2554 PFX "Connection failed\n");
2555 goto err_cm_id;
2556 }
2557
2558 ret = srp_add_target(host, target);
2559 if (ret)
2560 goto err_disconnect;
2561
2562 return count;
2563
2564 err_disconnect:
2565 srp_disconnect_target(target);
2566
2567 err_cm_id:
2568 ib_destroy_cm_id(target->cm_id);
2569
2570 err_free_ib:
2571 srp_free_target_ib(target);
2572
2573 err_free_mem:
2574 srp_free_req_data(target);
2575
2576 err:
2577 scsi_host_put(target_host);
2578
2579 return ret;
2580 }
2581
2582 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
2583
2584 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
2585 char *buf)
2586 {
2587 struct srp_host *host = container_of(dev, struct srp_host, dev);
2588
2589 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
2590 }
2591
2592 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
2593
2594 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
2595 char *buf)
2596 {
2597 struct srp_host *host = container_of(dev, struct srp_host, dev);
2598
2599 return sprintf(buf, "%d\n", host->port);
2600 }
2601
2602 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
2603
2604 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
2605 {
2606 struct srp_host *host;
2607
2608 host = kzalloc(sizeof *host, GFP_KERNEL);
2609 if (!host)
2610 return NULL;
2611
2612 INIT_LIST_HEAD(&host->target_list);
2613 spin_lock_init(&host->target_lock);
2614 init_completion(&host->released);
2615 host->srp_dev = device;
2616 host->port = port;
2617
2618 host->dev.class = &srp_class;
2619 host->dev.parent = device->dev->dma_device;
2620 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
2621
2622 if (device_register(&host->dev))
2623 goto free_host;
2624 if (device_create_file(&host->dev, &dev_attr_add_target))
2625 goto err_class;
2626 if (device_create_file(&host->dev, &dev_attr_ibdev))
2627 goto err_class;
2628 if (device_create_file(&host->dev, &dev_attr_port))
2629 goto err_class;
2630
2631 return host;
2632
2633 err_class:
2634 device_unregister(&host->dev);
2635
2636 free_host:
2637 kfree(host);
2638
2639 return NULL;
2640 }
2641
2642 static void srp_add_one(struct ib_device *device)
2643 {
2644 struct srp_device *srp_dev;
2645 struct ib_device_attr *dev_attr;
2646 struct ib_fmr_pool_param fmr_param;
2647 struct srp_host *host;
2648 int max_pages_per_fmr, fmr_page_shift, s, e, p;
2649
2650 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
2651 if (!dev_attr)
2652 return;
2653
2654 if (ib_query_device(device, dev_attr)) {
2655 pr_warn("Query device failed for %s\n", device->name);
2656 goto free_attr;
2657 }
2658
2659 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
2660 if (!srp_dev)
2661 goto free_attr;
2662
2663 /*
2664 * Use the smallest page size supported by the HCA, down to a
2665 * minimum of 4096 bytes. We're unlikely to build large sglists
2666 * out of smaller entries.
2667 */
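/*
 * Worked example (hypothetical values): a page_size_cap of 0x1000, i.e. a
 * smallest supported page of 4 KiB, yields ffs() - 1 == 12, so
 * fmr_page_shift == 12 and fmr_page_size == 4096. An HCA that only
 * supported pages of 64 KiB and larger (lowest set bit 16) would yield
 * fmr_page_shift == 16 and fmr_page_size == 65536.
 */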
2668 fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
2669 srp_dev->fmr_page_size = 1 << fmr_page_shift;
2670 srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
2671 srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE;
2672
2673 INIT_LIST_HEAD(&srp_dev->dev_list);
2674
2675 srp_dev->dev = device;
2676 srp_dev->pd = ib_alloc_pd(device);
2677 if (IS_ERR(srp_dev->pd))
2678 goto free_dev;
2679
2680 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
2681 IB_ACCESS_LOCAL_WRITE |
2682 IB_ACCESS_REMOTE_READ |
2683 IB_ACCESS_REMOTE_WRITE);
2684 if (IS_ERR(srp_dev->mr))
2685 goto err_pd;
2686
2687 for (max_pages_per_fmr = SRP_FMR_SIZE;
2688 max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
2689 max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
2690 memset(&fmr_param, 0, sizeof fmr_param);
2691 fmr_param.pool_size = SRP_FMR_POOL_SIZE;
2692 fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;
2693 fmr_param.cache = 1;
2694 fmr_param.max_pages_per_fmr = max_pages_per_fmr;
2695 fmr_param.page_shift = fmr_page_shift;
2696 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
2697 IB_ACCESS_REMOTE_WRITE |
2698 IB_ACCESS_REMOTE_READ);
2699
2700 srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
2701 if (!IS_ERR(srp_dev->fmr_pool))
2702 break;
2703 }
2704
2705 if (IS_ERR(srp_dev->fmr_pool))
2706 srp_dev->fmr_pool = NULL;
2707
2708 if (device->node_type == RDMA_NODE_IB_SWITCH) {
2709 s = 0;
2710 e = 0;
2711 } else {
2712 s = 1;
2713 e = device->phys_port_cnt;
2714 }
2715
2716 for (p = s; p <= e; ++p) {
2717 host = srp_add_port(srp_dev, p);
2718 if (host)
2719 list_add_tail(&host->list, &srp_dev->dev_list);
2720 }
2721
2722 ib_set_client_data(device, &srp_client, srp_dev);
2723
2724 goto free_attr;
2725
2726 err_pd:
2727 ib_dealloc_pd(srp_dev->pd);
2728
2729 free_dev:
2730 kfree(srp_dev);
2731
2732 free_attr:
2733 kfree(dev_attr);
2734 }
2735
2736 static void srp_remove_one(struct ib_device *device)
2737 {
2738 struct srp_device *srp_dev;
2739 struct srp_host *host, *tmp_host;
2740 struct srp_target_port *target;
2741
2742 srp_dev = ib_get_client_data(device, &srp_client);
2743 if (!srp_dev)
2744 return;
2745
2746 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
2747 device_unregister(&host->dev);
2748 /*
2749 * Wait for the sysfs entry to go away, so that no new
2750 * target ports can be created.
2751 */
2752 wait_for_completion(&host->released);
2753
2754 /*
2755 * Remove all target ports.
2756 */
2757 spin_lock(&host->target_lock);
2758 list_for_each_entry(target, &host->target_list, list)
2759 srp_queue_remove_work(target);
2760 spin_unlock(&host->target_lock);
2761
2762 /*
2763 * Wait for target port removal tasks.
2764 */
2765 flush_workqueue(system_long_wq);
2766
2767 kfree(host);
2768 }
2769
2770 if (srp_dev->fmr_pool)
2771 ib_destroy_fmr_pool(srp_dev->fmr_pool);
2772 ib_dereg_mr(srp_dev->mr);
2773 ib_dealloc_pd(srp_dev->pd);
2774
2775 kfree(srp_dev);
2776 }
2777
2778 static struct srp_function_template ib_srp_transport_functions = {
2779 .has_rport_state = true,
2780 .reset_timer_if_blocked = true,
2781 .reconnect_delay = &srp_reconnect_delay,
2782 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
2783 .dev_loss_tmo = &srp_dev_loss_tmo,
2784 .reconnect = srp_rport_reconnect,
2785 .rport_delete = srp_rport_delete,
2786 .terminate_rport_io = srp_terminate_io,
2787 };
2788
2789 static int __init srp_init_module(void)
2790 {
2791 int ret;
2792
2793 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
2794
2795 if (srp_sg_tablesize) {
2796 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
2797 if (!cmd_sg_entries)
2798 cmd_sg_entries = srp_sg_tablesize;
2799 }
2800
2801 if (!cmd_sg_entries)
2802 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
2803
2804 if (cmd_sg_entries > 255) {
2805 pr_warn("Clamping cmd_sg_entries to 255\n");
2806 cmd_sg_entries = 255;
2807 }
2808
2809 if (!indirect_sg_entries)
2810 indirect_sg_entries = cmd_sg_entries;
2811 else if (indirect_sg_entries < cmd_sg_entries) {
2812 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
2813 cmd_sg_entries);
2814 indirect_sg_entries = cmd_sg_entries;
2815 }
2816
2817 ib_srp_transport_template =
2818 srp_attach_transport(&ib_srp_transport_functions);
2819 if (!ib_srp_transport_template)
2820 return -ENOMEM;
2821
2822 ret = class_register(&srp_class);
2823 if (ret) {
2824 pr_err("couldn't register class infiniband_srp\n");
2825 srp_release_transport(ib_srp_transport_template);
2826 return ret;
2827 }
2828
2829 ib_sa_register_client(&srp_sa_client);
2830
2831 ret = ib_register_client(&srp_client);
2832 if (ret) {
2833 pr_err("couldn't register IB client\n");
2834 srp_release_transport(ib_srp_transport_template);
2835 ib_sa_unregister_client(&srp_sa_client);
2836 class_unregister(&srp_class);
2837 return ret;
2838 }
2839
2840 return 0;
2841 }
2842
2843 static void __exit srp_cleanup_module(void)
2844 {
2845 ib_unregister_client(&srp_client);
2846 ib_sa_unregister_client(&srp_sa_client);
2847 class_unregister(&srp_class);
2848 srp_release_transport(ib_srp_transport_template);
2849 }
2850
2851 module_init(srp_init_module);
2852 module_exit(srp_cleanup_module);