IB: Modify ib_create_mr API
[deliverable/linux.git] / drivers / infiniband / ulp / isert / ib_isert.c
CommitLineData
b8d26b3b
NB
1/*******************************************************************************
2 * This file contains iSCSI extentions for RDMA (iSER) Verbs
3 *
4c76251e 4 * (c) Copyright 2013 Datera, Inc.
b8d26b3b
NB
5 *
6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ****************************************************************************/
18
19#include <linux/string.h>
20#include <linux/module.h>
21#include <linux/scatterlist.h>
22#include <linux/socket.h>
23#include <linux/in.h>
24#include <linux/in6.h>
25#include <rdma/ib_verbs.h>
26#include <rdma/rdma_cm.h>
27#include <target/target_core_base.h>
28#include <target/target_core_fabric.h>
29#include <target/iscsi/iscsi_transport.h>
531b7bf4 30#include <linux/semaphore.h>
b8d26b3b
NB
31
32#include "isert_proto.h"
33#include "ib_isert.h"
34
35#define ISERT_MAX_CONN 8
36#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
37#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
bdf20e72
SG
38#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
39 ISERT_MAX_CONN)
b8d26b3b 40
45678b6b 41static int isert_debug_level;
24f412dd
SG
42module_param_named(debug_level, isert_debug_level, int, 0644);
43MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
44
b8d26b3b
NB
45static DEFINE_MUTEX(device_list_mutex);
46static LIST_HEAD(device_list);
b8d26b3b 47static struct workqueue_struct *isert_comp_wq;
b02efbfc 48static struct workqueue_struct *isert_release_wq;
b8d26b3b 49
d40945d8
VP
50static void
51isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
52static int
53isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
54 struct isert_rdma_wr *wr);
59464ef4 55static void
a3a5a826 56isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
59464ef4 57static int
a3a5a826
SG
58isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
59 struct isert_rdma_wr *wr);
f93f3a70
SG
60static int
61isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
2371e5da
SG
62static int
63isert_rdma_post_recvl(struct isert_conn *isert_conn);
64static int
65isert_rdma_accept(struct isert_conn *isert_conn);
ca6c1d82 66struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
d40945d8 67
2f1b6b7d
SG
68static void isert_release_work(struct work_struct *work);
69
302cc7c3
SG
/*
 * isert_prot_cmd() - true when @cmd requires T10-PI (protection)
 * processing: the connection negotiated pi_support and the command
 * carries a protection op other than TARGET_PROT_NORMAL.
 */
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}
76
77
b8d26b3b
NB
/*
 * QP async event handler (registered via ib_qp_init_attr.event_handler).
 * Logs every event; COMM_EST is forwarded to the RDMA CM through
 * rdma_notify() so connection establishment can make progress.
 */
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = context;

	isert_err("%s (%d): conn %p\n",
		  ib_event_msg(e->event), e->event, isert_conn);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		/* informational only; drain handling happens elsewhere */
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}
97
/*
 * Query @ib_dev attributes into @devattr.
 * Returns 0 on success or the ib_query_device() error code.
 */
static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		isert_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}
113
40fc069a
SG
/*
 * Pick the least-loaded completion context (fewest active QPs) on the
 * connection's device and account the new QP against it. The linear
 * scan and the counter update are serialized by device_list_mutex.
 * Paired with isert_comp_put().
 */
static struct isert_comp *
isert_comp_get(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct isert_comp *comp;
	int i, min = 0;

	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
	mutex_unlock(&device_list_mutex);

	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);

	return comp;
}
135
/* Drop one QP from @comp's load accounting; undoes isert_comp_get(). */
static void
isert_comp_put(struct isert_comp *comp)
{
	mutex_lock(&device_list_mutex);
	comp->active_qps--;
	mutex_unlock(&device_list_mutex);
}
143
/*
 * Create an RC QP on @cma_id, using @comp's CQ for both send and recv
 * completions. Signature offload (T10-PI) is enabled when the device
 * reports pi_capable. Returns the new QP or an ERR_PTR on failure.
 */
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
		struct isert_comp *comp,
		struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->device;
	struct ib_qp_init_attr attr;
	int ret;

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->cq;
	attr.recv_cq = comp->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	/* +1 leaves room for the login receive posted before full-feature */
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ERR_PTR(ret);
	}

	return cma_id->qp;
}
184
/*
 * Reserve a completion context and create the connection's QP.
 * On QP creation failure the comp reference is released again.
 * Returns 0 or a negative errno.
 */
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_comp *comp;
	int ret;

	comp = isert_comp_get(isert_conn);
	isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
	if (IS_ERR(isert_conn->qp)) {
		ret = PTR_ERR(isert_conn->qp);
		goto err;
	}

	return 0;
err:
	isert_comp_put(comp);
	return ret;
}
203
/* CQ async event handler: nothing to do beyond debug tracing. */
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	isert_dbg("event: %d\n", e->event);
}
209
210static int
211isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
212{
dac6ab30 213 struct isert_device *device = isert_conn->device;
67cb3949 214 struct ib_device *ib_dev = device->ib_device;
b8d26b3b
NB
215 struct iser_rx_desc *rx_desc;
216 struct ib_sge *rx_sg;
217 u64 dma_addr;
218 int i, j;
219
dac6ab30 220 isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
b8d26b3b 221 sizeof(struct iser_rx_desc), GFP_KERNEL);
dac6ab30 222 if (!isert_conn->rx_descs)
b8d26b3b
NB
223 goto fail;
224
dac6ab30 225 rx_desc = isert_conn->rx_descs;
b8d26b3b
NB
226
227 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
228 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
229 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
230 if (ib_dma_mapping_error(ib_dev, dma_addr))
231 goto dma_map_fail;
232
233 rx_desc->dma_addr = dma_addr;
234
235 rx_sg = &rx_desc->rx_sg;
236 rx_sg->addr = rx_desc->dma_addr;
237 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
67cb3949 238 rx_sg->lkey = device->mr->lkey;
b8d26b3b
NB
239 }
240
dac6ab30 241 isert_conn->rx_desc_head = 0;
4c22e07f 242
b8d26b3b
NB
243 return 0;
244
245dma_map_fail:
dac6ab30 246 rx_desc = isert_conn->rx_descs;
b8d26b3b
NB
247 for (j = 0; j < i; j++, rx_desc++) {
248 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
249 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
250 }
dac6ab30
SG
251 kfree(isert_conn->rx_descs);
252 isert_conn->rx_descs = NULL;
b8d26b3b 253fail:
4c22e07f
SG
254 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
255
b8d26b3b
NB
256 return -ENOMEM;
257}
258
/*
 * Unmap and free the receive descriptor ring; no-op when the ring was
 * never allocated. The pointer is cleared so the call is idempotent.
 */
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->rx_descs)
		return;

	rx_desc = isert_conn->rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
}
278
6f0fae3d
SG
279static void isert_cq_work(struct work_struct *);
280static void isert_cq_callback(struct ib_cq *, void *);
b8d26b3b 281
172369c5
SG
/*
 * Tear down all completion contexts of @device: flush pending CQ work
 * and destroy each CQ that was created, then free the array. Safe on a
 * partially initialized array (NULL cq entries are skipped).
 */
static void
isert_free_comps(struct isert_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		if (comp->cq) {
			cancel_work_sync(&comp->work);
			ib_destroy_cq(comp->cq);
		}
	}
	kfree(device->comps);
}
d40945d8 297
172369c5
SG
/*
 * Allocate one completion context per CQ vector, bounded by
 * ISERT_MAX_CQ, online CPUs, and the device's comp vector count.
 * Each context gets its own CQ (sized by ISER_MAX_CQ_LEN capped at the
 * device max) armed for next-completion notification.
 * On any failure everything created so far is freed via
 * isert_free_comps(). Returns 0 or a negative errno.
 */
static int
isert_alloc_comps(struct isert_device *device,
		  struct ib_device_attr *attr)
{
	int i, max_cqe, ret = 0;

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors));

	isert_info("Using %d CQs, %s supports %d vectors support "
		   "Fast registration %d pi_capable %d\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors, device->use_fastreg,
		   device->pi_capable);

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps) {
		isert_err("Unable to allocate completion contexts\n");
		return -ENOMEM;
	}

	max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);

	for (i = 0; i < device->comps_used; i++) {
		struct ib_cq_init_attr cq_attr = {};
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		INIT_WORK(&comp->work, isert_cq_work);
		cq_attr.cqe = max_cqe;
		cq_attr.comp_vector = i;
		comp->cq = ib_create_cq(device->ib_device,
					isert_cq_callback,
					isert_cq_event_callback,
					(void *)comp,
					&cq_attr);
		if (IS_ERR(comp->cq)) {
			isert_err("Unable to allocate cq\n");
			ret = PTR_ERR(comp->cq);
			/* clear so isert_free_comps() skips this slot */
			comp->cq = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;
out_cq:
	isert_free_comps(device);
	return ret;
}
352
/*
 * One-time per-device setup: query attributes, select the RDMA
 * registration strategy (fastreg when MEM_MGT_EXTENSIONS and
 * SIGNATURE_HANDOVER are both supported, plain dma_map otherwise),
 * allocate completion contexts, a PD and a local DMA MR, and record
 * pi_capable. Cleans up in reverse order on failure.
 * Returns 0 or a negative errno.
 */
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device_attr *dev_attr;
	int ret;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(device->ib_device, dev_attr);
	if (ret)
		return ret;

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	ret = isert_alloc_comps(device, dev_attr);
	if (ret)
		return ret;

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",
			  device, ret);
		goto out_cq;
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(device->mr)) {
		ret = PTR_ERR(device->mr);
		isert_err("failed to create dma mr, device %p, ret=%d\n",
			  device, ret);
		goto out_mr;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	return 0;

out_mr:
	ib_dealloc_pd(device->pd);
out_cq:
	isert_free_comps(device);
	return ret;
}
408
/*
 * Release the per-device IB resources in reverse order of creation:
 * DMA MR, PD, then completion contexts.
 */
static void
isert_free_device_ib_res(struct isert_device *device)
{
	isert_info("device %p\n", device);

	ib_dereg_mr(device->mr);
	ib_dealloc_pd(device->pd);
	isert_free_comps(device);
}
418
/*
 * Drop a reference on @device (paired with isert_device_get()).
 * The last put frees the IB resources, unlinks it from device_list and
 * frees the struct — all under device_list_mutex.
 */
static void
isert_device_put(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
432
/*
 * Find-or-create the isert_device for @cma_id's HCA, keyed by node GUID.
 * An existing device just gains a reference; otherwise a new one is
 * allocated, its IB resources created, and it is added to device_list.
 * Entire lookup/insert is serialized by device_list_mutex.
 * Returns the referenced device or an ERR_PTR.
 */
static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

	return device;
}
474
/*
 * Free every fast-registration descriptor in the connection's pool:
 * page lists, MRs, the optional protection (PI) context, then the
 * descriptor itself. Warns if fewer descriptors were freed than the
 * recorded pool size (i.e. some are still checked out).
 */
static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->fr_pool))
		return;

	isert_info("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);
		}
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->fr_pool_size)
		isert_warn("Pool still has %d regions registered\n",
			isert_conn->fr_pool_size - i);
}
505
570db170
SG
/*
 * Allocate the protection-information (T10-PI) context for one fastreg
 * descriptor: a protection fast-reg page list and MR plus a signature
 * MR. On success attaches it to @desc and updates desc->ind key-valid
 * flags; on failure unwinds in reverse order.
 * Returns 0 or a negative errno.
 */
static int
isert_create_pi_ctx(struct fast_reg_descriptor *desc,
		    struct ib_device *device,
		    struct ib_pd *pd)
{
	struct pi_context *pi_ctx;
	int ret;

	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!pi_ctx) {
		isert_err("Failed to allocate pi context\n");
		return -ENOMEM;
	}

	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
					    ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_frpl)) {
		isert_err("Failed to allocate prot frpl err=%ld\n",
			  PTR_ERR(pi_ctx->prot_frpl));
		ret = PTR_ERR(pi_ctx->prot_frpl);
		goto err_pi_ctx;
	}

	pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_mr)) {
		isert_err("Failed to allocate prot frmr err=%ld\n",
			  PTR_ERR(pi_ctx->prot_mr));
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto err_prot_frpl;
	}
	desc->ind |= ISERT_PROT_KEY_VALID;

	/* signature MR spans the data and protection regions (2 entries) */
	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		isert_err("Failed to allocate signature enabled mr err=%ld\n",
			  PTR_ERR(pi_ctx->sig_mr));
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto err_prot_mr;
	}

	desc->pi_ctx = pi_ctx;
	desc->ind |= ISERT_SIG_KEY_VALID;
	desc->ind &= ~ISERT_PROTECTED;

	return 0;

err_prot_mr:
	ib_dereg_mr(pi_ctx->prot_mr);
err_prot_frpl:
	ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
err_pi_ctx:
	kfree(pi_ctx);

	return ret;
}
561
dc87a90f
SG
/*
 * Initialize one fast-registration descriptor: allocate its data
 * fast-reg page list and MR and mark the data key valid.
 * Returns 0 or a negative errno (page list freed on MR failure).
 */
static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	int ret;

	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		isert_err("Failed to allocate data frpl err=%ld\n",
			  PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		isert_err("Failed to allocate data frmr err=%ld\n",
			  PTR_ERR(fr_desc->data_mr));
		ret = PTR_ERR(fr_desc->data_mr);
		goto err_data_frpl;
	}
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

	isert_dbg("Created fr_desc %p\n", fr_desc);

	return 0;

err_data_frpl:
	ib_free_fast_reg_page_list(fr_desc->data_frpl);

	return ret;
}
594
/*
 * Populate the connection's fast-registration pool. Pool size is
 * derived from the session tag count (2 * tags + extra). On any
 * failure the whole pool built so far is torn down.
 * Returns 0 or a negative errno.
 */
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			isert_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   device->pd, fr_desc);
		if (ret) {
			isert_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		isert_conn->fr_pool_size++;
	}

	isert_dbg("Creating conn %p fastreg pool size=%d",
		 isert_conn, isert_conn->fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}
641
ae9ea9ed
SG
/*
 * Initialize the bookkeeping members of a freshly kzalloc'ed
 * isert_conn: state, list heads, completions, kref, locks and the
 * deferred release work item. No IB resources are touched here.
 */
static void
isert_init_conn(struct isert_conn *isert_conn)
{
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->accept_node);
	init_completion(&isert_conn->login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->wait);
	kref_init(&isert_conn->kref);
	mutex_init(&isert_conn->mutex);
	spin_lock_init(&isert_conn->pool_lock);
	INIT_LIST_HEAD(&isert_conn->fr_pool);
	INIT_WORK(&isert_conn->release_work, isert_release_work);
}
b8d26b3b 656
ae9ea9ed
SG
/*
 * Unmap the login request/response DMA regions and free the combined
 * login buffer allocated by isert_alloc_login_buf().
 */
static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_FROM_DEVICE);
	kfree(isert_conn->login_buf);
}
669
/*
 * Allocate one buffer holding both the login request (front,
 * ISCSI_DEF_MAX_RECV_SEG_LEN bytes, mapped FROM_DEVICE) and the login
 * response (tail, ISER_RX_LOGIN_SIZE bytes, mapped TO_DEVICE), and DMA
 * map both halves. Unwinds mapping/allocation on failure.
 * Returns 0 or a negative errno.
 */
static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
{
	int ret;

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		isert_err("Unable to allocate isert_conn->login_buf\n");
		return -ENOMEM;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;

	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("login_req_dma mapping error: %d\n", ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_rsp_buf,
				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	return 0;

out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
	return ret;
}
722
/*
 * RDMA CM CONNECT_REQUEST handler: build a new isert_conn for the
 * incoming connection — login buffers, device reference, QP, initial
 * login receive — then accept the connection and queue it on the np
 * accept list, waking the accept thread. Requests arriving while the
 * iscsi_np is disabled are rejected outright. Any later failure
 * unwinds in reverse order and rejects the CM id.
 * Returns 0 or a negative errno.
 */
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn)
		return -ENOMEM;

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
	if (ret)
		goto out;

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}
	isert_conn->device = device;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;
	/*
	 * Obtain the second reference now before isert_rdma_accept() to
	 * ensure that any initiator generated REJECT CM event that occurs
	 * asynchronously won't drop the last reference until the error path
	 * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() ->
	 * isert_free_conn() -> isert_put_conn() -> kref_put().
	 */
	if (!kref_get_unless_zero(&isert_conn->kref)) {
		isert_warn("conn %p connect_release is running\n", isert_conn);
		goto out_conn_dev;
	}

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	isert_info("np %p: Allow accept_np to continue\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	isert_device_put(device);
out_rsp_dma_map:
	isert_free_login_buf(isert_conn);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}
807
/*
 * Final teardown of a connection (called from the last kref put):
 * free the fastreg pool (if used), rx descriptors, CM id, QP plus its
 * comp accounting, login buffers, drop the device reference and free
 * the struct. Each resource is released only if it was created.
 */
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	BUG_ON(!device);

	if (device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp) {
		struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;

		isert_comp_put(comp);
		ib_destroy_qp(isert_conn->qp);
	}

	if (isert_conn->login_buf)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

	kfree(isert_conn);
}
838
/*
 * RDMA CM ESTABLISHED handler: move the connection to UP unless login
 * already promoted it to FULL_FEATURE. State change is under the
 * connection mutex.
 */
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_info("conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
		isert_conn->state = ISER_CONN_UP;
	mutex_unlock(&isert_conn->mutex);
}
851
/* kref release callback: performs the final connection teardown. */
static void
isert_release_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}
863
/* Drop one connection reference; last put triggers isert_release_kref(). */
static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->kref, isert_release_kref);
}
869
954f2372
SG
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		isert_info("Terminating conn %p state %d\n",
			   isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->cm_id);
		if (err)
			isert_warn("Failed rdma_disconnect isert_conn %p\n",
				   isert_conn);
		break;
	default:
		isert_warn("conn %p teminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
}
905
/*
 * Handle CM events targeted at the listener (isert_np) id itself:
 * DEVICE_REMOVAL drops the listener id; ADDR_CHANGE recreates it via
 * isert_setup_id(). Always returns -1 so the CM destroys the old id.
 */
static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("%s (%d): isert np %p\n",
		  rdma_event_msg(event), event, isert_np);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->np_cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->np_cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->np_cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->np_cm_id));
			isert_np->np_cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}
932
/*
 * Handle disconnect-class CM events. Listener-id events are delegated
 * to isert_np_cma_handler(). For a live connection: start termination
 * under the connection mutex and complete the wait. If this is the
 * first time we terminate, also pull the connection off the np accept
 * list (dropping its list reference) and queue the deferred release.
 */
static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;
	bool terminating = false;

	if (isert_np->np_cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_info("conn %p completing wait\n", isert_conn);
	complete(&isert_conn->wait);

	if (terminating)
		goto out;

	mutex_lock(&isert_np->np_accept_mutex);
	if (!list_empty(&isert_conn->accept_node)) {
		list_del_init(&isert_conn->accept_node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	}
	mutex_unlock(&isert_np->np_accept_mutex);

out:
	return 0;
}
968
/*
 * Handle connect-error class CM events (REJECTED/UNREACHABLE/ERROR):
 * clear cm_id (the CM will destroy it when we return non-zero) and
 * drop the connection reference. Returns -1 to tell the CM to destroy
 * the id.
 */
static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}
979
b8d26b3b
NB
/*
 * Top-level RDMA CM event dispatcher for isert: routes each event to
 * the connect-request, established, disconnected or connect-error
 * handler. Returns whatever the specific handler returns (non-zero
 * makes the CM destroy the id).
 */
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
1016
1017static int
1018isert_post_recv(struct isert_conn *isert_conn, u32 count)
1019{
1020 struct ib_recv_wr *rx_wr, *rx_wr_failed;
1021 int i, ret;
dac6ab30 1022 unsigned int rx_head = isert_conn->rx_desc_head;
b8d26b3b
NB
1023 struct iser_rx_desc *rx_desc;
1024
dac6ab30
SG
1025 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
1026 rx_desc = &isert_conn->rx_descs[rx_head];
b0a191e7 1027 rx_wr->wr_id = (uintptr_t)rx_desc;
b8d26b3b
NB
1028 rx_wr->sg_list = &rx_desc->rx_sg;
1029 rx_wr->num_sge = 1;
1030 rx_wr->next = rx_wr + 1;
1031 rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
1032 }
1033
1034 rx_wr--;
1035 rx_wr->next = NULL; /* mark end of work requests list */
1036
1037 isert_conn->post_recv_buf_count += count;
dac6ab30 1038 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
b8d26b3b
NB
1039 &rx_wr_failed);
1040 if (ret) {
24f412dd 1041 isert_err("ib_post_recv() failed with ret: %d\n", ret);
b8d26b3b
NB
1042 isert_conn->post_recv_buf_count -= count;
1043 } else {
11378cdb 1044 isert_dbg("Posted %d RX buffers\n", count);
dac6ab30 1045 isert_conn->rx_desc_head = rx_head;
b8d26b3b
NB
1046 }
1047 return ret;
1048}
1049
1050static int
1051isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
1052{
dac6ab30 1053 struct ib_device *ib_dev = isert_conn->cm_id->device;
b8d26b3b
NB
1054 struct ib_send_wr send_wr, *send_wr_failed;
1055 int ret;
1056
1057 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
1058 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1059
1060 send_wr.next = NULL;
b0a191e7 1061 send_wr.wr_id = (uintptr_t)tx_desc;
b8d26b3b
NB
1062 send_wr.sg_list = tx_desc->tx_sg;
1063 send_wr.num_sge = tx_desc->num_sge;
1064 send_wr.opcode = IB_WR_SEND;
1065 send_wr.send_flags = IB_SEND_SIGNALED;
1066
dac6ab30 1067 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
bdf20e72 1068 if (ret)
24f412dd 1069 isert_err("ib_post_send() failed, ret: %d\n", ret);
b8d26b3b
NB
1070
1071 return ret;
1072}
1073
1074static void
1075isert_create_send_desc(struct isert_conn *isert_conn,
1076 struct isert_cmd *isert_cmd,
1077 struct iser_tx_desc *tx_desc)
1078{
dac6ab30 1079 struct isert_device *device = isert_conn->device;
67cb3949 1080 struct ib_device *ib_dev = device->ib_device;
b8d26b3b
NB
1081
1082 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
1083 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1084
1085 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
1086 tx_desc->iser_header.flags = ISER_VER;
1087
1088 tx_desc->num_sge = 1;
1089 tx_desc->isert_cmd = isert_cmd;
1090
67cb3949
SG
1091 if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
1092 tx_desc->tx_sg[0].lkey = device->mr->lkey;
24f412dd 1093 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
b8d26b3b
NB
1094 }
1095}
1096
1097static int
1098isert_init_tx_hdrs(struct isert_conn *isert_conn,
1099 struct iser_tx_desc *tx_desc)
1100{
dac6ab30 1101 struct isert_device *device = isert_conn->device;
67cb3949 1102 struct ib_device *ib_dev = device->ib_device;
b8d26b3b
NB
1103 u64 dma_addr;
1104
1105 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
1106 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1107 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
24f412dd 1108 isert_err("ib_dma_mapping_error() failed\n");
b8d26b3b
NB
1109 return -ENOMEM;
1110 }
1111
1112 tx_desc->dma_addr = dma_addr;
1113 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
1114 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
67cb3949 1115 tx_desc->tx_sg[0].lkey = device->mr->lkey;
b8d26b3b 1116
4c22e07f
SG
1117 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
1118 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
1119 tx_desc->tx_sg[0].lkey);
b8d26b3b
NB
1120
1121 return 0;
1122}
1123
1124static void
95b60f07 1125isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
68a86dee 1126 struct ib_send_wr *send_wr)
b8d26b3b 1127{
95b60f07
NB
1128 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
1129
b8d26b3b 1130 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
b0a191e7 1131 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
b8d26b3b 1132 send_wr->opcode = IB_WR_SEND;
95b60f07 1133 send_wr->sg_list = &tx_desc->tx_sg[0];
b8d26b3b 1134 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
95b60f07 1135 send_wr->send_flags = IB_SEND_SIGNALED;
b8d26b3b
NB
1136}
1137
1138static int
1139isert_rdma_post_recvl(struct isert_conn *isert_conn)
1140{
1141 struct ib_recv_wr rx_wr, *rx_wr_fail;
1142 struct ib_sge sge;
1143 int ret;
1144
1145 memset(&sge, 0, sizeof(struct ib_sge));
1146 sge.addr = isert_conn->login_req_dma;
1147 sge.length = ISER_RX_LOGIN_SIZE;
dac6ab30 1148 sge.lkey = isert_conn->device->mr->lkey;
b8d26b3b 1149
24f412dd 1150 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
b8d26b3b
NB
1151 sge.addr, sge.length, sge.lkey);
1152
1153 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
b0a191e7 1154 rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
b8d26b3b
NB
1155 rx_wr.sg_list = &sge;
1156 rx_wr.num_sge = 1;
1157
1158 isert_conn->post_recv_buf_count++;
dac6ab30 1159 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
b8d26b3b 1160 if (ret) {
24f412dd 1161 isert_err("ib_post_recv() failed: %d\n", ret);
b8d26b3b
NB
1162 isert_conn->post_recv_buf_count--;
1163 }
1164
b8d26b3b
NB
1165 return ret;
1166}
1167
1168static int
1169isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
1170 u32 length)
1171{
1172 struct isert_conn *isert_conn = conn->context;
dac6ab30 1173 struct isert_device *device = isert_conn->device;
67cb3949 1174 struct ib_device *ib_dev = device->ib_device;
dac6ab30 1175 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
b8d26b3b
NB
1176 int ret;
1177
1178 isert_create_send_desc(isert_conn, NULL, tx_desc);
1179
1180 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
1181 sizeof(struct iscsi_hdr));
1182
1183 isert_init_tx_hdrs(isert_conn, tx_desc);
1184
1185 if (length > 0) {
1186 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
1187
1188 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
1189 length, DMA_TO_DEVICE);
1190
1191 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
1192
1193 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
1194 length, DMA_TO_DEVICE);
1195
1196 tx_dsg->addr = isert_conn->login_rsp_dma;
1197 tx_dsg->length = length;
dac6ab30 1198 tx_dsg->lkey = isert_conn->device->mr->lkey;
b8d26b3b
NB
1199 tx_desc->num_sge = 2;
1200 }
1201 if (!login->login_failed) {
1202 if (login->login_complete) {
e0546fc1 1203 if (!conn->sess->sess_ops->SessionType &&
dac6ab30 1204 isert_conn->device->use_fastreg) {
570db170 1205 ret = isert_conn_create_fastreg_pool(isert_conn);
f46d6a8a 1206 if (ret) {
24f412dd 1207 isert_err("Conn: %p failed to create"
f46d6a8a
NB
1208 " fastreg pool\n", isert_conn);
1209 return ret;
1210 }
1211 }
1212
b8d26b3b
NB
1213 ret = isert_alloc_rx_descriptors(isert_conn);
1214 if (ret)
1215 return ret;
1216
1217 ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
1218 if (ret)
1219 return ret;
1220
128e9cc8 1221 /* Now we are in FULL_FEATURE phase */
dac6ab30 1222 mutex_lock(&isert_conn->mutex);
128e9cc8 1223 isert_conn->state = ISER_CONN_FULL_FEATURE;
dac6ab30 1224 mutex_unlock(&isert_conn->mutex);
b8d26b3b
NB
1225 goto post_send;
1226 }
1227
1228 ret = isert_rdma_post_recvl(isert_conn);
1229 if (ret)
1230 return ret;
1231 }
1232post_send:
1233 ret = isert_post_send(isert_conn, tx_desc);
1234 if (ret)
1235 return ret;
1236
1237 return 0;
1238}
1239
1240static void
2371e5da 1241isert_rx_login_req(struct isert_conn *isert_conn)
b8d26b3b 1242{
2371e5da
SG
1243 struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
1244 int rx_buflen = isert_conn->login_req_len;
b8d26b3b
NB
1245 struct iscsi_conn *conn = isert_conn->conn;
1246 struct iscsi_login *login = conn->conn_login;
1247 int size;
1248
24f412dd 1249 isert_info("conn %p\n", isert_conn);
2371e5da
SG
1250
1251 WARN_ON_ONCE(!login);
b8d26b3b
NB
1252
1253 if (login->first_request) {
1254 struct iscsi_login_req *login_req =
1255 (struct iscsi_login_req *)&rx_desc->iscsi_header;
1256 /*
1257 * Setup the initial iscsi_login values from the leading
1258 * login request PDU.
1259 */
1260 login->leading_connection = (!login_req->tsih) ? 1 : 0;
1261 login->current_stage =
1262 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
1263 >> 2;
1264 login->version_min = login_req->min_version;
1265 login->version_max = login_req->max_version;
1266 memcpy(login->isid, login_req->isid, 6);
1267 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
1268 login->init_task_tag = login_req->itt;
1269 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
1270 login->cid = be16_to_cpu(login_req->cid);
1271 login->tsih = be16_to_cpu(login_req->tsih);
1272 }
1273
1274 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
1275
1276 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
4c22e07f
SG
1277 isert_dbg("Using login payload size: %d, rx_buflen: %d "
1278 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
1279 MAX_KEY_VALUE_PAIRS);
b8d26b3b
NB
1280 memcpy(login->req_buf, &rx_desc->data[0], size);
1281
6faaa85f 1282 if (login->first_request) {
dac6ab30 1283 complete(&isert_conn->login_comp);
6faaa85f
NB
1284 return;
1285 }
1286 schedule_delayed_work(&conn->login_work, 0);
b8d26b3b
NB
1287}
1288
b8d26b3b 1289static struct iscsi_cmd
676687c6 1290*isert_allocate_cmd(struct iscsi_conn *conn)
b8d26b3b 1291{
6700425e 1292 struct isert_conn *isert_conn = conn->context;
b8d26b3b 1293 struct isert_cmd *isert_cmd;
d703ce2f 1294 struct iscsi_cmd *cmd;
b8d26b3b 1295
676687c6 1296 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
d703ce2f 1297 if (!cmd) {
24f412dd 1298 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
b8d26b3b
NB
1299 return NULL;
1300 }
d703ce2f 1301 isert_cmd = iscsit_priv_cmd(cmd);
b8d26b3b 1302 isert_cmd->conn = isert_conn;
d703ce2f 1303 isert_cmd->iscsi_cmd = cmd;
b8d26b3b 1304
d703ce2f 1305 return cmd;
b8d26b3b
NB
1306}
1307
1308static int
1309isert_handle_scsi_cmd(struct isert_conn *isert_conn,
d703ce2f
NB
1310 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
1311 struct iser_rx_desc *rx_desc, unsigned char *buf)
b8d26b3b 1312{
b8d26b3b
NB
1313 struct iscsi_conn *conn = isert_conn->conn;
1314 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1315 struct scatterlist *sg;
1316 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1317 bool dump_payload = false;
1318
1319 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1320 if (rc < 0)
1321 return rc;
1322
1323 imm_data = cmd->immediate_data;
1324 imm_data_len = cmd->first_burst_len;
1325 unsol_data = cmd->unsolicited_data;
1326
1327 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1328 if (rc < 0) {
1329 return 0;
1330 } else if (rc > 0) {
1331 dump_payload = true;
1332 goto sequence_cmd;
1333 }
1334
1335 if (!imm_data)
1336 return 0;
1337
1338 sg = &cmd->se_cmd.t_data_sg[0];
1339 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1340
24f412dd 1341 isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
4c22e07f 1342 sg, sg_nents, &rx_desc->data[0], imm_data_len);
b8d26b3b
NB
1343
1344 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
1345
1346 cmd->write_data_done += imm_data_len;
1347
1348 if (cmd->write_data_done == cmd->se_cmd.data_length) {
1349 spin_lock_bh(&cmd->istate_lock);
1350 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1351 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1352 spin_unlock_bh(&cmd->istate_lock);
1353 }
1354
1355sequence_cmd:
561bf158 1356 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
b8d26b3b
NB
1357
1358 if (!rc && dump_payload == false && unsol_data)
1359 iscsit_set_unsoliticed_dataout(cmd);
6cc44a6f 1360 else if (dump_payload && imm_data)
afc16604 1361 target_put_sess_cmd(&cmd->se_cmd);
b8d26b3b 1362
b8d26b3b
NB
1363 return 0;
1364}
1365
1366static int
1367isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1368 struct iser_rx_desc *rx_desc, unsigned char *buf)
1369{
1370 struct scatterlist *sg_start;
1371 struct iscsi_conn *conn = isert_conn->conn;
1372 struct iscsi_cmd *cmd = NULL;
1373 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1374 u32 unsol_data_len = ntoh24(hdr->dlength);
1375 int rc, sg_nents, sg_off, page_off;
1376
1377 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1378 if (rc < 0)
1379 return rc;
1380 else if (!cmd)
1381 return 0;
1382 /*
1383 * FIXME: Unexpected unsolicited_data out
1384 */
1385 if (!cmd->unsolicited_data) {
24f412dd 1386 isert_err("Received unexpected solicited data payload\n");
b8d26b3b
NB
1387 dump_stack();
1388 return -1;
1389 }
1390
4c22e07f
SG
1391 isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
1392 "write_data_done: %u, data_length: %u\n",
1393 unsol_data_len, cmd->write_data_done,
1394 cmd->se_cmd.data_length);
b8d26b3b
NB
1395
1396 sg_off = cmd->write_data_done / PAGE_SIZE;
1397 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1398 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
1399 page_off = cmd->write_data_done % PAGE_SIZE;
1400 /*
1401 * FIXME: Non page-aligned unsolicited_data out
1402 */
1403 if (page_off) {
4c22e07f 1404 isert_err("unexpected non-page aligned data payload\n");
b8d26b3b
NB
1405 dump_stack();
1406 return -1;
1407 }
4c22e07f
SG
1408 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1409 "sg_nents: %u from %p %u\n", sg_start, sg_off,
1410 sg_nents, &rx_desc->data[0], unsol_data_len);
b8d26b3b
NB
1411
1412 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1413 unsol_data_len);
1414
1415 rc = iscsit_check_dataout_payload(cmd, hdr, false);
1416 if (rc < 0)
1417 return rc;
1418
1419 return 0;
1420}
1421
778de368
NB
1422static int
1423isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
d703ce2f
NB
1424 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1425 unsigned char *buf)
778de368 1426{
778de368
NB
1427 struct iscsi_conn *conn = isert_conn->conn;
1428 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1429 int rc;
1430
1431 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1432 if (rc < 0)
1433 return rc;
1434 /*
1435 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1436 */
1437
1438 return iscsit_process_nop_out(conn, cmd, hdr);
1439}
1440
adb54c29
NB
1441static int
1442isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
d703ce2f
NB
1443 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1444 struct iscsi_text *hdr)
adb54c29 1445{
adb54c29
NB
1446 struct iscsi_conn *conn = isert_conn->conn;
1447 u32 payload_length = ntoh24(hdr->dlength);
1448 int rc;
b44a2b67 1449 unsigned char *text_in = NULL;
adb54c29
NB
1450
1451 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1452 if (rc < 0)
1453 return rc;
1454
b44a2b67
SG
1455 if (payload_length) {
1456 text_in = kzalloc(payload_length, GFP_KERNEL);
1457 if (!text_in) {
1458 isert_err("Unable to allocate text_in of payload_length: %u\n",
1459 payload_length);
1460 return -ENOMEM;
1461 }
adb54c29
NB
1462 }
1463 cmd->text_in_ptr = text_in;
1464
1465 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1466
1467 return iscsit_process_text_cmd(conn, cmd, hdr);
1468}
1469
b8d26b3b
NB
1470static int
1471isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1472 uint32_t read_stag, uint64_t read_va,
1473 uint32_t write_stag, uint64_t write_va)
1474{
1475 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1476 struct iscsi_conn *conn = isert_conn->conn;
1477 struct iscsi_cmd *cmd;
1478 struct isert_cmd *isert_cmd;
1479 int ret = -EINVAL;
1480 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1481
fb140271 1482 if (conn->sess->sess_ops->SessionType &&
ca40d24e 1483 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
24f412dd 1484 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
4c22e07f 1485 " ignoring\n", opcode);
ca40d24e
NB
1486 return 0;
1487 }
1488
b8d26b3b
NB
1489 switch (opcode) {
1490 case ISCSI_OP_SCSI_CMD:
676687c6 1491 cmd = isert_allocate_cmd(conn);
b8d26b3b
NB
1492 if (!cmd)
1493 break;
1494
d703ce2f 1495 isert_cmd = iscsit_priv_cmd(cmd);
b8d26b3b
NB
1496 isert_cmd->read_stag = read_stag;
1497 isert_cmd->read_va = read_va;
1498 isert_cmd->write_stag = write_stag;
1499 isert_cmd->write_va = write_va;
1500
d703ce2f 1501 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
b8d26b3b
NB
1502 rx_desc, (unsigned char *)hdr);
1503 break;
1504 case ISCSI_OP_NOOP_OUT:
676687c6 1505 cmd = isert_allocate_cmd(conn);
b8d26b3b
NB
1506 if (!cmd)
1507 break;
1508
d703ce2f
NB
1509 isert_cmd = iscsit_priv_cmd(cmd);
1510 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
778de368 1511 rx_desc, (unsigned char *)hdr);
b8d26b3b
NB
1512 break;
1513 case ISCSI_OP_SCSI_DATA_OUT:
1514 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1515 (unsigned char *)hdr);
1516 break;
1517 case ISCSI_OP_SCSI_TMFUNC:
676687c6 1518 cmd = isert_allocate_cmd(conn);
b8d26b3b
NB
1519 if (!cmd)
1520 break;
1521
1522 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1523 (unsigned char *)hdr);
1524 break;
1525 case ISCSI_OP_LOGOUT:
676687c6 1526 cmd = isert_allocate_cmd(conn);
b8d26b3b
NB
1527 if (!cmd)
1528 break;
1529
1530 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
b8d26b3b 1531 break;
adb54c29 1532 case ISCSI_OP_TEXT:
e4f4e801
SG
1533 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
1534 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1535 if (!cmd)
1536 break;
1537 } else {
1538 cmd = isert_allocate_cmd(conn);
1539 if (!cmd)
1540 break;
1541 }
adb54c29 1542
d703ce2f
NB
1543 isert_cmd = iscsit_priv_cmd(cmd);
1544 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
adb54c29
NB
1545 rx_desc, (struct iscsi_text *)hdr);
1546 break;
b8d26b3b 1547 default:
24f412dd 1548 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
b8d26b3b
NB
1549 dump_stack();
1550 break;
1551 }
1552
1553 return ret;
1554}
1555
1556static void
1557isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1558{
1559 struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1560 uint64_t read_va = 0, write_va = 0;
1561 uint32_t read_stag = 0, write_stag = 0;
1562 int rc;
1563
1564 switch (iser_hdr->flags & 0xF0) {
1565 case ISCSI_CTRL:
1566 if (iser_hdr->flags & ISER_RSV) {
1567 read_stag = be32_to_cpu(iser_hdr->read_stag);
1568 read_va = be64_to_cpu(iser_hdr->read_va);
4c22e07f
SG
1569 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1570 read_stag, (unsigned long long)read_va);
b8d26b3b
NB
1571 }
1572 if (iser_hdr->flags & ISER_WSV) {
1573 write_stag = be32_to_cpu(iser_hdr->write_stag);
1574 write_va = be64_to_cpu(iser_hdr->write_va);
4c22e07f
SG
1575 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1576 write_stag, (unsigned long long)write_va);
b8d26b3b
NB
1577 }
1578
24f412dd 1579 isert_dbg("ISER ISCSI_CTRL PDU\n");
b8d26b3b
NB
1580 break;
1581 case ISER_HELLO:
24f412dd 1582 isert_err("iSER Hello message\n");
b8d26b3b
NB
1583 break;
1584 default:
24f412dd 1585 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
b8d26b3b
NB
1586 break;
1587 }
1588
1589 rc = isert_rx_opcode(isert_conn, rx_desc,
1590 read_stag, read_va, write_stag, write_va);
1591}
1592
1593static void
7748681b
SG
1594isert_rcv_completion(struct iser_rx_desc *desc,
1595 struct isert_conn *isert_conn,
1596 u32 xfer_len)
b8d26b3b 1597{
dac6ab30 1598 struct ib_device *ib_dev = isert_conn->cm_id->device;
b8d26b3b
NB
1599 struct iscsi_hdr *hdr;
1600 u64 rx_dma;
1601 int rx_buflen, outstanding;
1602
1603 if ((char *)desc == isert_conn->login_req_buf) {
1604 rx_dma = isert_conn->login_req_dma;
1605 rx_buflen = ISER_RX_LOGIN_SIZE;
4c22e07f 1606 isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
b8d26b3b
NB
1607 rx_dma, rx_buflen);
1608 } else {
1609 rx_dma = desc->dma_addr;
1610 rx_buflen = ISER_RX_PAYLOAD_SIZE;
4c22e07f 1611 isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
b8d26b3b
NB
1612 rx_dma, rx_buflen);
1613 }
1614
1615 ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
1616
1617 hdr = &desc->iscsi_header;
24f412dd 1618 isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
b8d26b3b
NB
1619 hdr->opcode, hdr->itt, hdr->flags,
1620 (int)(xfer_len - ISER_HEADERS_LEN));
1621
2371e5da
SG
1622 if ((char *)desc == isert_conn->login_req_buf) {
1623 isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
1624 if (isert_conn->conn) {
1625 struct iscsi_login *login = isert_conn->conn->conn_login;
1626
1627 if (login && !login->first_request)
1628 isert_rx_login_req(isert_conn);
1629 }
dac6ab30 1630 mutex_lock(&isert_conn->mutex);
2371e5da 1631 complete(&isert_conn->login_req_comp);
dac6ab30 1632 mutex_unlock(&isert_conn->mutex);
2371e5da 1633 } else {
b8d26b3b 1634 isert_rx_do_work(desc, isert_conn);
2371e5da 1635 }
b8d26b3b
NB
1636
1637 ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1638 DMA_FROM_DEVICE);
1639
1640 isert_conn->post_recv_buf_count--;
4c22e07f
SG
1641 isert_dbg("Decremented post_recv_buf_count: %d\n",
1642 isert_conn->post_recv_buf_count);
b8d26b3b
NB
1643
1644 if ((char *)desc == isert_conn->login_req_buf)
1645 return;
1646
1647 outstanding = isert_conn->post_recv_buf_count;
1648 if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1649 int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1650 ISERT_MIN_POSTED_RX);
1651 err = isert_post_recv(isert_conn, count);
1652 if (err) {
24f412dd 1653 isert_err("isert_post_recv() count: %d failed, %d\n",
b8d26b3b
NB
1654 count, err);
1655 }
1656 }
1657}
1658
e3d7e4c3
SG
1659static int
1660isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1661 struct scatterlist *sg, u32 nents, u32 length, u32 offset,
1662 enum iser_ib_op_code op, struct isert_data_buf *data)
1663{
dac6ab30 1664 struct ib_device *ib_dev = isert_conn->cm_id->device;
e3d7e4c3
SG
1665
1666 data->dma_dir = op == ISER_IB_RDMA_WRITE ?
1667 DMA_TO_DEVICE : DMA_FROM_DEVICE;
1668
1669 data->len = length - offset;
1670 data->offset = offset;
1671 data->sg_off = data->offset / PAGE_SIZE;
1672
1673 data->sg = &sg[data->sg_off];
1674 data->nents = min_t(unsigned int, nents - data->sg_off,
1675 ISCSI_ISER_SG_TABLESIZE);
1676 data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
1677 PAGE_SIZE);
1678
1679 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
1680 data->dma_dir);
1681 if (unlikely(!data->dma_nents)) {
24f412dd 1682 isert_err("Cmd: unable to dma map SGs %p\n", sg);
e3d7e4c3
SG
1683 return -EINVAL;
1684 }
1685
24f412dd 1686 isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
4c22e07f 1687 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
e3d7e4c3
SG
1688
1689 return 0;
1690}
1691
1692static void
1693isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1694{
dac6ab30 1695 struct ib_device *ib_dev = isert_conn->cm_id->device;
e3d7e4c3
SG
1696
1697 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
1698 memset(data, 0, sizeof(*data));
1699}
1700
1701
1702
b8d26b3b
NB
1703static void
1704isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1705{
1706 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
b8d26b3b 1707
4c22e07f 1708 isert_dbg("Cmd %p\n", isert_cmd);
e3d7e4c3
SG
1709
1710 if (wr->data.sg) {
4c22e07f 1711 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
e3d7e4c3 1712 isert_unmap_data_buf(isert_conn, &wr->data);
b8d26b3b
NB
1713 }
1714
90ecc6e2 1715 if (wr->send_wr) {
4c22e07f 1716 isert_dbg("Cmd %p free send_wr\n", isert_cmd);
90ecc6e2
VP
1717 kfree(wr->send_wr);
1718 wr->send_wr = NULL;
1719 }
b8d26b3b 1720
90ecc6e2 1721 if (wr->ib_sge) {
4c22e07f 1722 isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
90ecc6e2
VP
1723 kfree(wr->ib_sge);
1724 wr->ib_sge = NULL;
1725 }
b8d26b3b
NB
1726}
1727
59464ef4 1728static void
a3a5a826 1729isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
59464ef4
VP
1730{
1731 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
59464ef4 1732
4c22e07f 1733 isert_dbg("Cmd %p\n", isert_cmd);
59464ef4
VP
1734
1735 if (wr->fr_desc) {
4c22e07f 1736 isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
9e961ae7
SG
1737 if (wr->fr_desc->ind & ISERT_PROTECTED) {
1738 isert_unmap_data_buf(isert_conn, &wr->prot);
1739 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1740 }
dac6ab30
SG
1741 spin_lock_bh(&isert_conn->pool_lock);
1742 list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
1743 spin_unlock_bh(&isert_conn->pool_lock);
59464ef4
VP
1744 wr->fr_desc = NULL;
1745 }
1746
e3d7e4c3 1747 if (wr->data.sg) {
4c22e07f 1748 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
e3d7e4c3 1749 isert_unmap_data_buf(isert_conn, &wr->data);
59464ef4
VP
1750 }
1751
1752 wr->ib_sge = NULL;
1753 wr->send_wr = NULL;
1754}
1755
b8d26b3b 1756static void
03e7848a 1757isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
b8d26b3b 1758{
d703ce2f 1759 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
b8d26b3b 1760 struct isert_conn *isert_conn = isert_cmd->conn;
186a9647 1761 struct iscsi_conn *conn = isert_conn->conn;
dac6ab30 1762 struct isert_device *device = isert_conn->device;
e4f4e801 1763 struct iscsi_text_rsp *hdr;
b8d26b3b 1764
4c22e07f 1765 isert_dbg("Cmd %p\n", isert_cmd);
b8d26b3b
NB
1766
1767 switch (cmd->iscsi_opcode) {
1768 case ISCSI_OP_SCSI_CMD:
b8d26b3b
NB
1769 spin_lock_bh(&conn->cmd_lock);
1770 if (!list_empty(&cmd->i_conn_node))
5159d763 1771 list_del_init(&cmd->i_conn_node);
b8d26b3b
NB
1772 spin_unlock_bh(&conn->cmd_lock);
1773
03e7848a 1774 if (cmd->data_direction == DMA_TO_DEVICE) {
b8d26b3b 1775 iscsit_stop_dataout_timer(cmd);
03e7848a
NB
1776 /*
1777 * Check for special case during comp_err where
1778 * WRITE_PENDING has been handed off from core,
1779 * but requires an extra target_put_sess_cmd()
1780 * before transport_generic_free_cmd() below.
1781 */
1782 if (comp_err &&
1783 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1784 struct se_cmd *se_cmd = &cmd->se_cmd;
1785
afc16604 1786 target_put_sess_cmd(se_cmd);
03e7848a
NB
1787 }
1788 }
b8d26b3b 1789
d40945d8 1790 device->unreg_rdma_mem(isert_cmd, isert_conn);
186a9647
NB
1791 transport_generic_free_cmd(&cmd->se_cmd, 0);
1792 break;
b8d26b3b 1793 case ISCSI_OP_SCSI_TMFUNC:
186a9647
NB
1794 spin_lock_bh(&conn->cmd_lock);
1795 if (!list_empty(&cmd->i_conn_node))
5159d763 1796 list_del_init(&cmd->i_conn_node);
186a9647
NB
1797 spin_unlock_bh(&conn->cmd_lock);
1798
b8d26b3b
NB
1799 transport_generic_free_cmd(&cmd->se_cmd, 0);
1800 break;
1801 case ISCSI_OP_REJECT:
1802 case ISCSI_OP_NOOP_OUT:
adb54c29 1803 case ISCSI_OP_TEXT:
e4f4e801
SG
1804 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1805 /* If the continue bit is on, keep the command alive */
1806 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
1807 break;
1808
b8d26b3b
NB
1809 spin_lock_bh(&conn->cmd_lock);
1810 if (!list_empty(&cmd->i_conn_node))
5159d763 1811 list_del_init(&cmd->i_conn_node);
b8d26b3b
NB
1812 spin_unlock_bh(&conn->cmd_lock);
1813
1814 /*
1815 * Handle special case for REJECT when iscsi_add_reject*() has
1816 * overwritten the original iscsi_opcode assignment, and the
1817 * associated cmd->se_cmd needs to be released.
1818 */
1819 if (cmd->se_cmd.se_tfo != NULL) {
11378cdb 1820 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
3df8f68a 1821 cmd->iscsi_opcode);
b8d26b3b
NB
1822 transport_generic_free_cmd(&cmd->se_cmd, 0);
1823 break;
1824 }
1825 /*
1826 * Fall-through
1827 */
1828 default:
d703ce2f 1829 iscsit_release_cmd(cmd);
b8d26b3b
NB
1830 break;
1831 }
1832}
1833
1834static void
1835isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1836{
1837 if (tx_desc->dma_addr != 0) {
4c22e07f 1838 isert_dbg("unmap single for tx_desc->dma_addr\n");
b8d26b3b
NB
1839 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1840 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1841 tx_desc->dma_addr = 0;
1842 }
1843}
1844
1845static void
1846isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
03e7848a 1847 struct ib_device *ib_dev, bool comp_err)
b8d26b3b 1848{
dbbc5d11 1849 if (isert_cmd->pdu_buf_dma != 0) {
4c22e07f 1850 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
dbbc5d11
NB
1851 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1852 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1853 isert_cmd->pdu_buf_dma = 0;
b8d26b3b
NB
1854 }
1855
1856 isert_unmap_tx_desc(tx_desc, ib_dev);
03e7848a 1857 isert_put_cmd(isert_cmd, comp_err);
b8d26b3b
NB
1858}
1859
96b7973e
SG
1860static int
1861isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1862{
1863 struct ib_mr_status mr_status;
1864 int ret;
1865
1866 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1867 if (ret) {
24f412dd 1868 isert_err("ib_check_mr_status failed, ret %d\n", ret);
96b7973e
SG
1869 goto fail_mr_status;
1870 }
1871
1872 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1873 u64 sec_offset_err;
1874 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1875
1876 switch (mr_status.sig_err.err_type) {
1877 case IB_SIG_BAD_GUARD:
1878 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1879 break;
1880 case IB_SIG_BAD_REFTAG:
1881 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1882 break;
1883 case IB_SIG_BAD_APPTAG:
1884 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1885 break;
1886 }
1887 sec_offset_err = mr_status.sig_err.sig_err_offset;
1888 do_div(sec_offset_err, block_size);
1889 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1890
4c22e07f
SG
1891 isert_err("PI error found type %d at sector 0x%llx "
1892 "expected 0x%x vs actual 0x%x\n",
1893 mr_status.sig_err.err_type,
1894 (unsigned long long)se_cmd->bad_sector,
1895 mr_status.sig_err.expected,
1896 mr_status.sig_err.actual);
96b7973e
SG
1897 ret = 1;
1898 }
1899
1900fail_mr_status:
1901 return ret;
1902}
1903
f93f3a70
SG
1904static void
1905isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
1906 struct isert_cmd *isert_cmd)
1907{
9e961ae7 1908 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
f93f3a70 1909 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
9e961ae7 1910 struct se_cmd *se_cmd = &cmd->se_cmd;
f93f3a70 1911 struct isert_conn *isert_conn = isert_cmd->conn;
dac6ab30 1912 struct isert_device *device = isert_conn->device;
9e961ae7
SG
1913 int ret = 0;
1914
1915 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
96b7973e
SG
1916 ret = isert_check_pi_status(se_cmd,
1917 wr->fr_desc->pi_ctx->sig_mr);
1918 wr->fr_desc->ind &= ~ISERT_PROTECTED;
9e961ae7 1919 }
f93f3a70
SG
1920
1921 device->unreg_rdma_mem(isert_cmd, isert_conn);
897bb2c9 1922 wr->send_wr_num = 0;
9e961ae7
SG
1923 if (ret)
1924 transport_send_check_condition_and_sense(se_cmd,
1925 se_cmd->pi_err, 0);
1926 else
1927 isert_put_response(isert_conn->conn, cmd);
f93f3a70
SG
1928}
/*
 * RDMA_READ completion: incoming WRITE data has landed.  Check PI
 * status if protected, tear down the RDMA registration, mark the
 * data-out phase complete, and hand the command to the target core
 * for execution (or fail it with a check condition on PI error).
 */
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	wr->send_wr_num = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret) {
		/* Drop the extra kref taken for execution before failing. */
		target_put_sess_cmd(se_cmd);
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	} else {
		target_execute_cmd(se_cmd);
	}
}
/*
 * Workqueue handler for control-PDU send completions that need to run
 * in process context (TMR, reject, text and logout post-handlers).
 */
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
		/* fallthrough: TMR also transitions to SENT_STATUS below */
	case ISTATE_SEND_REJECT:   /* FALLTHRU */
	case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
/*
 * Send completion for a response PDU.  Control responses that require
 * process-context post-handling are deferred to isert_comp_wq; plain
 * responses release their resources immediately.
 */
static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
/*
 * Dispatch a TX (send-queue) completion according to the ib op that
 * was posted: plain SEND, RDMA_WRITE or RDMA_READ.  A descriptor with
 * no attached isert_cmd (e.g. login response) is just unmapped.
 */
static void
isert_snd_completion(struct iser_tx_desc *tx_desc,
		     struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);

	switch (wr->iser_ib_op) {
	case ISER_IB_SEND:
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}
2053
6f0fae3d
SG
2054/**
2055 * is_isert_tx_desc() - Indicate if the completion wr_id
2056 * is a TX descriptor or not.
2057 * @isert_conn: iser connection
2058 * @wr_id: completion WR identifier
2059 *
2060 * Since we cannot rely on wc opcode in FLUSH errors
2061 * we must work around it by checking if the wr_id address
2062 * falls in the iser connection rx_descs buffer. If so
2063 * it is an RX descriptor, otherwize it is a TX.
2064 */
2065static inline bool
2066is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
2067{
dac6ab30
SG
2068 void *start = isert_conn->rx_descs;
2069 int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
6f0fae3d
SG
2070
2071 if (wr_id >= start && wr_id < start + len)
2072 return false;
2073
2074 return true;
2075}
/*
 * Handle a flush/error completion.  Three cases: the drain beacon
 * (wakes the teardown waiter), a flushed TX descriptor (release its
 * resources), or a flushed RX buffer (trigger connection
 * reinstatement once the last one drains).
 */
static void
isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
	if (wc->wr_id == ISER_BEACON_WRID) {
		isert_info("conn %p completing wait_comp_err\n",
			   isert_conn);
		complete(&isert_conn->wait_comp_err);
	} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
		struct ib_device *ib_dev = isert_conn->cm_id->device;
		struct isert_cmd *isert_cmd;
		struct iser_tx_desc *desc;

		desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
		isert_cmd = desc->isert_cmd;
		if (!isert_cmd)
			isert_unmap_tx_desc(desc, ib_dev);
		else
			isert_completion_put(desc, isert_cmd, ib_dev, true);
	} else {
		isert_conn->post_recv_buf_count--;
		if (!isert_conn->post_recv_buf_count)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}
}
/*
 * Process a single work completion: successful completions are routed
 * by opcode to the RX or TX path; failed completions are logged and
 * (except for local-invalidate/fastreg WRs) sent to the error path.
 */
static void
isert_handle_wc(struct ib_wc *wc)
{
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	isert_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
		} else {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			isert_snd_completion(tx_desc, isert_conn);
		}
	} else {
		/* Flush errors during teardown are expected; log quietly. */
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			isert_err("%s (%d): wr id %llx vend_err %x\n",
				  ib_wc_status_msg(wc->status), wc->status,
				  wc->wr_id, wc->vendor_err);
		else
			isert_dbg("%s (%d): wr id %llx\n",
				  ib_wc_status_msg(wc->status), wc->status,
				  wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID)
			isert_cq_comp_err(isert_conn, wc);
	}
}
/*
 * Workqueue CQ poller: drain completions in batches of comp->wcs,
 * bounded by a budget so one connection cannot starve the workqueue,
 * then re-arm the CQ for the next notification.
 */
static void
isert_cq_work(struct work_struct *work)
{
	enum { isert_poll_budget = 65536 };
	struct isert_comp *comp = container_of(work, struct isert_comp,
					       work);
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			isert_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= isert_poll_budget)
			break;
	}

	/* Re-arm; a completion racing the re-arm re-queues this work. */
	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
}
2153
2154static void
6f0fae3d 2155isert_cq_callback(struct ib_cq *cq, void *context)
b8d26b3b 2156{
4a295bae 2157 struct isert_comp *comp = context;
b8d26b3b 2158
6f0fae3d 2159 queue_work(isert_comp_wq, &comp->work);
b8d26b3b
NB
2160}
2161
2162static int
2163isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2164{
2165 struct ib_send_wr *wr_failed;
2166 int ret;
2167
dac6ab30 2168 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
b8d26b3b
NB
2169 &wr_failed);
2170 if (ret) {
24f412dd 2171 isert_err("ib_post_send failed with %d\n", ret);
b8d26b3b
NB
2172 return ret;
2173 }
2174 return ret;
2175}
/*
 * Build and post a SCSI Response PDU.  If the command carries sense
 * data, it is DMA-mapped and attached as a second SGE on the send WR.
 */
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		/* Sense payload is prefixed with its big-endian length. */
		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		/* Pad the data segment to a 4-byte boundary per RFC 7143. */
		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = pdu_len;
		tx_dsg->lkey = device->mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
}
2225
131e6abc
NB
2226static void
2227isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2228{
2229 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2230 struct isert_conn *isert_conn = conn->context;
dac6ab30 2231 struct isert_device *device = isert_conn->device;
131e6abc
NB
2232
2233 spin_lock_bh(&conn->cmd_lock);
2234 if (!list_empty(&cmd->i_conn_node))
2235 list_del_init(&cmd->i_conn_node);
2236 spin_unlock_bh(&conn->cmd_lock);
2237
2238 if (cmd->data_direction == DMA_TO_DEVICE)
2239 iscsit_stop_dataout_timer(cmd);
2240
2241 device->unreg_rdma_mem(isert_cmd, isert_conn);
2242}
2243
e70beee7
NB
2244static enum target_prot_op
2245isert_get_sup_prot_ops(struct iscsi_conn *conn)
2246{
6700425e 2247 struct isert_conn *isert_conn = conn->context;
dac6ab30 2248 struct isert_device *device = isert_conn->device;
e70beee7 2249
23a548ee
SG
2250 if (conn->tpg->tpg_attrib.t10_pi) {
2251 if (device->pi_capable) {
24f412dd 2252 isert_info("conn %p PI offload enabled\n", isert_conn);
23a548ee
SG
2253 isert_conn->pi_support = true;
2254 return TARGET_PROT_ALL;
2255 }
2256 }
2257
24f412dd 2258 isert_info("conn %p PI offload disabled\n", isert_conn);
23a548ee 2259 isert_conn->pi_support = false;
e70beee7
NB
2260
2261 return TARGET_PROT_NORMAL;
2262}
2263
b8d26b3b
NB
2264static int
2265isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2266 bool nopout_response)
2267{
d703ce2f 2268 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2269 struct isert_conn *isert_conn = conn->context;
b8d26b3b
NB
2270 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2271
2272 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2273 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
2274 &isert_cmd->tx_desc.iscsi_header,
2275 nopout_response);
2276 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
68a86dee 2277 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
b8d26b3b 2278
4c22e07f 2279 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
b8d26b3b
NB
2280
2281 return isert_post_response(isert_conn, isert_cmd);
2282}
2283
2284static int
2285isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2286{
d703ce2f 2287 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2288 struct isert_conn *isert_conn = conn->context;
b8d26b3b
NB
2289 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2290
2291 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2292 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
2293 &isert_cmd->tx_desc.iscsi_header);
2294 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
68a86dee 2295 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
b8d26b3b 2296
4c22e07f 2297 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
b8d26b3b
NB
2298
2299 return isert_post_response(isert_conn, isert_cmd);
2300}
2301
2302static int
2303isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2304{
d703ce2f 2305 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2306 struct isert_conn *isert_conn = conn->context;
b8d26b3b
NB
2307 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2308
2309 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2310 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
2311 &isert_cmd->tx_desc.iscsi_header);
2312 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
68a86dee 2313 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
b8d26b3b 2314
4c22e07f 2315 isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
b8d26b3b
NB
2316
2317 return isert_post_response(isert_conn, isert_cmd);
2318}
/*
 * Build and post a Reject PDU.  The original (rejected) PDU header is
 * DMA-mapped from cmd->buf_ptr and carried as the data segment in a
 * second SGE.
 */
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	/* Data segment is the rejected PDU's basic header (48 bytes). */
	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
/*
 * Build and post a Text Response PDU.  A non-empty text payload is
 * DMA-mapped from cmd->buf_ptr and attached as a second SGE.
 */
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	/* Returns the text payload length, or negative errno. */
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
/*
 * Fill one RDMA send WR's SGE list from the command's TCM scatterlist,
 * starting at byte @offset and covering at most @data_left bytes.
 * Returns the number of SGEs consumed (also stored in send_wr->num_sge).
 */
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	/* NOTE(review): assumes each SG entry maps one PAGE_SIZE chunk. */
	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
			  "page_off: %u\n",
			  (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = device->mr->lkey;

		isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
			  ib_sge->addr, ib_sge->length, ib_sge->lkey);
		/* Only the first SGE may start mid-page. */
		page_off = 0;
		data_left -= ib_sge->length;
		if (!data_left)
			break;
		ib_sge++;
		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	/* i is the index of the last SGE used; count is i + 1. */
	send_wr->num_sge = ++i;
	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		  send_wr->sg_list, send_wr->num_sge);

	return send_wr->num_sge;
}
/*
 * Non-fastreg RDMA setup: DMA-map the command's data buffer and build
 * a chain of RDMA_WRITE (or RDMA_READ) WRs covering it, splitting at
 * max_sge * PAGE_SIZE per WR.  On the WRITE path the last WR chains to
 * the response send WR; on the READ path the last WR is signaled.
 */
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_data_buf *data = &wr->data;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, i, ib_sge_cnt;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	/* Data-out may resume mid-transfer; writes always start at 0. */
	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	data_left = data->len;
	offset = data->offset;

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		isert_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	wr->ib_sge = ib_sge;

	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
				GFP_KERNEL);
	if (!wr->send_wr) {
		isert_dbg("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			send_wr->opcode = IB_WR_RDMA_WRITE;
			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
			/* Last WRITE WR chains into the response send. */
			if (i + 1 == wr->send_wr_num)
				send_wr->next = &isert_cmd->tx_desc.send_wr;
			else
				send_wr->next = &wr->send_wr[i + 1];
		} else {
			send_wr->opcode = IB_WR_RDMA_READ;
			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
			/* Only the last READ WR generates a completion. */
			if (i + 1 == wr->send_wr_num)
				send_wr->send_flags = IB_SEND_SIGNALED;
			else
				send_wr->next = &wr->send_wr[i + 1];
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;
unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}
/*
 * Translate a DMA-mapped scatterlist into a fast-registration page
 * list.  Physically contiguous SG entries are coalesced into chunks;
 * each chunk is then emitted one PAGE_SIZE-aligned address at a time.
 * Returns the number of pages written to @fr_pl.
 */
static int
isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
			  i, (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length);

		/* Entry ends mid-page: keep extending the current chunk. */
		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* Flush the finished chunk as page-aligned addresses. */
		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
				  n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}
/*
 * Prepare a LOCAL_INV WR that invalidates @mr's current rkey, and bump
 * the MR key so the subsequent re-registration uses a fresh rkey.
 */
static inline void
isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	/* Bump the key */
	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}
/*
 * Register @mem (data or protection buffer, selected by @ind) with a
 * fast-registration MR and fill @sge with the resulting lkey/addr/len.
 * A single-entry mapping skips registration and uses the global DMA MR.
 * If the key is still valid from a previous use, a LOCAL_INV WR is
 * chained in front of the FASTREG WR.
 */
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, pagelist_len;
	u32 page_off;

	/* Single DMA entry: no registration needed, use the global MR. */
	if (mem->dma_nents == 1) {
		sge->lkey = device->mr->lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
			  sge->addr, sge->length, sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID) {
		/* Registering data buffer */
		mr = fr_desc->data_mr;
		frpl = fr_desc->data_frpl;
	} else {
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;
		frpl = fr_desc->pi_ctx->prot_frpl;
	}

	page_off = mem->offset % PAGE_SIZE;

	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
		  fr_desc, mem->nents, mem->offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
					     &frpl->page_list[0]);

	/* Key still valid => invalidate it before re-registering. */
	if (!(fr_desc->ind & ind)) {
		isert_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = mem->len;
	fr_wr.wr.fast_reg.rkey = mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	/* Mark the key as consumed (invalid until next invalidate). */
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + page_off;
	sge->length = mem->len;

	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
		  sge->addr, sge->length, sge->lkey);

	return ret;
}
2663
3d73cf1a
SG
2664static inline void
2665isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2666 struct ib_sig_domain *domain)
2667{
78eda2bb 2668 domain->sig_type = IB_SIG_TYPE_T10_DIF;
3d73cf1a
SG
2669 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2670 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2671 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
78eda2bb
SG
2672 /*
2673 * At the moment we hard code those, but if in the future
2674 * the target core would like to use it, we will take it
2675 * from se_cmd.
2676 */
2677 domain->sig.dif.apptag_check_mask = 0xffff;
2678 domain->sig.dif.app_escape = true;
2679 domain->sig.dif.ref_escape = true;
2680 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2681 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2682 domain->sig.dif.ref_remap = true;
3d73cf1a
SG
2683};
/*
 * Configure signature attributes from the command's protection
 * operation: INSERT/STRIP protect only one side (the other is
 * IB_SIG_TYPE_NONE), while PASS carries protection on both wire and
 * memory domains.  Returns -EINVAL for unknown operations.
 */
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}
2711
2712static inline u8
2713isert_set_prot_checks(u8 prot_checks)
2714{
2715 return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2716 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2717 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2718}
/*
 * Register the signature MR (IB_WR_REG_SIG_MR) that binds the data
 * SGE and optional protection SGE under a single signature-enabled
 * key, and fill rdma_wr->ib_sg[SIG] for the subsequent RDMA.  An
 * invalidate WR is chained in front when the sig key is still valid.
 */
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct se_cmd *se_cmd,
		 struct isert_rdma_wr *rdma_wr,
		 struct fast_reg_descriptor *fr_desc)
{
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	rdma_wr->ib_sg[SIG].addr = 0;
	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;

	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		  rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
		  rdma_wr->ib_sg[SIG].lkey);
err:
	return ret;
}
/*
 * Set up T10-PI for a protected command: lazily allocate the pi_ctx
 * on the fastreg descriptor, map and fast-register the protection
 * scatterlist (if any), then register the signature MR.  On success
 * the descriptor is flagged ISERT_PROTECTED so completion paths know
 * to verify PI status.
 */
static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd,
		      struct isert_rdma_wr *wr)
{
	struct isert_device *device = isert_conn->device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	if (!wr->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(wr->fr_desc,
					  device->ib_device,
					  device->pd);
		if (ret) {
			isert_err("conn %p failed to allocate pi_ctx\n",
				  isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0, wr->iser_ib_op, &wr->prot);
		if (ret) {
			isert_err("conn %p failed to map protection buffer\n",
				  isert_conn);
			return ret;
		}

		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
		if (ret) {
			isert_err("conn %p failed to fast reg mr\n",
				  isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
	if (ret) {
		isert_err("conn %p failed to fast reg mr\n",
			  isert_conn);
		goto unmap_prot_cmd;
	}
	wr->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);

	return ret;
}
2843
/*
 * FRWR-based reg_rdma_mem backend: map the command's data scatterlist,
 * fast-register it (taking a descriptor from the connection's fr_pool when
 * more than one DMA segment or PI is involved), optionally set up the
 * signature MR for PI commands, and build the single RDMA READ/WRITE
 * send WR that transfers the payload.
 *
 * Returns 0 on success, negative errno on failure (descriptor returned to
 * the pool and data buffer unmapped on error).
 */
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	/* RDMA READ (Data-Out) resumes after any data already received */
	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	/*
	 * A single DMA segment without PI can be described directly;
	 * otherwise take a fast-registration descriptor from the pool.
	 */
	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		fr_desc = list_first_entry(&isert_conn->fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		/* PI commands transfer through the signature MR sge */
		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}

	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
	wr->ib_sge = &wr->s_ib_sge;
	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;
	wr->isert_cmd = isert_cmd;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = &wr->s_ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		/*
		 * Non-PI writes are chained with the response PDU, which
		 * carries the completion; PI writes must signal themselves.
		 */
		send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
				      0 : IB_SEND_SIGNALED;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}
2927
/*
 * iscsit_queue_data_in callback: post an RDMA_WRITE that pushes Data-IN
 * to the initiator.  For non-PI commands the SCSI response PDU WR is
 * chained behind the RDMA_WRITE so both post in one ib_post_send(); for
 * PI commands the response is deferred until signature status is checked.
 *
 * Returns 1 on success (response handling owned here), negative on error.
 */
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);	/* FRWR or DMA backend */
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);
		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		wr->send_wr_num += 1;
	}

	rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");

	if (!isert_prot_cmd(isert_conn, se_cmd))
		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			  "READ\n", isert_cmd);
	else
		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			  isert_cmd);

	return 1;
}
2977
/*
 * iscsit_get_dataout callback: post an RDMA_READ pulling WRITE payload
 * (Data-Out) from the initiator.  @recovery is unused here; the mapping
 * offset is derived from cmd->write_data_done inside reg_rdma_mem.
 *
 * Returns 0 on success, negative errno if RDMA resources could not be
 * prepared.
 */
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);	/* FRWR or DMA backend */
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);

	return 0;
}
3007
3008static int
3009isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3010{
3011 int ret;
3012
3013 switch (state) {
3014 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3015 ret = isert_put_nopin(cmd, conn, false);
3016 break;
3017 default:
24f412dd 3018 isert_err("Unknown immediate state: 0x%02x\n", state);
b8d26b3b
NB
3019 ret = -EINVAL;
3020 break;
3021 }
3022
3023 return ret;
3024}
3025
3026static int
3027isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3028{
991bb764 3029 struct isert_conn *isert_conn = conn->context;
b8d26b3b
NB
3030 int ret;
3031
3032 switch (state) {
3033 case ISTATE_SEND_LOGOUTRSP:
3034 ret = isert_put_logout_rsp(cmd, conn);
991bb764
SG
3035 if (!ret)
3036 isert_conn->logout_posted = true;
b8d26b3b
NB
3037 break;
3038 case ISTATE_SEND_NOPIN:
3039 ret = isert_put_nopin(cmd, conn, true);
3040 break;
3041 case ISTATE_SEND_TASKMGTRSP:
3042 ret = isert_put_tm_rsp(cmd, conn);
3043 break;
3044 case ISTATE_SEND_REJECT:
3045 ret = isert_put_reject(cmd, conn);
3046 break;
adb54c29
NB
3047 case ISTATE_SEND_TEXTRSP:
3048 ret = isert_put_text_rsp(cmd, conn);
3049 break;
b8d26b3b
NB
3050 case ISTATE_SEND_STATUS:
3051 /*
3052 * Special case for sending non GOOD SCSI status from TX thread
3053 * context during pre se_cmd excecution failure.
3054 */
3055 ret = isert_put_response(conn, cmd);
3056 break;
3057 default:
24f412dd 3058 isert_err("Unknown response state: 0x%02x\n", state);
b8d26b3b
NB
3059 ret = -EINVAL;
3060 break;
3061 }
3062
3063 return ret;
3064}
3065
/*
 * Create, bind and listen on an RDMA CM id for a network portal.
 *
 * Returns the listening cm_id on success or an ERR_PTR() on failure;
 * the id is destroyed again on any post-create error.
 */
struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	/* backlog 0: pending connects are queued on isert_np lists instead */
	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}
3104
/*
 * iscsit_setup_np callback: allocate the iSER network-portal state,
 * record the portal sockaddr, and start listening via isert_setup_id().
 *
 * Returns 0 on success, negative errno on failure (isert_np freed).
 */
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	/* np_sem is up()ed by the CM handler for each pending connect */
	sema_init(&isert_np->np_sem, 0);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}
3147
b8d26b3b
NB
3148static int
3149isert_rdma_accept(struct isert_conn *isert_conn)
3150{
dac6ab30 3151 struct rdma_cm_id *cm_id = isert_conn->cm_id;
b8d26b3b
NB
3152 struct rdma_conn_param cp;
3153 int ret;
3154
3155 memset(&cp, 0, sizeof(struct rdma_conn_param));
b8d26b3b
NB
3156 cp.initiator_depth = isert_conn->initiator_depth;
3157 cp.retry_count = 7;
3158 cp.rnr_retry_count = 7;
3159
b8d26b3b
NB
3160 ret = rdma_accept(cm_id, &cp);
3161 if (ret) {
24f412dd 3162 isert_err("rdma_accept() failed with: %d\n", ret);
b8d26b3b
NB
3163 return ret;
3164 }
3165
b8d26b3b
NB
3166 return 0;
3167}
3168
/*
 * iscsit_get_login_rx callback: wait for a login request PDU to arrive
 * over RDMA and, for the first PDU only, copy it into the target core's
 * login buffers and wait for login_comp.
 *
 * Returns 0 on success, negative errno if interrupted.
 */
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	/* re-arm for the next login PDU on this connection */
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}
3204
3205static void
3206isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
3207 struct isert_conn *isert_conn)
3208{
dac6ab30 3209 struct rdma_cm_id *cm_id = isert_conn->cm_id;
b8d26b3b
NB
3210 struct rdma_route *cm_route = &cm_id->route;
3211 struct sockaddr_in *sock_in;
3212 struct sockaddr_in6 *sock_in6;
3213
3214 conn->login_family = np->np_sockaddr.ss_family;
3215
3216 if (np->np_sockaddr.ss_family == AF_INET6) {
3217 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
3218 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
3219 &sock_in6->sin6_addr.in6_u);
3220 conn->login_port = ntohs(sock_in6->sin6_port);
3221
3222 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
3223 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
3224 &sock_in6->sin6_addr.in6_u);
3225 conn->local_port = ntohs(sock_in6->sin6_port);
3226 } else {
3227 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
3228 sprintf(conn->login_ip, "%pI4",
3229 &sock_in->sin_addr.s_addr);
3230 conn->login_port = ntohs(sock_in->sin_port);
3231
3232 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
3233 sprintf(conn->local_ip, "%pI4",
3234 &sock_in->sin_addr.s_addr);
3235 conn->local_port = ntohs(sock_in->sin_port);
3236 }
3237}
3238
/*
 * iscsit_accept_np callback: block on np_sem until the CM handler queues
 * a new connection, then hand the first entry of np_accept_list to the
 * iSCSI login thread.  Bails out with -ENODEV when interrupted or when
 * the np thread is being reset/shut down.
 */
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	/* one up() per queued connection; interruptible for shutdown */
	ret = down_interruptible(&isert_np->np_sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			 np->np_thread_state);
		/**
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 **/
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		/* spurious wakeup or entry already consumed - wait again */
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
				      struct isert_conn, accept_node);
	list_del_init(&isert_conn->accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
3283
/*
 * iscsit_free_np callback: stop listening and release the portal state,
 * tearing down any connections that finished RDMA establishment but never
 * started iSCSI login.
 */
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->np_cm_id)
		rdma_destroy_id(isert_np->np_cm_id);

	/*
	 * FIXME: At this point we don't have a good way to insure
	 * that at this point we don't have hanging connections that
	 * completed RDMA establishment but didn't start iscsi login
	 * process. So work-around this by cleaning up what ever piled
	 * up in np_accept_list.
	 */
	mutex_lock(&isert_np->np_accept_mutex);
	if (!list_empty(&isert_np->np_accept_list)) {
		isert_info("Still have isert connections, cleaning up...\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->np_accept_list,
					 accept_node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->np_accept_mutex);

	np->np_context = NULL;
	kfree(isert_np);
}
3316
b02efbfc
SG
3317static void isert_release_work(struct work_struct *work)
3318{
3319 struct isert_conn *isert_conn = container_of(work,
3320 struct isert_conn,
3321 release_work);
3322
24f412dd 3323 isert_info("Starting release conn %p\n", isert_conn);
b02efbfc 3324
dac6ab30 3325 wait_for_completion(&isert_conn->wait);
b02efbfc 3326
dac6ab30 3327 mutex_lock(&isert_conn->mutex);
b02efbfc 3328 isert_conn->state = ISER_CONN_DOWN;
dac6ab30 3329 mutex_unlock(&isert_conn->mutex);
b02efbfc 3330
24f412dd 3331 isert_info("Destroying conn %p\n", isert_conn);
b02efbfc
SG
3332 isert_put_conn(isert_conn);
3333}
3334
991bb764
SG
3335static void
3336isert_wait4logout(struct isert_conn *isert_conn)
3337{
3338 struct iscsi_conn *conn = isert_conn->conn;
3339
4c22e07f
SG
3340 isert_info("conn %p\n", isert_conn);
3341
991bb764 3342 if (isert_conn->logout_posted) {
24f412dd 3343 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
991bb764
SG
3344 wait_for_completion_timeout(&conn->conn_logout_comp,
3345 SECONDS_FOR_LOGOUT_COMP * HZ);
3346 }
3347}
3348
c7e160ee
SG
3349static void
3350isert_wait4cmds(struct iscsi_conn *conn)
3351{
4c22e07f
SG
3352 isert_info("iscsi_conn %p\n", conn);
3353
c7e160ee
SG
3354 if (conn->sess) {
3355 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
3356 target_wait_for_sess_cmds(conn->sess->se_sess);
3357 }
3358}
3359
/*
 * Drain flush errors from the QP: post a "beacon" recv WR with a magic
 * wr_id after the QP has moved to the error state; its completion on the
 * CQ signals that all preceding flush errors have been consumed, and
 * the completion handler fires wait_comp_err.
 */
static void
isert_wait4flush(struct isert_conn *isert_conn)
{
	struct ib_recv_wr *bad_wr;

	isert_info("conn %p\n", isert_conn);

	init_completion(&isert_conn->wait_comp_err);
	isert_conn->beacon.wr_id = ISER_BEACON_WRID;
	/* post an indication that all flush errors were consumed */
	if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
		isert_err("conn %p failed to post beacon", isert_conn);
		return;
	}

	wait_for_completion(&isert_conn->wait_comp_err);
}
3377
/*
 * iscsit_wait_conn callback: terminate the CM connection, then quiesce
 * commands, flush errors and logout in order before queueing the final
 * release work.  Connections that never left INIT need no draining.
 */
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	/*
	 * Only wait for wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_wait4cmds(conn);
	isert_wait4flush(isert_conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}
3402
/*
 * iscsit_free_conn callback: drain any remaining flush errors and drop
 * a connection reference (final free happens via isert_put_conn).
 */
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_wait4flush(isert_conn);
	isert_put_conn(isert_conn);
}
3410
/* iSER transport ops registered with the iSCSI target core */
static struct iscsit_transport iser_target_transport = {
	.name = "IB/iSER",
	.transport_type = ISCSI_INFINIBAND,
	/* per-command private area carved out by the core */
	.priv_size = sizeof(struct isert_cmd),
	.owner = THIS_MODULE,
	/* portal lifecycle */
	.iscsit_setup_np = isert_setup_np,
	.iscsit_accept_np = isert_accept_np,
	.iscsit_free_np = isert_free_np,
	/* connection lifecycle */
	.iscsit_wait_conn = isert_wait_conn,
	.iscsit_free_conn = isert_free_conn,
	/* login path */
	.iscsit_get_login_rx = isert_get_login_rx,
	.iscsit_put_login_tx = isert_put_login_tx,
	/* full-feature-phase I/O */
	.iscsit_immediate_queue = isert_immediate_queue,
	.iscsit_response_queue = isert_response_queue,
	.iscsit_get_dataout = isert_get_dataout,
	.iscsit_queue_data_in = isert_put_datain,
	.iscsit_queue_status = isert_put_response,
	.iscsit_aborted_task = isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
3431
3432static int __init isert_init(void)
3433{
3434 int ret;
3435
631af550
SG
3436 isert_comp_wq = alloc_workqueue("isert_comp_wq",
3437 WQ_UNBOUND | WQ_HIGHPRI, 0);
b8d26b3b 3438 if (!isert_comp_wq) {
24f412dd 3439 isert_err("Unable to allocate isert_comp_wq\n");
b8d26b3b 3440 ret = -ENOMEM;
6f0fae3d 3441 return -ENOMEM;
b8d26b3b
NB
3442 }
3443
b02efbfc
SG
3444 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
3445 WQ_UNBOUND_MAX_ACTIVE);
3446 if (!isert_release_wq) {
24f412dd 3447 isert_err("Unable to allocate isert_release_wq\n");
b02efbfc
SG
3448 ret = -ENOMEM;
3449 goto destroy_comp_wq;
3450 }
3451
b8d26b3b 3452 iscsit_register_transport(&iser_target_transport);
24f412dd 3453 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
b02efbfc 3454
b8d26b3b
NB
3455 return 0;
3456
b02efbfc
SG
3457destroy_comp_wq:
3458 destroy_workqueue(isert_comp_wq);
6f0fae3d 3459
b8d26b3b
NB
3460 return ret;
3461}
3462
/*
 * Module exit: flush any pending scheduled work (e.g. delayed login
 * work), tear down the workqueues and unregister the transport.
 */
static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
3471
3472MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
9e35eff4 3473MODULE_VERSION("1.0");
b8d26b3b
NB
3474MODULE_AUTHOR("nab@Linux-iSCSI.org");
3475MODULE_LICENSE("GPL");
3476
3477module_init(isert_init);
3478module_exit(isert_exit);
This page took 0.324502 seconds and 5 git commands to generate.