Merge tag 'f2fs-for-3.8-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk...
[deliverable/linux.git] / drivers / scsi / bnx2fc / bnx2fc_els.c
1 /*
2 * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
3 * This file contains helper routines that handle ELS requests
4 * and responses.
5 *
6 * Copyright (c) 2008 - 2011 Broadcom Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
13 */
14
15 #include "bnx2fc.h"
16
17 static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
18 void *arg);
19 static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
20 void *arg);
21 static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
22 void *data, u32 data_len,
23 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
24 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
25
/**
 * bnx2fc_rrq_compl - completion handler for an RRQ ELS request
 * @cb_arg: carries the RRQ command (io_req) and the original aborted
 *	    I/O request (aborted_io_req) the RRQ was issued for
 *
 * Drops the hold on the original I/O request taken when the RRQ was
 * sent.  If the RRQ itself timed out, the request is removed from the
 * active queue and a FW cleanup is issued so the late completion is
 * dropped.  Frees @cb_arg on all paths.
 *
 * NOTE(review): appears to run with tgt->tgt_lock held by the caller
 * (no locking is taken around the list/kref operations here) — confirm
 * against bnx2fc_process_els_compl().
 */
static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req;
	struct bnx2fc_cmd *rrq_req;
	int rc = 0;

	BUG_ON(!cb_arg);
	rrq_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BUG_ON(!orig_io_req);
	BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
		   orig_io_req->xid, rrq_req->xid);

	/* Release the reference taken for the RRQ in bnx2fc_send_rrq() */
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);

	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. Remove from active_cmd_queue.
		 */
		BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
			   rrq_req->xid);

		if (rrq_req->on_active_queue) {
			list_del_init(&rrq_req->link);
			rrq_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(rrq_req);
			BUG_ON(rc);
		}
	}
	kfree(cb_arg);
}
/**
 * bnx2fc_send_rrq - send an RRQ (Reinstate Recovery Qualifier) ELS
 * @aborted_io_req: I/O request whose exchange was aborted via ABTS;
 *		    the caller passes it in with a reference held
 *
 * Builds the RRQ payload from the aborted exchange's S_ID/OX_ID/RX_ID
 * and submits it.  On -ENOMEM from bnx2fc_initiate_els() the send is
 * retried every 20ms for up to 10 seconds.  On any failure the held
 * reference on @aborted_io_req is dropped here (under tgt_lock); on
 * success it is dropped later in bnx2fc_rrq_compl().
 *
 * NOTE(review): the return value mixes negative errno (-ENOMEM) with
 * the positive SCSI-ML FAILED code; callers appear to only test for
 * non-zero — confirm before changing the convention.
 *
 * Return: 0 on success, non-zero (-ENOMEM/-EINVAL or FAILED) on error.
 */
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{

	struct fc_els_rrq rrq;
	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	unsigned long start = jiffies;
	int rc;

	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	/* RRQ payload identifies the aborted exchange being reclaimed */
	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);

retry_rrq:
	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
				 bnx2fc_rrq_compl, cb_arg,
				 r_a_tov);
	if (rc == -ENOMEM) {
		if (time_after(jiffies, start + (10 * HZ))) {
			BNX2FC_ELS_DBG("rrq Failed\n");
			rc = FAILED;
			goto rrq_err;
		}
		msleep(20);
		goto retry_rrq;
	}
rrq_err:
	if (rc) {
		BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
			aborted_io_req->xid);
		kfree(cb_arg);
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
	}
	return rc;
}
112
/**
 * bnx2fc_l2_els_compl - completion handler for libfc-originated ELS
 *			 requests (ADISC/LOGO/RLS)
 * @cb_arg: carries the ELS command (io_req) and the original libfc
 *	    exchange id (l2_oxid) the response must be delivered to
 *
 * On timeout the request is cleaned up with the FW and the completion
 * is dropped (libfc handles the ELS timeout itself).  Otherwise the
 * response FC header and payload are copied into a single buffer and
 * handed back to libfc via bnx2fc_process_l2_frame_compl() under the
 * original OX_ID.  Frees @cb_arg on all paths.
 */
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u16 l2_oxid;
	int frame_len;
	int rc = 0;

	l2_oxid = cb_arg->l2_oxid;
	BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);

	els_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. libfc will handle the els timeout
		 */
		if (els_req->on_active_queue) {
			list_del_init(&els_req->link);
			els_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(els_req);
			BUG_ON(rc);
		}
		goto free_arg;
	}

	tgt = els_req->tgt;
	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "Unable to alloc mp buf\n");
		goto free_arg;
	}
	hdr_len = sizeof(*fc_hdr);
	/* header + payload must fit in the single page allocated above */
	if (hdr_len + resp_len > PAGE_SIZE) {
		printk(KERN_ERR PFX "l2_els_compl: resp len is "
				    "beyond page size\n");
		goto free_buf;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);
	frame_len = hdr_len + resp_len;

	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);

free_buf:
	kfree(buf);
free_arg:
	kfree(cb_arg);
}
172
173 int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
174 {
175 struct fc_els_adisc *adisc;
176 struct fc_frame_header *fh;
177 struct bnx2fc_els_cb_arg *cb_arg;
178 struct fc_lport *lport = tgt->rdata->local_port;
179 u32 r_a_tov = lport->r_a_tov;
180 int rc;
181
182 fh = fc_frame_header_get(fp);
183 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
184 if (!cb_arg) {
185 printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
186 return -ENOMEM;
187 }
188
189 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
190
191 BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
192 adisc = fc_frame_payload_get(fp, sizeof(*adisc));
193 /* adisc is initialized by libfc */
194 rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
195 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
196 if (rc)
197 kfree(cb_arg);
198 return rc;
199 }
200
201 int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
202 {
203 struct fc_els_logo *logo;
204 struct fc_frame_header *fh;
205 struct bnx2fc_els_cb_arg *cb_arg;
206 struct fc_lport *lport = tgt->rdata->local_port;
207 u32 r_a_tov = lport->r_a_tov;
208 int rc;
209
210 fh = fc_frame_header_get(fp);
211 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
212 if (!cb_arg) {
213 printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
214 return -ENOMEM;
215 }
216
217 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
218
219 BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
220 logo = fc_frame_payload_get(fp, sizeof(*logo));
221 /* logo is initialized by libfc */
222 rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
223 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
224 if (rc)
225 kfree(cb_arg);
226 return rc;
227 }
228
229 int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
230 {
231 struct fc_els_rls *rls;
232 struct fc_frame_header *fh;
233 struct bnx2fc_els_cb_arg *cb_arg;
234 struct fc_lport *lport = tgt->rdata->local_port;
235 u32 r_a_tov = lport->r_a_tov;
236 int rc;
237
238 fh = fc_frame_header_get(fp);
239 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
240 if (!cb_arg) {
241 printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
242 return -ENOMEM;
243 }
244
245 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
246
247 rls = fc_frame_payload_get(fp, sizeof(*rls));
248 /* rls is initialized by libfc */
249 rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
250 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
251 if (rc)
252 kfree(cb_arg);
253 return rc;
254 }
255
/**
 * bnx2fc_srr_compl - completion handler for an SRR ELS request
 * @cb_arg: carries the SRR command (io_req) and the original I/O
 *	    request (aborted_io_req) being recovered
 *
 * Timeout path: aborts the SRR itself, then retries the SRR up to
 * SRR_RETRY_COUNT times; if retries are exhausted the original I/O is
 * aborted.  Response path: LS_ACC means the sequence retransmission was
 * accepted; LS_RJT (or anything else unexpected on RJT) aborts the
 * original I/O.  Always drops the reference on the original I/O taken
 * when the SRR was sent.
 *
 * NOTE(review): appears to run with tgt->tgt_lock held — it explicitly
 * drops/reacquires the lock around bnx2fc_send_srr(); confirm at the
 * call site.
 * NOTE(review): unlike bnx2fc_rrq_compl()/bnx2fc_rec_compl(), @cb_arg
 * is not kfree()d here — verify it is released elsewhere or leaked.
 */
void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct bnx2fc_cmd *srr_req;
	struct bnx2fc_cmd *orig_io_req;
	struct fc_frame *fp;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u8 opcode;
	int rc = 0;

	orig_io_req = cb_arg->aborted_io_req;
	srr_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
		/* SRR timedout */
		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		rc = bnx2fc_initiate_abts(srr_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
		}
		/* Original I/O already completed or being aborted: nothing
		 * further to recover here.
		 */
		if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
		    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
			BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
				      orig_io_req->xid, orig_io_req->req_flags);
			goto srr_compl_done;
		}
		orig_io_req->srr_retry++;
		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
			struct bnx2fc_rport *tgt = orig_io_req->tgt;
			/* bnx2fc_send_srr() takes tgt_lock itself on its
			 * error path, so release it across the call.
			 */
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req,
					     orig_io_req->srr_offset,
					     orig_io_req->srr_rctl);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto srr_compl_done;
		}

		/* Retries exhausted (or resend failed): abort original I/O */
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto srr_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
			      orig_io_req->xid, orig_io_req->req_flags);
		goto srr_compl_done;
	}
	mp_req = &(srr_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/* Rebuild a contiguous header+payload image, then wrap it in a
	 * libfc frame so fc_frame_payload_op() can parse the response.
	 */
	hdr_len = sizeof(*fc_hdr);
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
		goto srr_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		BNX2FC_IO_DBG(srr_req, "SRR success\n");
		break;
	case ELS_LS_RJT:
		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		break;
	default:
		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
			opcode);
		break;
	}
	fc_frame_free(fp);
free_buf:
	kfree(buf);
srr_compl_done:
	/* Drop the hold taken by bnx2fc_send_srr() */
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}
365
/**
 * bnx2fc_rec_compl - completion handler for a REC ELS request
 * @cb_arg: carries the REC command (io_req) and the original I/O
 *	    request (aborted_io_req) being recovered
 *
 * Timeout path: aborts the REC, retries it up to REC_RETRY_COUNT times,
 * then aborts the original I/O.  LS_RJT with a logic/unable reason and
 * an OX_ID/RX_ID explanation is treated as "command lost": a fresh
 * bnx2fc_cmd is posted for the same scsi command and the stale one is
 * cleaned up.  LS_ACC: if the target holds sequence initiative nothing
 * is done; otherwise the lost piece (XFER_RDY/DATA/RSP) is deduced from
 * the REC payload and the FW error entry, a sequence cleanup and/or SRR
 * is issued, or the I/O is aborted on cleanup failure.  Drops the
 * reference on the original I/O and frees @cb_arg on all paths.
 *
 * NOTE(review): appears to run with tgt->tgt_lock held — it drops and
 * reacquires the lock around bnx2fc_send_rec()/bnx2fc_post_io_req()/
 * bnx2fc_send_srr(); confirm at the call site.
 */
void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req, *new_io_req;
	struct bnx2fc_cmd *rec_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct bnx2fc_rport *tgt;
	struct fcoe_err_report_entry *err_entry;
	struct scsi_cmnd *sc_cmd;
	enum fc_rctl r_ctl;
	unsigned char *buf;
	void *resp_buf;
	struct fc_frame *fp;
	u8 opcode;
	u32 offset;
	u32 e_stat;
	u32 resp_len, hdr_len;
	int rc = 0;
	bool send_seq_clnp = false;
	bool abort_io = false;

	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
	rec_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
	tgt = orig_io_req->tgt;

	/* Handle REC timeout case */
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		/* els req is timed out. send abts for els */
		rc = bnx2fc_initiate_abts(rec_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(rec_req);
		}
		orig_io_req->rec_retry++;
		/* REC timedout. send ABTS to the orig IO req */
		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_rec(orig_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto rec_compl_done;
		}
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto rec_compl_done;
	}

	/* Nothing to recover if the original I/O already finished or is
	 * already being aborted.
	 */
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "completed"
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "abts in prog "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}

	mp_req = &(rec_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);

	/* Rebuild a contiguous header+payload image and wrap it in a
	 * libfc frame so the payload opcode can be parsed.
	 */
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
		goto rec_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			/* Target never saw the exchange: re-post the same
			 * scsi command on a fresh bnx2fc_cmd and clean up
			 * the stale one in the FW.
			 */
			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
			new_io_req = bnx2fc_cmd_alloc(tgt);
			if (!new_io_req)
				goto abort_io;
			new_io_req->sc_cmd = orig_io_req->sc_cmd;
			/* cleanup orig_io_req that is with the FW */
			set_bit(BNX2FC_FLAG_CMD_LOST,
				&orig_io_req->req_flags);
			bnx2fc_initiate_cleanup(orig_io_req);
			/* Post a new IO req with the same sc_cmd */
			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_post_io_req(tgt, new_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto free_frame;
			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
		}
abort_io:
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		/* REVISIT: Check if the exchange is already aborted */
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		if (e_stat & ESB_ST_SEQ_INIT)  {
			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
			goto free_frame;
		}
		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
			e_stat, offset);
		/* Seq initiative is with us */
		err_entry = (struct fcoe_err_report_entry *)
			     &orig_io_req->err_entry;
		sc_cmd = orig_io_req->sc_cmd;
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			/* SCSI WRITE command */
			if (offset == orig_io_req->data_xfer_len) {
				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
				/* FCP_RSP lost */
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* start transmitting from offset */
				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
				send_seq_clnp = true;
				r_ctl = FC_RCTL_DD_DATA_DESC;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
				/* XFER_RDY */
			}
		} else {
			/* SCSI READ command */
			if (err_entry->data.rx_buf_off ==
					orig_io_req->data_xfer_len) {
				/* FCP_RSP lost */
				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* request retransmission from this offset */
				send_seq_clnp = true;
				offset = err_entry->data.rx_buf_off;
				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
				/* FCP_DATA lost */
				r_ctl = FC_RCTL_DD_SOL_DATA;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
			}
		}
		if (abort_io) {
			/* Sequence cleanup failed - fall back to ABTS */
			rc = bnx2fc_initiate_abts(orig_io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
					      " failed. issue cleanup\n");
				bnx2fc_initiate_cleanup(orig_io_req);
			}
		} else if (!send_seq_clnp) {
			/* Only the FCP_RSP was lost - ask for it via SRR */
			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
			spin_lock_bh(&tgt->tgt_lock);

			if (rc) {
				BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
					" IO will abort\n");
			}
		}
	}
free_frame:
	fc_frame_free(fp);
free_buf:
	kfree(buf);
rec_compl_done:
	/* Drop the hold taken by bnx2fc_send_rec() */
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}
574
/**
 * bnx2fc_send_rec - send a REC (Read Exchange Concise) ELS
 * @orig_io_req: I/O request whose exchange state is being queried
 *
 * Takes an extra reference on @orig_io_req for the duration of the
 * REC; it is dropped by bnx2fc_rec_compl() on success, or here on
 * failure.
 *
 * NOTE(review): on cb_arg allocation failure the error path still does
 * kref_put() on @orig_io_req even though the kref_get() below was never
 * reached — this only balances if callers pass in a reference that this
 * function consumes on error; verify against the call sites.
 *
 * Return: 0 on success, negative errno on failure.
 */
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
	struct fc_els_rec rec;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}
	/* Hold the original I/O until the REC completes */
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	/* REC payload identifies the exchange being queried */
	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);

	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
				 bnx2fc_rec_compl, cb_arg,
				 r_a_tov);
rec_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	}
	return rc;
}
616
/**
 * bnx2fc_send_srr - send an SRR (Sequence Retransmission Request) ELS
 * @orig_io_req: I/O request whose sequence is to be retransmitted
 * @offset:      relative offset to retransmit from
 * @r_ctl:       R_CTL of the information unit being requested
 *
 * Saves @offset/@r_ctl on @orig_io_req so a timed-out SRR can be
 * retried with the same parameters, and takes an extra reference that
 * is dropped by bnx2fc_srr_compl() on success, or here on failure.
 * On success BNX2FC_FLAG_SRR_SENT is set on the original request.
 *
 * NOTE(review): on cb_arg allocation failure the error path still does
 * kref_put() on @orig_io_req even though the kref_get() below was never
 * reached — this only balances if callers pass in a reference that this
 * function consumes on error; verify against the call sites.
 *
 * Return: 0 on success, negative errno on failure.
 */
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}
	/* Hold the original I/O until the SRR completes */
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	/* SRR payload identifies the exchange and the restart point */
	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;
	/* Remember parameters so a timed-out SRR can be resent as-is */
	orig_io_req->srr_offset = offset;
	orig_io_req->srr_rctl = r_ctl;

	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
				 bnx2fc_srr_compl, cb_arg,
				 r_a_tov);
srr_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	} else
		set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);

	return rc;
}
662
/**
 * bnx2fc_initiate_els - build and queue an ELS request to the FW
 * @tgt:        offloaded rport session to send the ELS on
 * @op:         ELS opcode (ELS_RRQ, ELS_ADISC, ELS_SRR, ...)
 * @data:       ELS payload to copy into the middle-path request buffer
 * @data_len:   payload length in bytes
 * @cb_func:    completion callback invoked from bnx2fc_process_els_compl()
 * @cb_arg:     argument passed to @cb_func (stored on the request)
 * @timer_msec: ELS timeout in milliseconds; 0 disables the timer
 *
 * Validates rport/lport/session state, allocates a middle-path command,
 * fills the FC header (SRR travels as an FC4 ELS over FCP type, all
 * others as a plain ELS), initializes the task context, and rings the
 * doorbell under tgt_lock.  The session-ready state is re-checked under
 * the lock before queuing since it may have changed since the earlier
 * unlocked check.
 *
 * Return: 0 on success; -EINVAL for bad state or opcode; -ENOMEM on
 *	   allocation failure.
 */
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			void *data, u32 data_len,
			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
	     (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	/* bnx2fc_init_mp_req() returns SUCCESS/FAILED, not errno */
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		/* Clear callbacks so release does not invoke them */
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	/* SRR is an FC-4 link service carried over the FCP type */
	if (op == ELS_SRR)
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	/* Session may have dropped since the unlocked check above */
	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}
797
/**
 * bnx2fc_process_els_compl - process an ELS completion from the FW
 * @els_req: the completed ELS command
 * @task:    FW task context entry holding the response header/length
 * @num_rq:  number of RQ entries for this completion (unused here)
 *
 * Races against the timeout handler via BNX2FC_FLAG_ELS_DONE: whichever
 * side sets the flag first owns completion processing; the loser only
 * drops its reference.  Copies the response FC header out of the task
 * context (byte-swapping each 64-bit word), records the response
 * length, invokes the request's callback, and drops the command
 * reference.
 *
 * NOTE(review): no locking visible here — presumably called with
 * tgt->tgt_lock held by the interrupt/completion path; confirm.
 */
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
			      struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	u64 *hdr;
	u64 *temp_hdr;

	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
			"cmd_type = %d\n", els_req->xid, els_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
			     &els_req->req_flags)) {
		BNX2FC_ELS_DBG("Timer context finished processing this "
			   "els - 0x%x\n", els_req->xid);
		/* This IO doesn't receive cleanup completion */
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		return;
	}

	/* Cancel the timeout_work, as we received the response */
	if (cancel_delayed_work(&els_req->timeout_work))
		kref_put(&els_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	if (els_req->on_active_queue) {
		list_del_init(&els_req->link);
		els_req->on_active_queue = 0;
	}

	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);

	/* Copy the 24-byte FC header from the task context, converting
	 * each 64-bit word to big-endian wire order.
	 */
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	mp_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, bnx2fc_cmd_release);
}
849
/**
 * bnx2fc_flogi_resp - FLOGI/FDISC response hook ahead of libfc
 * @seq: exchange sequence of the FLOGI
 * @fp:  response frame, or an ERR_PTR on error
 * @arg: the fcoe_ctlr for this interface
 *
 * Lets the FIP controller see the FLOGI response so it can grant the
 * FPMA MAC address, terminates a vport on LS_RJT, then always defers
 * to fc_lport_flogi_resp() for the normal libfc handling.
 */
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	u8 *mac;
	u8 op;

	if (IS_ERR(fp))
		goto done;

	mac = fr_cb(fp)->granted_mac;
	if (is_zero_ether_addr(mac)) {
		op = fc_frame_payload_op(fp);
		if (lport->vport) {
			if (op == ELS_LS_RJT) {
				printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
				fc_vport_terminate(lport->vport);
				fc_frame_free(fp);
				return;
			}
		}
		/* No MAC granted yet - let FIP parse the FLOGI response
		 * (this may populate granted_mac, which is why the
		 * zero-address check below is deliberately repeated
		 * rather than folded into an else branch).
		 */
		fcoe_ctlr_recv_flogi(fip, lport, fp);
	}
	if (!is_zero_ether_addr(mac))
		fip->update_mac(lport, mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}
880
881 static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
882 void *arg)
883 {
884 struct fcoe_ctlr *fip = arg;
885 struct fc_exch *exch = fc_seq_exch(seq);
886 struct fc_lport *lport = exch->lp;
887 static u8 zero_mac[ETH_ALEN] = { 0 };
888
889 if (!IS_ERR(fp))
890 fip->update_mac(lport, zero_mac);
891 fc_lport_logo_resp(seq, fp, lport);
892 }
893
894 struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
895 struct fc_frame *fp, unsigned int op,
896 void (*resp)(struct fc_seq *,
897 struct fc_frame *,
898 void *),
899 void *arg, u32 timeout)
900 {
901 struct fcoe_port *port = lport_priv(lport);
902 struct bnx2fc_interface *interface = port->priv;
903 struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
904 struct fc_frame_header *fh = fc_frame_header_get(fp);
905
906 switch (op) {
907 case ELS_FLOGI:
908 case ELS_FDISC:
909 return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
910 fip, timeout);
911 case ELS_LOGO:
912 /* only hook onto fabric logouts, not port logouts */
913 if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
914 break;
915 return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
916 fip, timeout);
917 }
918 return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
919 }
This page took 0.05031 seconds and 5 git commands to generate.