[SCSI] lpfc 8.3.13: Misc fixes
drivers/scsi/lpfc/lpfc_bsg.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two fields are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* job waiting for this event to finish */
	struct fc_bsg_job *set_job;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *rxbmp; /* for BIU diags */
	struct lpfc_dmabufext *dmp; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */

	/* job waiting for this mbox command to finish */
	struct fc_bsg_job *set_job;
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};
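
/*
 * Sketch of the tracking pattern used throughout this file: an async
 * request stashes its bsg_job_data in the iocb (or mbox) context so the
 * completion handler can recover the waiting fc_bsg_job, e.g. (cf.
 * lpfc_bsg_send_mgmt_cmd() below):
 *
 *	dd_data->type = TYPE_IOCB;
 *	dd_data->context_un.iocb.set_job = job;
 *	cmdiocbq->context1 = dd_data;
 */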

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
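
/*
 * The NULL-pointer arithmetic above is the classic offsetof() idiom;
 * with <linux/stddef.h> it could equivalently be written as:
 *
 *	#define ELX_LOOPBACK_HEADER_SZ \
 *		offsetof(struct lpfc_sli_ct_request, un)
 */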

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	unsigned long iflags;
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	iocb = &dd_data->context_un.iocb;
	job = iocb->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	bmp = iocb->bmp;
	rspiocbq = iocb->rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = iocb->ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_bmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	cmd = &cmdiocbq->iocb;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;

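	/*
	 * Polled-mode operation (DISABLE_FCP_RING_INT) keeps the FCP ring
	 * interrupt masked; re-enabling it here lets this iocb complete
	 * asynchronously.  (Explanation inferred from the flag and
	 * register names.)
	 */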
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	job = dd_data->context_un.iocb.set_job;
	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
	rspiocbq = dd_data->context_un.iocb.rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (job->reply->result == -EAGAIN)
		rc = -EAGAIN;
	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
		job->reply->reply_payload_rcv_len =
			rsp->un.elsreq64.bdl.bdeSize;
	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
		job->reply->reply_payload_rcv_len =
			sizeof(struct fc_bsg_ctels_reply);
		/* LS_RJT data returned in word 4 */
		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
		els_reply = &job->reply->reply_data.ctels_reply;
		els_reply->status = FC_CTELS_STATUS_REJECT;
		els_reply->rjt_data.action = rjt_data[3];
		els_reply->rjt_data.reason_code = rjt_data[2];
		els_reply->rjt_data.reason_explanation = rjt_data[1];
		els_reply->rjt_data.vendor_unique = rjt_data[0];
	} else
		rc = -EIO;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *rsp;
	uint16_t rpi = 0;
	struct lpfc_dmabuf *pcmd;
	struct lpfc_dmabuf *prsp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct ulp_bde64 *bpl;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		lpfc_nlp_put(ndlp);
		rc = -ENOMEM;
		goto free_dd_data;
	}

	rsp = &rspiocbq->iocb;
	rpi = ndlp->nlp_rpi;

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto free_rspiocbq;
	}

	/* prep els iocb set context1 to the ndlp, context2 to the command
	 * dmabuf, context3 holds the data dmabuf
	 */
	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(prsp);
	cmdiocbq->context2 = NULL;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	bpl = (struct ulp_bde64 *) pbuflist->virt;

	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;

	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = NULL;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	lpfc_nlp_put(ndlp);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

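/*
 * Minimal lifecycle sketch for the event helpers above, mirroring how
 * lpfc_bsg_hba_set_event() and lpfcdiag_loop_get_xri() use them: a new
 * event is linked on the HBA waiter list under ct_ev_lock and referenced
 * for as long as a waiter holds it:
 *
 *	struct lpfc_bsg_event *evt;
 *	unsigned long flags;
 *
 *	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, reg_id, req_id);
 *	if (!evt)
 *		return -ENOMEM;
 *	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 *	list_add(&evt->node, &phba->ct_ev_waiters);
 *	lpfc_bsg_event_ref(evt);
 *	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 */
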
/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}
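
/*
 * diag_cmd_data_alloc() (defined later in this file) builds the chained
 * list of 4K DMA buffers that diag_cmd_data_free() tears down; an
 * assumed round trip in a caller would look like:
 *
 *	struct lpfc_dmabufext *dmp;
 *
 *	dmp = diag_cmd_data_alloc(phba, bpl, size, 0);
 *	if (!dmp)
 *		return -ENOMEM;
 *	...
 *	diag_cmd_data_free(phba, dmp);
 */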

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @piocbq: Pointer to the unsolicited receive iocb.
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct fc_bsg_job *job = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
			evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
						flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						diag_cmd_data_free(phba,
						(struct lpfc_dmabufext *)
							dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
							(phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
							)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
						piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);
		lpfc_bsg_event_unref(evt);

		job = evt->set_job;
		evt->set_job = NULL;
		if (job) {
			job->reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			job->reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			job->job_done(job);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if (evt_req_id == SLI_CT_ELX_LOOPBACK)
		return 0;
	return 1;
}

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (dd_data == NULL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2734 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
				FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->type = TYPE_EVT;
	dd_data->context_un.evt = evt;
	evt->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	if (dd_data != NULL)
		kfree(dd_data);

	job->dd_data = NULL;
	return rc;
}

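/*
 * For reference, a sketch of the vendor request this handler expects
 * (struct set_ct_event and the LPFC_BSG_VENDOR_* codes are assumed to
 * come from lpfc_bsg.h; the sg_io_v4 plumbing that carries the request
 * from user space is not shown):
 *
 *	struct set_ct_event evt_req = {
 *		.command   = LPFC_BSG_VENDOR_SET_CT_EVENT,
 *		.type_mask = FC_REG_CT_EVENT,
 *		.ev_req_id = req_id,
 *		.ev_reg_id = reg_id,
 *	};
 */
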
/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't any more
	 */
	if (evt_dat == NULL) {
		job->reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		job->reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	job->reply->result = 0;
	job->job_done(job);
	return 0;

job_error:
	job->dd_data = NULL;
	job->reply->result = rc;
	return rc;
}


/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	job = dd_data->context_un.iocb.set_job;
	bmp = dd_data->context_un.iocb.bmp;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *bmp, int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				 "2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->un.ulpWord[3] = ndlp->nlp_rpi;
		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context3 = bmp;

	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = NULL;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.rspiocbq = NULL;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL;
	struct scatterlist *sgel = NULL;
	int request_nseg;
	int numbde;
	dma_addr_t busaddr;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* TBD need to handle a timeout */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	rc = -EACCES;
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

send_mgmt_rsp_free_bmp:
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing a port into diagnostic loopback
 * mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete then the link is brought down. If the link is
 * placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_bsg_diag_mode(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct diag_mode_set *loopback_mode;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
	uint32_t link_flags;
	uint32_t timeout;
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	int mbxstatus;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	loopback_mode = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto job_error;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}

		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	while (pring->txcmplq_cnt) {
		if (i++ > 500)	/* wait up to 5 seconds */
			break;

		msleep(10);
	}

	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}

			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			phba->link_flag |= LS_LOOPBACK_MODE;
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfcdiag_loop_self_reg - obtains a remote port login id
 * @phba: Pointer to HBA context object
 * @rpi: Pointer to a remote port login id
 *
 * This function obtains a remote port login id so the diag loopback test
 * can send and receive its own unsolicited CT command.
 **/
static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *dmabuff;
	int status;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return ENOMEM;

	status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
			      (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
	if (status) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return ENOMEM;
	}

	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
	mbox->context1 = NULL;
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
		kfree(dmabuff);
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return ENODEV;
	}

	*rpi = mbox->u.mb.un.varWords[0];

	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
	kfree(dmabuff);
	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfcdiag_loop_self_unreg - unregs from the rpi
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 *
 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
 **/
static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
{
	LPFC_MBOXQ_t *mbox;
	int status;

	/* Allocate mboxq structure */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox == NULL)
		return ENOMEM;

	lpfc_unreg_login(phba, 0, rpi, mbox);
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return EIO;
	}

	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}
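
/*
 * Pairing sketch (assumed caller, cf. the diag loopback test path):
 * every successful self-registration must eventually be undone with the
 * matching unregister; note both helpers return positive errnos by
 * convention here:
 *
 *	uint16_t rpi;
 *
 *	if (lpfcdiag_loop_self_reg(phba, &rpi))
 *		return -ENODEV;
 *	...
 *	lpfcdiag_loop_self_unreg(phba, rpi);
 */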

/**
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 * flags are used so that the unsolicited response handler is able to process
 * the ct command sent on the same port.
 **/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
			 uint16_t *txxri, uint16_t *rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	unsigned long flags;

	*txxri = 0;
	*rxxri = 0;
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		if (dmabuf->virt) {
			INIT_LIST_HEAD(&dmabuf->list);
			bpl = (struct ulp_bde64 *) dmabuf->virt;
			memset(bpl, 0, sizeof(*bpl));
			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
			bpl->addrHigh =
				le32_to_cpu(putPaddrHigh(dmabuf->phys +
					sizeof(*bpl)));
			bpl->addrLow =
				le32_to_cpu(putPaddrLow(dmabuf->phys +
					sizeof(*bpl)));
			bpl->tus.f.bdeFlags = 0;
			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
		}
	}

	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
		dmabuf->virt == NULL) {
		ret_val = ENOMEM;
		goto err_get_xri_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;


	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);

	cmd->un.xseq64.w5.hcsw.Fctl = LA;
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = rpi;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
				rspiocbq,
				(phba->fc_ratov * 2)
				+ LPFC_DRVR_TIMEOUT);
	if (ret_val)
		goto err_get_xri_exit;

	*txxri = rsp->ulpContext;

	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	ret_val = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
	if (list_empty(&evt->events_to_see))
		ret_val = (ret_val) ? EINTR : ETIMEDOUT;
	else {
		ret_val = IOCB_SUCCESS;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}

1818
1819/**
1820 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
1821 * @phba: Pointer to HBA context object
1822 * @bpl: Pointer to 64 bit bde structure
1823 * @size: Number of bytes to process
1824 * @nocopydata: Flag that, when set, leaves the buffer contents untouched
1825 *
1826 * This function allocates page size buffers and populates an lpfc_dmabufext.
1827 * When @nocopydata is clear, each buffer is zeroed and marked as an input
1828 * BDE. The chained list of page size buffers is returned.
1829 **/
1830static struct lpfc_dmabufext *
1831diag_cmd_data_alloc(struct lpfc_hba *phba,
1832 struct ulp_bde64 *bpl, uint32_t size,
1833 int nocopydata)
1834{
1835 struct lpfc_dmabufext *mlist = NULL;
1836 struct lpfc_dmabufext *dmp;
1837 int cnt, offset = 0, i = 0;
1838 struct pci_dev *pcidev;
1839
1840 pcidev = phba->pcidev;
1841
1842 while (size) {
1843 /* We get chunks of 4K */
1844 if (size > BUF_SZ_4K)
1845 cnt = BUF_SZ_4K;
1846 else
1847 cnt = size;
1848
1849 /* allocate struct lpfc_dmabufext buffer header */
1850 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
1851 if (!dmp)
1852 goto out;
1853
1854 INIT_LIST_HEAD(&dmp->dma.list);
1855
1856 /* Queue it to a linked list */
1857 if (mlist)
1858 list_add_tail(&dmp->dma.list, &mlist->dma.list);
1859 else
1860 mlist = dmp;
1861
1862 /* allocate buffer */
1863 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
1864 cnt,
1865 &(dmp->dma.phys),
1866 GFP_KERNEL);
1867
1868 if (!dmp->dma.virt)
1869 goto out;
1870
1871 dmp->size = cnt;
1872
1873 if (nocopydata) {
1874 bpl->tus.f.bdeFlags = 0;
1875 pci_dma_sync_single_for_device(phba->pcidev,
1876 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1877
1878 } else {
1879 memset((uint8_t *)dmp->dma.virt, 0, cnt);
1880 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1881 }
1882
1883 /* build buffer ptr list for IOCB */
1884 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
1885 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
1886 bpl->tus.f.bdeSize = (ushort) cnt;
1887 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1888 bpl++;
1889
1890 i++;
1891 offset += cnt;
1892 size -= cnt;
1893 }
1894
1895 mlist->flag = i;
1896 return mlist;
1897out:
1898 diag_cmd_data_free(phba, mlist);
1899 return NULL;
1900}
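/*
 * Editorial sketch: the inverse of the allocation above.  The driver's
 * actual diag_cmd_data_free(), defined earlier in this file, is
 * authoritative; the walk below only illustrates the expected pattern
 * for tearing down the chained lpfc_dmabufext list:
 *
 *	struct lpfc_dmabufext *cur, *nxt;
 *	list_for_each_entry_safe(cur, nxt, &mlist->dma.list, dma.list) {
 *		if (cur->dma.virt)
 *			dma_free_coherent(&phba->pcidev->dev, cur->size,
 *					  cur->dma.virt, cur->dma.phys);
 *		list_del(&cur->dma.list);
 *		kfree(cur);
 *	}
 *	... then release mlist (the list head element) the same way ...
 */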
1901
1902/**
1903 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
1904 * @phba: Pointer to HBA context object
1905 * @rxxri: Receive exchange id
1906 * @len: Number of data bytes
1907 *
1908 * This function allocates and posts a data buffer of sufficient size to
1909 * receive an unsolicited CT command.
1910 **/
1911static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
1912 size_t len)
1913{
1914 struct lpfc_sli *psli = &phba->sli;
1915 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1916 struct lpfc_iocbq *cmdiocbq;
1917 IOCB_t *cmd = NULL;
1918 struct list_head head, *curr, *next;
1919 struct lpfc_dmabuf *rxbmp;
1920 struct lpfc_dmabuf *dmp;
1921 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
1922 struct ulp_bde64 *rxbpl = NULL;
1923 uint32_t num_bde;
1924 struct lpfc_dmabufext *rxbuffer = NULL;
1925 int ret_val = 0;
1926 int i = 0;
1927
1928 cmdiocbq = lpfc_sli_get_iocbq(phba);
1929 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1930 if (rxbmp != NULL) {
1931 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
1932 if (rxbmp->virt) {
1933 INIT_LIST_HEAD(&rxbmp->list);
1934 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
1935 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
1936 }
1937 }
1938
1939 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
1940 ret_val = ENOMEM;
1941 goto err_post_rxbufs_exit;
1942 }
1943
1944 /* Queue buffers for the receive exchange */
1945 num_bde = (uint32_t)rxbuffer->flag;
1946 dmp = &rxbuffer->dma;
1947
1948 cmd = &cmdiocbq->iocb;
1949 i = 0;
1950
1951 INIT_LIST_HEAD(&head);
1952 list_add_tail(&head, &dmp->list);
1953 list_for_each_safe(curr, next, &head) {
1954 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
1955 list_del(curr);
1956
1957 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
1958 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
1959 cmd->un.quexri64cx.buff.bde.addrHigh =
1960 putPaddrHigh(mp[i]->phys);
1961 cmd->un.quexri64cx.buff.bde.addrLow =
1962 putPaddrLow(mp[i]->phys);
1963 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
1964 ((struct lpfc_dmabufext *)mp[i])->size;
1965 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
1966 cmd->ulpCommand = CMD_QUE_XRI64_CX;
1967 cmd->ulpPU = 0;
1968 cmd->ulpLe = 1;
1969 cmd->ulpBdeCount = 1;
1970 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
1971
1972 } else {
1973 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
1974 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
1975 cmd->un.cont64[i].tus.f.bdeSize =
1976 ((struct lpfc_dmabufext *)mp[i])->size;
1977 cmd->ulpBdeCount = ++i;
1978
1979 if ((--num_bde > 0) && (i < 2))
1980 continue;
1981
1982 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
1983 cmd->ulpLe = 1;
1984 }
1985
1986 cmd->ulpClass = CLASS3;
1987 cmd->ulpContext = rxxri;
1988
1989 ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
1990
1991 if (ret_val == IOCB_ERROR) {
1992 diag_cmd_data_free(phba,
1993 (struct lpfc_dmabufext *)mp[0]);
1994 if (mp[1])
1995 diag_cmd_data_free(phba,
1996 (struct lpfc_dmabufext *)mp[1]);
1997 dmp = list_entry(next, struct lpfc_dmabuf, list);
1998 ret_val = EIO;
1999 goto err_post_rxbufs_exit;
2000 }
2001
2002 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2003 if (mp[1]) {
2004 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2005 mp[1] = NULL;
2006 }
2007
2008 /* The iocb was freed by lpfc_sli_issue_iocb */
2009 cmdiocbq = lpfc_sli_get_iocbq(phba);
2010 if (!cmdiocbq) {
2011 dmp = list_entry(next, struct lpfc_dmabuf, list);
2012 ret_val = EIO;
2013 goto err_post_rxbufs_exit;
2014 }
2015
2016 cmd = &cmdiocbq->iocb;
2017 i = 0;
2018 }
2019 list_del(&head);
2020
2021err_post_rxbufs_exit:
2022
2023 if (rxbmp) {
2024 if (rxbmp->virt)
2025 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2026 kfree(rxbmp);
2027 }
2028
2029 if (cmdiocbq)
2030 lpfc_sli_release_iocbq(phba, cmdiocbq);
2031 return ret_val;
2032}
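/*
 * Editorial overview: lpfc_bsg_diag_test() below strings the helpers
 * above together in this order (the link must already be in loopback):
 *
 *	lpfcdiag_loop_self_reg(phba, &rpi);		  // login to self
 *	lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); // CT xri setup
 *	lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
 *	... XMIT_SEQUENCE64_CX on txxri, then wait for the CT event ...
 *	lpfcdiag_loop_self_unreg(phba, rpi);
 */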
2033
2034/**
2035 * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
2036 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2037 *
2038 * This function receives a user data buffer to be transmitted and received on
2039 * the same port; the link must be up and in loopback mode prior
2040 * to this call.
2041 * 1. A kernel buffer is allocated to copy the user data into.
2042 * 2. The port registers with "itself".
2043 * 3. The transmit and receive exchange ids are obtained.
2044 * 4. The receive exchange id is posted.
2045 * 5. A new els loopback event is created.
2046 * 6. The command and response iocbs are allocated.
2047 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2048 *
2049 * This function is meant to be called n times while the port is in loopback
2050 * so it is the app's responsibility to issue a reset to take the port out
2051 * of loopback mode.
2052 **/
2053static int
2054lpfc_bsg_diag_test(struct fc_bsg_job *job)
2055{
2056 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2057 struct lpfc_hba *phba = vport->phba;
2058 struct diag_mode_test *diag_mode;
2059 struct lpfc_bsg_event *evt;
2060 struct event_data *evdat;
2061 struct lpfc_sli *psli = &phba->sli;
2062 uint32_t size;
2063 uint32_t full_size;
2064 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2065 uint16_t rpi;
2066 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2067 IOCB_t *cmd, *rsp;
2068 struct lpfc_sli_ct_request *ctreq;
2069 struct lpfc_dmabuf *txbmp;
2070 struct ulp_bde64 *txbpl = NULL;
2071 struct lpfc_dmabufext *txbuffer = NULL;
2072 struct list_head head;
2073 struct lpfc_dmabuf *curr;
2074 uint16_t txxri, rxxri;
2075 uint32_t num_bde;
2076 uint8_t *ptr = NULL, *rx_databuf = NULL;
2077 int rc = 0;
2078 unsigned long flags;
2079 void *dataout = NULL;
2080 uint32_t total_mem;
2081
2082 /* in case no data is returned return just the return code */
2083 job->reply->reply_payload_rcv_len = 0;
2084
2085 if (job->request_len <
2086 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2087 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2088 "2739 Received DIAG TEST request below minimum "
2089 "size\n");
2090 rc = -EINVAL;
2091 goto loopback_test_exit;
2092 }
2093
2094 if (job->request_payload.payload_len !=
2095 job->reply_payload.payload_len) {
2096 rc = -EINVAL;
2097 goto loopback_test_exit;
2098 }
2099
2100 diag_mode = (struct diag_mode_test *)
2101 job->request->rqst_data.h_vendor.vendor_cmd;
2102
2103 if ((phba->link_state == LPFC_HBA_ERROR) ||
2104 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2105 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2106 rc = -EACCES;
2107 goto loopback_test_exit;
2108 }
2109
2110 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2111 rc = -EACCES;
2112 goto loopback_test_exit;
2113 }
2114
2115 size = job->request_payload.payload_len;
2116 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2117
2118 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2119 rc = -ERANGE;
2120 goto loopback_test_exit;
2121 }
2122
2123 if (size >= BUF_SZ_4K) {
2124 /*
2125 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2126 * then we allocate 64k and re-use that buffer over and over to
2127 * xfer the whole block. This is because the Linux kernel has a
2128 * problem allocating more than 120k of kernel space memory. This was
2129 * seen with GET_FCPTARGETMAPPING...
2130 */
2131 if (size <= (64 * 1024))
2132 total_mem = size;
2133 else
2134 total_mem = 64 * 1024;
2135 } else
2136 /* Allocate memory for ioctl data */
2137 total_mem = BUF_SZ_4K;
2138
2139 dataout = kmalloc(total_mem, GFP_KERNEL);
2140 if (dataout == NULL) {
2141 rc = -ENOMEM;
2142 goto loopback_test_exit;
2143 }
2144
2145 ptr = dataout;
2146 ptr += ELX_LOOPBACK_HEADER_SZ;
2147 sg_copy_to_buffer(job->request_payload.sg_list,
2148 job->request_payload.sg_cnt,
2149 ptr, size);
2150
2151 rc = lpfcdiag_loop_self_reg(phba, &rpi);
2152 if (rc) {
2153 rc = -ENOMEM;
2154 goto loopback_test_exit;
2155 }
2156
2157 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2158 if (rc) {
2159 lpfcdiag_loop_self_unreg(phba, rpi);
2160 rc = -ENOMEM;
2161 goto loopback_test_exit;
2162 }
2163
2164 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2165 if (rc) {
2166 lpfcdiag_loop_self_unreg(phba, rpi);
2167 rc = -ENOMEM;
2168 goto loopback_test_exit;
2169 }
2170
2171 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2172 SLI_CT_ELX_LOOPBACK);
2173 if (!evt) {
2174 lpfcdiag_loop_self_unreg(phba, rpi);
2175 rc = -ENOMEM;
2176 goto loopback_test_exit;
2177 }
2178
2179 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2180 list_add(&evt->node, &phba->ct_ev_waiters);
2181 lpfc_bsg_event_ref(evt);
2182 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2183
2184 cmdiocbq = lpfc_sli_get_iocbq(phba);
2185 rspiocbq = lpfc_sli_get_iocbq(phba);
2186 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2187
2188 if (txbmp) {
2189 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2190 if (txbmp->virt) {
2191 INIT_LIST_HEAD(&txbmp->list);
2192 txbpl = (struct ulp_bde64 *) txbmp->virt;
2193 txbuffer = diag_cmd_data_alloc(phba,
2194 txbpl, full_size, 0);
2195 }
2196 }
2197
2198 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
2199 !txbmp->virt) {
2200 rc = -ENOMEM;
2201 goto err_loopback_test_exit;
2202 }
2203
2204 cmd = &cmdiocbq->iocb;
2205 rsp = &rspiocbq->iocb;
2206
2207 INIT_LIST_HEAD(&head);
2208 list_add_tail(&head, &txbuffer->dma.list);
2209 list_for_each_entry(curr, &head, list) {
2210 segment_len = ((struct lpfc_dmabufext *)curr)->size;
2211 if (current_offset == 0) {
2212 ctreq = curr->virt;
2213 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2214 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2215 ctreq->RevisionId.bits.InId = 0;
2216 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2217 ctreq->FsSubType = 0;
2218 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
2219 ctreq->CommandResponse.bits.Size = size;
2220 segment_offset = ELX_LOOPBACK_HEADER_SZ;
2221 } else
2222 segment_offset = 0;
2223
2224 BUG_ON(segment_offset >= segment_len);
2225 memcpy(curr->virt + segment_offset,
2226 ptr + current_offset,
2227 segment_len - segment_offset);
2228
2229 current_offset += segment_len - segment_offset;
2230 BUG_ON(current_offset > size);
2231 }
2232 list_del(&head);
2233
2234 /* Build the XMIT_SEQUENCE iocb */
2235
2236 num_bde = (uint32_t)txbuffer->flag;
2237
2238 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
2239 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
2240 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2241 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
2242
2243 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
2244 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2245 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2246 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2247
2248 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
2249 cmd->ulpBdeCount = 1;
2250 cmd->ulpLe = 1;
2251 cmd->ulpClass = CLASS3;
2252 cmd->ulpContext = txxri;
2253
2254 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2255 cmdiocbq->vport = phba->pport;
2256
2257 rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
2258 (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
2259
2260 if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
2261 rc = -EIO;
2262 goto err_loopback_test_exit;
2263 }
2264
2265 evt->waiting = 1;
2266 rc = wait_event_interruptible_timeout(
2267 evt->wq, !list_empty(&evt->events_to_see),
2268 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2269 evt->waiting = 0;
2270 if (list_empty(&evt->events_to_see))
2271 rc = (rc) ? -EINTR : -ETIMEDOUT;
2272 else {
2273 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2274 list_move(evt->events_to_see.prev, &evt->events_to_get);
2275 evdat = list_entry(evt->events_to_get.prev,
2276 typeof(*evdat), node);
2277 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2278 rx_databuf = evdat->data;
2279 if (evdat->len != full_size) {
2280 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2281 "1603 Loopback test did not receive expected "
2282 "data length. actual length 0x%x expected "
2283 "length 0x%x\n",
2284 evdat->len, full_size);
2285 rc = -EIO;
2286 } else if (rx_databuf == NULL)
2287 rc = -EIO;
2288 else {
2289 rc = IOCB_SUCCESS;
2290 /* skip over elx loopback header */
2291 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
2292 job->reply->reply_payload_rcv_len =
2293 sg_copy_from_buffer(job->reply_payload.sg_list,
2294 job->reply_payload.sg_cnt,
2295 rx_databuf, size);
2296 job->reply->reply_payload_rcv_len = size;
2297 }
2298 }
2299
2300err_loopback_test_exit:
2301 lpfcdiag_loop_self_unreg(phba, rpi);
2302
2303 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2304 lpfc_bsg_event_unref(evt); /* release ref */
2305 lpfc_bsg_event_unref(evt); /* delete */
2306 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2307
2308 if (cmdiocbq != NULL)
2309 lpfc_sli_release_iocbq(phba, cmdiocbq);
2310
2311 if (rspiocbq != NULL)
2312 lpfc_sli_release_iocbq(phba, rspiocbq);
2313
2314 if (txbmp != NULL) {
2315 if (txbpl != NULL) {
2316 if (txbuffer != NULL)
2317 diag_cmd_data_free(phba, txbuffer);
2318 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
2319 }
2320 kfree(txbmp);
2321 }
2322
2323loopback_test_exit:
2324 kfree(dataout);
2325 /* make error code available to userspace */
2326 job->reply->result = rc;
2327 job->dd_data = NULL;
2328 /* complete the job back to userspace if no error */
2329 if (rc == 0)
2330 job->job_done(job);
2331 return rc;
2332}
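/*
 * Editorial example, not part of the driver: a minimal user-space
 * sketch of driving the loopback test above through the fc bsg node
 * (typically /dev/bsg/fc_host<N>).  It assumes the application has
 * access to the definitions in scsi/scsi_bsg_fc.h and lpfc_bsg.h;
 * note that vendor_cmd[0] overlays the first word of the
 * diag_mode_test structure that lpfc_bsg_diag_test() extracts.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO */
#include <linux/bsg.h>		/* struct sg_io_v4 */

static int lpfc_loopback_echo(const char *node, void *txbuf, void *rxbuf,
			      uint32_t len)
{
	uint8_t rqst[sizeof(struct fc_bsg_request) +
		     sizeof(struct diag_mode_test)] = { 0 };
	struct fc_bsg_request *req = (struct fc_bsg_request *)rqst;
	struct fc_bsg_reply reply;
	struct sg_io_v4 io;
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;

	req->msgcode = FC_BSG_HST_VENDOR;
	req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_DIAG_TEST;

	memset(&reply, 0, sizeof(reply));
	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)rqst;
	io.request_len = sizeof(rqst);
	io.dout_xferp = (uintptr_t)txbuf;	/* data to transmit */
	io.dout_xfer_len = len;
	io.din_xferp = (uintptr_t)rxbuf;	/* looped-back data */
	io.din_xfer_len = len;			/* must equal dout length */
	io.response = (uintptr_t)&reply;
	io.max_response_len = sizeof(reply);

	if (ioctl(fd, SG_IO, &io) < 0 || reply.result) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif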
2333
2334/**
2335 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
2336 * @job: GET_DFC_REV fc_bsg_job
2337 **/
2338static int
2339lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
2340{
2341 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2342 struct lpfc_hba *phba = vport->phba;
2343 struct get_mgmt_rev *event_req;
2344 struct get_mgmt_rev_reply *event_reply;
2345 int rc = 0;
2346
2347 if (job->request_len <
2348 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
2349 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2350 "2740 Received GET_DFC_REV request below "
2351 "minimum size\n");
2352 rc = -EINVAL;
2353 goto job_error;
2354 }
2355
2356 event_req = (struct get_mgmt_rev *)
2357 job->request->rqst_data.h_vendor.vendor_cmd;
2358
2359 event_reply = (struct get_mgmt_rev_reply *)
2360 job->reply->reply_data.vendor_reply.vendor_rsp;
2361
2362 if (job->reply_len <
2363 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
2364 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2365 "2741 Received GET_DFC_REV reply below "
2366 "minimum size\n");
2367 rc = -EINVAL;
2368 goto job_error;
2369 }
2370
2371 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
2372 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
2373job_error:
2374 job->reply->result = rc;
2375 if (rc == 0)
2376 job->job_done(job);
2377 return rc;
2378}
2379
2380/**
2381 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
2382 * @phba: Pointer to HBA context object.
2383 * @pmboxq: Pointer to mailbox command.
2384 *
2385 * This is the completion handler for mailbox commands issued from the
2386 * lpfc_bsg_issue_mbox function. It is called by the mailbox event
2387 * handler with no lock held. It copies the completed mailbox (and any
2388 * extended data) into the job's reply payload and completes the bsg
2389 * job.
2390 **/
2391void
2392lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2393{
2394 struct bsg_job_data *dd_data;
2395 struct fc_bsg_job *job;
2396 uint32_t size;
2397 unsigned long flags;
2398 uint8_t *to;
2399 uint8_t *from;
2400
2401 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2402 dd_data = pmboxq->context1;
2403 /* job already timed out? */
2404 if (!dd_data) {
2405 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2406 return;
2407 }
2408
2409 /* build the outgoing buffer to do an sg copy
2410 * the format is the response mailbox followed by any extended
2411 * mailbox data
2412 */
2413 from = (uint8_t *)&pmboxq->u.mb;
2414 to = (uint8_t *)dd_data->context_un.mbox.mb;
2415 memcpy(to, from, sizeof(MAILBOX_t));
2416 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
2417 /* copy the extended data if any, count is in words */
2418 if (dd_data->context_un.mbox.outExtWLen) {
2419 from = (uint8_t *)dd_data->context_un.mbox.ext;
2420 to += sizeof(MAILBOX_t);
2421 size = dd_data->context_un.mbox.outExtWLen *
2422 sizeof(uint32_t);
2423 memcpy(to, from, size);
2424 } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
2425 from = (uint8_t *)dd_data->context_un.mbox.
2426 dmp->dma.virt;
2427 to += sizeof(MAILBOX_t);
2428 size = dd_data->context_un.mbox.dmp->size;
2429 memcpy(to, from, size);
2430 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
2431 (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
2432 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
2433 virt;
2434 to += sizeof(MAILBOX_t);
2435 size = pmboxq->u.mb.un.varWords[5];
2436 memcpy(to, from, size);
2437 } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
2438 from = (uint8_t *)dd_data->context_un.
2439 mbox.dmp->dma.virt;
2440 to += sizeof(MAILBOX_t);
2441 size = dd_data->context_un.mbox.dmp->size;
2442 memcpy(to, from, size);
2443 }
2444 }
2445
2446 from = (uint8_t *)dd_data->context_un.mbox.mb;
2447 job = dd_data->context_un.mbox.set_job;
2448 size = job->reply_payload.payload_len;
2449 job->reply->reply_payload_rcv_len =
2450 sg_copy_from_buffer(job->reply_payload.sg_list,
2451 job->reply_payload.sg_cnt,
2452 from, size);
2453 job->reply->result = 0;
2454
2455 dd_data->context_un.mbox.set_job = NULL;
2456 job->dd_data = NULL;
2457 job->job_done(job);
2458 /* need to hold the lock until we call job done to hold off
2459 * the timeout handler returning to the midlayer while
2460 * we are still processing the job
2461 */
2462 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2463
2464 kfree(dd_data->context_un.mbox.mb);
2465 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
2466 kfree(dd_data->context_un.mbox.ext);
2467 if (dd_data->context_un.mbox.dmp) {
2468 dma_free_coherent(&phba->pcidev->dev,
2469 dd_data->context_un.mbox.dmp->size,
2470 dd_data->context_un.mbox.dmp->dma.virt,
2471 dd_data->context_un.mbox.dmp->dma.phys);
2472 kfree(dd_data->context_un.mbox.dmp);
2473 }
2474 if (dd_data->context_un.mbox.rxbmp) {
2475 lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
2476 dd_data->context_un.mbox.rxbmp->phys);
2477 kfree(dd_data->context_un.mbox.rxbmp);
2478 }
2479 kfree(dd_data);
2480 return;
2481}
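/*
 * Editorial sketch of the reply payload assembled above (names are
 * illustrative): the completed mailbox always sits at offset 0 and any
 * extended data follows it directly, so a consumer can read it as
 *
 *	struct {
 *		MAILBOX_t mb;	  // includes mbxStatus
 *		uint8_t	  ext[];  // outExtWLen words, BIU diag data,
 *				  // SLI4 dump data, or event log data
 *	} *resp = reply_payload;
 */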
2482
2483/**
2484 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
2485 * @phba: Pointer to HBA context object.
2486 * @mb: Pointer to a mailbox object.
2487 * @vport: Pointer to a vport object.
2488 *
2489 * Some commands require the port to be offline, some may not be called from
2490 * the application.
2491 **/
2492static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2493 MAILBOX_t *mb, struct lpfc_vport *vport)
2494{
2495 /* return negative error values for bsg job */
2496 switch (mb->mbxCommand) {
2497 /* Offline only */
2498 case MBX_INIT_LINK:
2499 case MBX_DOWN_LINK:
2500 case MBX_CONFIG_LINK:
2501 case MBX_CONFIG_RING:
2502 case MBX_RESET_RING:
2503 case MBX_UNREG_LOGIN:
2504 case MBX_CLEAR_LA:
2505 case MBX_DUMP_CONTEXT:
2506 case MBX_RUN_DIAGS:
2507 case MBX_RESTART:
2508 case MBX_SET_MASK:
2509 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
2510 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2511 "2743 Command 0x%x is illegal in on-line "
2512 "state\n",
2513 mb->mbxCommand);
2514 return -EPERM;
2515 }
2516 case MBX_WRITE_NV:
2517 case MBX_WRITE_VPARMS:
2518 case MBX_LOAD_SM:
2519 case MBX_READ_NV:
2520 case MBX_READ_CONFIG:
2521 case MBX_READ_RCONFIG:
2522 case MBX_READ_STATUS:
2523 case MBX_READ_XRI:
2524 case MBX_READ_REV:
2525 case MBX_READ_LNK_STAT:
2526 case MBX_DUMP_MEMORY:
2527 case MBX_DOWN_LOAD:
2528 case MBX_UPDATE_CFG:
2529 case MBX_KILL_BOARD:
2530 case MBX_LOAD_AREA:
2531 case MBX_LOAD_EXP_ROM:
2532 case MBX_BEACON:
2533 case MBX_DEL_LD_ENTRY:
2534 case MBX_SET_DEBUG:
2535 case MBX_WRITE_WWN:
2536 case MBX_SLI4_CONFIG:
2537 case MBX_READ_EVENT_LOG:
2538 case MBX_READ_EVENT_LOG_STATUS:
2539 case MBX_WRITE_EVENT_LOG:
2540 case MBX_PORT_CAPABILITIES:
2541 case MBX_PORT_IOV_CONTROL:
2542 case MBX_RUN_BIU_DIAG64:
2543 break;
2544 case MBX_SET_VARIABLE:
2545 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2546 "1226 mbox: set_variable 0x%x, 0x%x\n",
2547 mb->un.varWords[0],
2548 mb->un.varWords[1]);
2549 if ((mb->un.varWords[0] == SETVAR_MLOMNT)
2550 && (mb->un.varWords[1] == 1)) {
2551 phba->wait_4_mlo_maint_flg = 1;
2552 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
2553 phba->link_flag &= ~LS_LOOPBACK_MODE;
2554 phba->fc_topology = TOPOLOGY_PT_PT;
2555 }
2556 break;
2557 case MBX_READ_SPARM64:
2558 case MBX_READ_LA:
2559 case MBX_READ_LA64:
2560 case MBX_REG_LOGIN:
2561 case MBX_REG_LOGIN64:
2562 case MBX_CONFIG_PORT:
2563 case MBX_RUN_BIU_DIAG:
2564 default:
2565 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2566 "2742 Unknown Command 0x%x\n",
2567 mb->mbxCommand);
2568 return -EPERM;
2569 }
2570
2571 return 0; /* ok */
2572}
2573
2574/**
2575 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
2576 * @phba: Pointer to HBA context object.
2577 * @mb: Pointer to a mailbox object.
2578 * @vport: Pointer to a vport object.
2579 *
2580 * Allocate a tracking object, mailbox command memory, get a mailbox
2581 * from the mailbox pool, copy the caller mailbox command.
2582 *
2583 * If the port is offline while the sli is active we need to poll for the
2584 * command (the port is being reset) and complete the job; otherwise issue
2585 * the mailbox command and let our completion handler finish the command.
2586 **/
2587static uint32_t
2588lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2589 struct lpfc_vport *vport)
2590{
2591 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
2592 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
2593 /* a 4k buffer to hold the mb and extended data from/to the bsg */
2594 MAILBOX_t *mb = NULL;
2595 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
2596 uint32_t size;
2597 struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
2598 struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
2599 struct ulp_bde64 *rxbpl = NULL;
2600 struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
2601 job->request->rqst_data.h_vendor.vendor_cmd;
2602 uint8_t *ext = NULL;
2603 int rc = 0;
2604 uint8_t *from;
2605
2606 /* in case no data is transferred */
2607 job->reply->reply_payload_rcv_len = 0;
2608
2609 /* check if requested extended data lengths are valid */
2610 if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
2611 (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) {
2612 rc = -ERANGE;
2613 goto job_done;
2614 }
2615
2616 /* allocate our bsg tracking structure */
2617 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2618 if (!dd_data) {
2619 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2620 "2727 Failed allocation of dd_data\n");
2621 rc = -ENOMEM;
2622 goto job_done;
2623 }
2624
2625 mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
2626 if (!mb) {
2627 rc = -ENOMEM;
2628 goto job_done;
2629 }
2630
2631 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2632 if (!pmboxq) {
2633 rc = -ENOMEM;
2634 goto job_done;
2635 }
2636 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2637
2638 size = job->request_payload.payload_len;
2639 sg_copy_to_buffer(job->request_payload.sg_list,
2640 job->request_payload.sg_cnt,
2641 mb, size);
2642
2643 rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
2644 if (rc != 0)
2645 goto job_done; /* must be negative */
2646
2647 pmb = &pmboxq->u.mb;
2648 memcpy(pmb, mb, sizeof(*pmb));
2649 pmb->mbxOwner = OWN_HOST;
2650 pmboxq->vport = vport;
2651
2652 /* If HBA encountered an error attention, allow only DUMP
2653 * or RESTART mailbox commands until the HBA is restarted.
2654 */
2655 if (phba->pport->stopped &&
2656 pmb->mbxCommand != MBX_DUMP_MEMORY &&
2657 pmb->mbxCommand != MBX_RESTART &&
2658 pmb->mbxCommand != MBX_WRITE_VPARMS &&
2659 pmb->mbxCommand != MBX_WRITE_WWN)
2660 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2661 "2797 mbox: Issued mailbox cmd "
2662 "0x%x while in stopped state.\n",
2663 pmb->mbxCommand);
2664
2665 /* Don't allow mailbox commands to be sent when blocked
2666 * or when in the middle of discovery
2667 */
2668 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2669 rc = -EAGAIN;
2670 goto job_done;
2671 }
2672
2673 /* extended mailbox commands will need an extended buffer */
2674 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
2675 ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
2676 if (!ext) {
2677 rc = -ENOMEM;
2678 goto job_done;
2679 }
2680
2681 /* any data for the device? */
2682 if (mbox_req->inExtWLen) {
2683 from = (uint8_t *)mb;
2684 from += sizeof(MAILBOX_t);
2685 memcpy((uint8_t *)ext, from,
2686 mbox_req->inExtWLen * sizeof(uint32_t));
2687 }
2688
2689 pmboxq->context2 = ext;
2690 pmboxq->in_ext_byte_len =
2691 mbox_req->inExtWLen *
2692 sizeof(uint32_t);
2693 pmboxq->out_ext_byte_len =
2694 mbox_req->outExtWLen *
2695 sizeof(uint32_t);
2696 pmboxq->mbox_offset_word =
2697 mbox_req->mbOffset;
2704 }
2705
2706 /* biu diag will need a kernel buffer to transfer the data
2707 * allocate our own buffer and setup the mailbox command to
2708 * use ours
2709 */
2710 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
2711 uint32_t transmit_length = pmb->un.varWords[1];
2712 uint32_t receive_length = pmb->un.varWords[4];
2713 /* transmit length cannot be greater than receive length or
2714 * mailbox extension size
2715 */
2716 if ((transmit_length > receive_length) ||
2717 (transmit_length > MAILBOX_EXT_SIZE)) {
2718 rc = -ERANGE;
2719 goto job_done;
2720 }
2721
2722 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2723 if (!rxbmp) {
2724 rc = -ENOMEM;
2725 goto job_done;
2726 }
2727
2728 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2729 if (!rxbmp->virt) {
2730 rc = -ENOMEM;
2731 goto job_done;
2732 }
2733
2734 INIT_LIST_HEAD(&rxbmp->list);
2735 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2736 dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
2737 if (!dmp) {
2738 rc = -ENOMEM;
2739 goto job_done;
2740 }
2741
2742 INIT_LIST_HEAD(&dmp->dma.list);
2743 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
2744 putPaddrHigh(dmp->dma.phys);
2745 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
2746 putPaddrLow(dmp->dma.phys);
2747
2748 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
2749 putPaddrHigh(dmp->dma.phys +
2750 pmb->un.varBIUdiag.un.s2.
2751 xmit_bde64.tus.f.bdeSize);
2752 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
2753 putPaddrLow(dmp->dma.phys +
2754 pmb->un.varBIUdiag.un.s2.
2755 xmit_bde64.tus.f.bdeSize);
2756
2757 /* copy the transmit data found in the mailbox extension area */
2758 from = (uint8_t *)mb;
2759 from += sizeof(MAILBOX_t);
2760 memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
2761 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
2762 struct READ_EVENT_LOG_VAR *rdEventLog =
2763 &pmb->un.varRdEventLog ;
2764 uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
2765 uint32_t mode = bf_get(lpfc_event_log, rdEventLog);
2766
2767 /* receive length cannot be greater than mailbox
2768 * extension size
2769 */
2770 if (receive_length > MAILBOX_EXT_SIZE) {
2771 rc = -ERANGE;
2772 goto job_done;
2773 }
2774
2775 /* mode zero uses a bde like biu diags command */
2776 if (mode == 0) {
2777
2778 /* rebuild the command for sli4 using our own buffers
2779 * like we do for biu diags
2780 */
2781
2782 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2783 if (!rxbmp) {
2784 rc = -ENOMEM;
2785 goto job_done;
2786 }
2787
2788 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2789 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2790 if (rxbpl) {
2791 INIT_LIST_HEAD(&rxbmp->list);
2792 dmp = diag_cmd_data_alloc(phba, rxbpl,
2793 receive_length, 0);
2794 }
2795
2796 if (!dmp) {
2797 rc = -ENOMEM;
2798 goto job_done;
2799 }
2800
2801 INIT_LIST_HEAD(&dmp->dma.list);
2802 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2803 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2804 }
2805 } else if (phba->sli_rev == LPFC_SLI_REV4) {
2806 if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
2807 /* rebuild the command for sli4 using our own buffers
2808 * like we do for biu diags
2809 */
2810 uint32_t receive_length = pmb->un.varWords[2];
2811 /* receive length cannot be greater than mailbox
2812 * extension size
2813 */
2814 if ((receive_length == 0) ||
2815 (receive_length > MAILBOX_EXT_SIZE)) {
2816 rc = -ERANGE;
2817 goto job_done;
2818 }
2819
2820 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2821 if (!rxbmp) {
2822 rc = -ENOMEM;
2823 goto job_done;
2824 }
2825
2826 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2827 if (!rxbmp->virt) {
2828 rc = -ENOMEM;
2829 goto job_done;
2830 }
2831
2832 INIT_LIST_HEAD(&rxbmp->list);
2833 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2834 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
2835 0);
2836 if (!dmp) {
2837 rc = -ENOMEM;
2838 goto job_done;
2839 }
2840
2841 INIT_LIST_HEAD(&dmp->dma.list);
2842 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2843 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2844 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
2845 pmb->un.varUpdateCfg.co) {
2846 struct ulp_bde64 *bde =
2847 (struct ulp_bde64 *)&pmb->un.varWords[4];
2848
2849 /* bde size cannot be greater than mailbox ext size */
2850 if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
2851 rc = -ERANGE;
2852 goto job_done;
2853 }
2854
2855 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2856 if (!rxbmp) {
2857 rc = -ENOMEM;
2858 goto job_done;
2859 }
2860
2861 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2862 if (!rxbmp->virt) {
2863 rc = -ENOMEM;
2864 goto job_done;
2865 }
2866
2867 INIT_LIST_HEAD(&rxbmp->list);
2868 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2869 dmp = diag_cmd_data_alloc(phba, rxbpl,
2870 bde->tus.f.bdeSize, 0);
2871 if (!dmp) {
2872 rc = -ENOMEM;
2873 goto job_done;
2874 }
2875
2876 INIT_LIST_HEAD(&dmp->dma.list);
2877 bde->addrHigh = putPaddrHigh(dmp->dma.phys);
2878 bde->addrLow = putPaddrLow(dmp->dma.phys);
2879
2880 /* copy the transmit data found in the mailbox
2881 * extension area
2882 */
2883 from = (uint8_t *)mb;
2884 from += sizeof(MAILBOX_t);
2885 memcpy((uint8_t *)dmp->dma.virt, from,
2886 bde->tus.f.bdeSize);
2887 }
2888 }
2889
2890 dd_data->context_un.mbox.rxbmp = rxbmp;
2891 dd_data->context_un.mbox.dmp = dmp;
2892
2893 /* setup wake call as IOCB callback */
2894 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
2895
2896 /* setup context field to pass wait_queue pointer to wake function */
2897 pmboxq->context1 = dd_data;
2898 dd_data->type = TYPE_MBOX;
2899 dd_data->context_un.mbox.pmboxq = pmboxq;
2900 dd_data->context_un.mbox.mb = mb;
2901 dd_data->context_un.mbox.set_job = job;
2902 dd_data->context_un.mbox.ext = ext;
2903 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
2904 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
2905 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
2906 job->dd_data = dd_data;
2907
2908 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
2909 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
2910 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2911 if (rc != MBX_SUCCESS) {
2912 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
2913 goto job_done;
2914 }
2915
2916 /* job finished, copy the data */
2917 memcpy(mb, pmb, sizeof(*pmb));
2918 job->reply->reply_payload_rcv_len =
2919 sg_copy_from_buffer(job->reply_payload.sg_list,
2920 job->reply_payload.sg_cnt,
2921 mb, size);
2922 /* not waiting, mbox already done */
2923 rc = 0;
2924 goto job_done;
2925 }
2926
2927 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
2928 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
2929 return 1; /* job started */
2930
2931job_done:
2932 /* common exit for error or job completed inline */
2933 kfree(mb);
2934 if (pmboxq)
2935 mempool_free(pmboxq, phba->mbox_mem_pool);
2936 kfree(ext);
2937 if (dmp) {
2938 dma_free_coherent(&phba->pcidev->dev,
2939 dmp->size, dmp->dma.virt,
2940 dmp->dma.phys);
2941 kfree(dmp);
2942 }
2943 if (rxbmp) {
2944 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2945 kfree(rxbmp);
2946 }
2947 kfree(dd_data);
2948
2949 return rc;
2950}
2951
2952/**
2953 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
2954 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
2955 **/
2956static int
2957lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
2958{
2959 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2960 struct lpfc_hba *phba = vport->phba;
2961 int rc = 0;
2962
2963 /* in case no data is transferred */
2964 job->reply->reply_payload_rcv_len = 0;
2965 if (job->request_len <
2966 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
2967 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2968 "2737 Received MBOX_REQ request below "
2969 "minimum size\n");
2970 rc = -EINVAL;
2971 goto job_error;
2972 }
2973
2974 if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
2975 rc = -EINVAL;
2976 goto job_error;
2977 }
2978
2979 if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
2980 rc = -EINVAL;
2981 goto job_error;
2982 }
2983
2984 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2985 rc = -EAGAIN;
2986 goto job_error;
2987 }
2988
2989 rc = lpfc_bsg_issue_mbox(phba, job, vport);
2990
2991job_error:
2992 if (rc == 0) {
2993 /* job done */
2994 job->reply->result = 0;
2995 job->dd_data = NULL;
2996 job->job_done(job);
2997 } else if (rc == 1)
2998 /* job submitted, will complete later*/
2999 rc = 0; /* return zero, no error */
3000 else {
3001 /* some error occurred */
3002 job->reply->result = rc;
3003 job->dd_data = NULL;
3004 }
3005
3006 return rc;
3007}
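/*
 * Editorial note with a sketch (not part of the driver): as checked
 * above, an LPFC_BSG_VENDOR_MBOX job must supply request and reply
 * payloads of exactly BSG_MBOX_SIZE bytes each.  A caller therefore
 * stages its command as
 *
 *	uint8_t payload[BSG_MBOX_SIZE] = { 0 };
 *	MAILBOX_t *mb = (MAILBOX_t *)payload;	    // mailbox at offset 0
 *	uint8_t *ext = payload + sizeof(MAILBOX_t); // extension area
 *
 * with inExtWLen/outExtWLen in the dfc_mbox_req header advertising how
 * many words of the extension area travel in each direction.
 */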
3008
3009/**
3010 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
3011 * @phba: Pointer to HBA context object.
3012 * @cmdiocbq: Pointer to command iocb.
3013 * @rspiocbq: Pointer to response iocb.
3014 *
3015 * This function is the completion handler for iocbs issued using
3016 * lpfc_menlo_cmd function. This function is called by the
3017 * ring event handler function without any lock held. This function
3018 * can be called from both worker thread context and interrupt
3019 * context. This function also can be called from another thread which
3020 * cleans up the SLI layer objects.
3021 * This function copies the contents of the response iocb to the
3022 * response iocb memory object provided with the command iocb,
3023 * unmaps the payload buffers, and completes the bsg job back to
3024 * userspace.
3025 **/
3026static void
3027lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
3028 struct lpfc_iocbq *cmdiocbq,
3029 struct lpfc_iocbq *rspiocbq)
3030{
3031 struct bsg_job_data *dd_data;
3032 struct fc_bsg_job *job;
3033 IOCB_t *rsp;
3034 struct lpfc_dmabuf *bmp;
3035 struct lpfc_bsg_menlo *menlo;
3036 unsigned long flags;
3037 struct menlo_response *menlo_resp;
3038 int rc = 0;
3039
3040 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3041 dd_data = cmdiocbq->context1;
3042 if (!dd_data) {
3043 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3044 return;
3045 }
3046
3047 menlo = &dd_data->context_un.menlo;
3048 job = menlo->set_job;
3049 job->dd_data = NULL; /* so timeout handler does not reply */
3050
3051 spin_lock_irqsave(&phba->hbalock, flags);
3052 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3053 if (cmdiocbq->context2 && rspiocbq)
3054 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3055 &rspiocbq->iocb, sizeof(IOCB_t));
3056 spin_unlock_irqrestore(&phba->hbalock, flags);
3057
3058 bmp = menlo->bmp;
3059 rspiocbq = menlo->rspiocbq;
3060 rsp = &rspiocbq->iocb;
3061
3062 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
3063 job->request_payload.sg_cnt, DMA_TO_DEVICE);
3064 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
3065 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
3066
3067 /* always return the xri; it is used in the case
3068 * of a menlo download to allow the data to be sent as a continuation
3069 * of the exchange.
3070 */
3071 menlo_resp = (struct menlo_response *)
3072 job->reply->reply_data.vendor_reply.vendor_rsp;
3073 menlo_resp->xri = rsp->ulpContext;
3074 if (rsp->ulpStatus) {
3075 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
3076 switch (rsp->un.ulpWord[4] & 0xff) {
3077 case IOERR_SEQUENCE_TIMEOUT:
3078 rc = -ETIMEDOUT;
3079 break;
3080 case IOERR_INVALID_RPI:
3081 rc = -EFAULT;
3082 break;
3083 default:
3084 rc = -EACCES;
3085 break;
3086 }
3087 } else
3088 rc = -EACCES;
3089 } else
3090 job->reply->reply_payload_rcv_len =
3091 rsp->un.genreq64.bdl.bdeSize;
3092
3093 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
3094 lpfc_sli_release_iocbq(phba, rspiocbq);
3095 lpfc_sli_release_iocbq(phba, cmdiocbq);
3096 kfree(bmp);
3097 kfree(dd_data);
3098 /* make error code available to userspace */
3099 job->reply->result = rc;
3100 /* complete the job back to userspace */
3101 job->job_done(job);
3102 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3103 return;
3104}
3105
3106/**
3107 * lpfc_menlo_cmd - send a command to the menlo hardware
3108 * @job: fc_bsg_job to handle
3109 *
3110 * This function issues a gen request 64 CR iocb for all menlo cmd requests,
3111 * all the command completions will return the xri for the command.
3112 * For menlo data requests a gen request 64 CX is used to continue the exchange
3113 * supplied in the menlo request header xri field.
3114 **/
3115static int
3116lpfc_menlo_cmd(struct fc_bsg_job *job)
3117{
3118 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
3119 struct lpfc_hba *phba = vport->phba;
3120 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
3121 IOCB_t *cmd, *rsp;
3122 int rc = 0;
3123 struct menlo_command *menlo_cmd;
3124 struct menlo_response *menlo_resp;
3125 struct lpfc_dmabuf *bmp = NULL;
3126 int request_nseg;
3127 int reply_nseg;
3128 struct scatterlist *sgel = NULL;
3129 int numbde;
3130 dma_addr_t busaddr;
3131 struct bsg_job_data *dd_data;
3132 struct ulp_bde64 *bpl = NULL;
3133
3134 /* in case no data is returned return just the return code */
3135 job->reply->reply_payload_rcv_len = 0;
3136
3137 if (job->request_len <
3138 sizeof(struct fc_bsg_request) +
3139 sizeof(struct menlo_command)) {
3140 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3141 "2784 Received MENLO_CMD request below "
3142 "minimum size\n");
3143 rc = -ERANGE;
3144 goto no_dd_data;
3145 }
3146
3147 if (job->reply_len <
3148 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
3149 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3150 "2785 Received MENLO_CMD reply below "
3151 "minimum size\n");
3152 rc = -ERANGE;
3153 goto no_dd_data;
3154 }
3155
3156 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
3157 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3158 "2786 Adapter does not support menlo "
3159 "commands\n");
3160 rc = -EPERM;
3161 goto no_dd_data;
3162 }
3163
3164 menlo_cmd = (struct menlo_command *)
3165 job->request->rqst_data.h_vendor.vendor_cmd;
3166
3167 menlo_resp = (struct menlo_response *)
3168 job->reply->reply_data.vendor_reply.vendor_rsp;
3169
3170 /* allocate our bsg tracking structure */
3171 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3172 if (!dd_data) {
3173 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3174 "2787 Failed allocation of dd_data\n");
3175 rc = -ENOMEM;
3176 goto no_dd_data;
3177 }
3178
3179 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3180 if (!bmp) {
3181 rc = -ENOMEM;
3182 goto free_dd;
3183 }
3184
3185 cmdiocbq = lpfc_sli_get_iocbq(phba);
3186 if (!cmdiocbq) {
3187 rc = -ENOMEM;
3188 goto free_bmp;
3189 }
3190
3191 rspiocbq = lpfc_sli_get_iocbq(phba);
3192 if (!rspiocbq) {
3193 rc = -ENOMEM;
3194 goto free_cmdiocbq;
3195 }
3196
3197 rsp = &rspiocbq->iocb;
3198
3199 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
3200 if (!bmp->virt) {
3201 rc = -ENOMEM;
3202 goto free_rspiocbq;
3203 }
3204
3205 INIT_LIST_HEAD(&bmp->list);
3206 bpl = (struct ulp_bde64 *) bmp->virt;
3207 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
3208 job->request_payload.sg_cnt, DMA_TO_DEVICE);
3209 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
3210 busaddr = sg_dma_address(sgel);
3211 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3212 bpl->tus.f.bdeSize = sg_dma_len(sgel);
3213 bpl->tus.w = cpu_to_le32(bpl->tus.w);
3214 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
3215 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
3216 bpl++;
3217 }
3218
3219 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
3220 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
3221 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
3222 busaddr = sg_dma_address(sgel);
3223 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
3224 bpl->tus.f.bdeSize = sg_dma_len(sgel);
3225 bpl->tus.w = cpu_to_le32(bpl->tus.w);
3226 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
3227 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
3228 bpl++;
3229 }
3230
3231 cmd = &cmdiocbq->iocb;
3232 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
3233 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
3234 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
3235 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
3236 cmd->un.genreq64.bdl.bdeSize =
3237 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
3238 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
3239 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
3240 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
3241 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
3242 cmd->ulpBdeCount = 1;
3243 cmd->ulpClass = CLASS3;
3244 cmd->ulpOwner = OWN_CHIP;
3245 cmd->ulpLe = 1; /* last entry in the bpl */
3246 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
3247 cmdiocbq->vport = phba->pport;
3248 /* We want the firmware to timeout before we do */
3249 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
3250 cmdiocbq->context3 = bmp;
3251 cmdiocbq->context2 = rspiocbq;
3252 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
3253 cmdiocbq->context1 = dd_data;
3255 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
3256 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
3257 cmd->ulpPU = MENLO_PU; /* 3 */
3258 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
3259 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
3260 } else {
3261 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
3262 cmd->ulpPU = 1;
3263 cmd->un.ulpWord[4] = 0;
3264 cmd->ulpContext = menlo_cmd->xri;
3265 }
3266
3267 dd_data->type = TYPE_MENLO;
3268 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
3269 dd_data->context_un.menlo.rspiocbq = rspiocbq;
3270 dd_data->context_un.menlo.set_job = job;
3271 dd_data->context_un.menlo.bmp = bmp;
3272
3273 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
3274 MENLO_TIMEOUT - 5);
3275 if (rc == IOCB_SUCCESS)
3276 return 0; /* done for now */
3277
3278 /* iocb failed so cleanup */
3279 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
3280 job->request_payload.sg_cnt, DMA_TO_DEVICE);
3281 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
3282 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
3283
3284 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
3285
3286free_rspiocbq:
3287 lpfc_sli_release_iocbq(phba, rspiocbq);
3288free_cmdiocbq:
3289 lpfc_sli_release_iocbq(phba, cmdiocbq);
3290free_bmp:
3291 kfree(bmp);
3292free_dd:
3293 kfree(dd_data);
3294no_dd_data:
3295 /* make error code available to userspace */
3296 job->reply->result = rc;
3297 job->dd_data = NULL;
3298 return rc;
3299}
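/*
 * Editorial sketch (field values illustrative): a MENLO_CMD request is
 * sent as a GEN_REQUEST64_CR and its reply always carries the xri of
 * the exchange; a follow-up MENLO_DATA request continues that exchange
 * by passing the xri back, which the code above loads into ulpContext
 * for the GEN_REQUEST64_CX:
 *
 *	struct menlo_command cmd  = { .cmd = LPFC_BSG_VENDOR_MENLO_CMD };
 *	... submit, then read back struct menlo_response resp ...
 *	struct menlo_command data = { .cmd = LPFC_BSG_VENDOR_MENLO_DATA,
 *				      .xri = resp.xri };
 */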
3300/**
3301 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
3302 * @job: fc_bsg_job to handle
3303 **/
3304static int
3305lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
3306{
3307 int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
3308 int rc;
3309
3310 switch (command) {
3311 case LPFC_BSG_VENDOR_SET_CT_EVENT:
3312 rc = lpfc_bsg_hba_set_event(job);
3313 break;
3314 case LPFC_BSG_VENDOR_GET_CT_EVENT:
3315 rc = lpfc_bsg_hba_get_event(job);
3316 break;
3317 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
3318 rc = lpfc_bsg_send_mgmt_rsp(job);
3319 break;
3320 case LPFC_BSG_VENDOR_DIAG_MODE:
3321 rc = lpfc_bsg_diag_mode(job);
3322 break;
3323 case LPFC_BSG_VENDOR_DIAG_TEST:
3324 rc = lpfc_bsg_diag_test(job);
3325 break;
3326 case LPFC_BSG_VENDOR_GET_MGMT_REV:
3327 rc = lpfc_bsg_get_dfc_rev(job);
3328 break;
3329 case LPFC_BSG_VENDOR_MBOX:
3330 rc = lpfc_bsg_mbox_cmd(job);
3331 break;
3332 case LPFC_BSG_VENDOR_MENLO_CMD:
3333 case LPFC_BSG_VENDOR_MENLO_DATA:
3334 rc = lpfc_menlo_cmd(job);
3335 break;
3336 default:
3337 rc = -EINVAL;
3338 job->reply->reply_payload_rcv_len = 0;
3339 /* make error code available to userspace */
3340 job->reply->result = rc;
3341 break;
3342 }
3343
3344 return rc;
3345}
3346
3347/**
3348 * lpfc_bsg_request - handle a bsg request from the FC transport
3349 * @job: fc_bsg_job to handle
3350 **/
3351int
3352lpfc_bsg_request(struct fc_bsg_job *job)
3353{
3354 uint32_t msgcode;
3355 int rc;
3356
3357 msgcode = job->request->msgcode;
3358 switch (msgcode) {
3359 case FC_BSG_HST_VENDOR:
3360 rc = lpfc_bsg_hst_vendor(job);
3361 break;
3362 case FC_BSG_RPT_ELS:
3363 rc = lpfc_bsg_rport_els(job);
3364 break;
3365 case FC_BSG_RPT_CT:
3366 rc = lpfc_bsg_send_mgmt_cmd(job);
3367 break;
3368 default:
3369 rc = -EINVAL;
3370 job->reply->reply_payload_rcv_len = 0;
3371 /* make error code available to userspace */
3372 job->reply->result = rc;
3373 break;
3374 }
3375
3376 return rc;
3377}
3378
3379/**
3380 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
3381 * @job: fc_bsg_job that has timed out
3382 *
3383 * This function just aborts the job's IOCB. The aborted IOCB will return to
3384 * the waiting function which will handle passing the error back to userspace
3385 **/
3386int
3387lpfc_bsg_timeout(struct fc_bsg_job *job)
3388{
3389 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
3390 struct lpfc_hba *phba = vport->phba;
3391 struct lpfc_iocbq *cmdiocb;
3392 struct lpfc_bsg_event *evt;
3393 struct lpfc_bsg_iocb *iocb;
3394 struct lpfc_bsg_mbox *mbox;
3395 struct lpfc_bsg_menlo *menlo;
3396 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3397 struct bsg_job_data *dd_data;
3398 unsigned long flags;
3399
3400 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3401 dd_data = (struct bsg_job_data *)job->dd_data;
3402 /* timeout and completion crossed paths if no dd_data */
3403 if (!dd_data) {
3404 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3405 return 0;
3406 }
3407
3408 switch (dd_data->type) {
3409 case TYPE_IOCB:
3410 iocb = &dd_data->context_un.iocb;
3411 cmdiocb = iocb->cmdiocbq;
3412 /* hint to completion handler that the job timed out */
3413 job->reply->result = -EAGAIN;
3414 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3415 /* this will call our completion handler */
3416 spin_lock_irq(&phba->hbalock);
3417 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
3418 spin_unlock_irq(&phba->hbalock);
3419 break;
3420 case TYPE_EVT:
3421 evt = dd_data->context_un.evt;
3422 /* this event has no job anymore */
3423 evt->set_job = NULL;
3424 job->dd_data = NULL;
3425 job->reply->reply_payload_rcv_len = 0;
3426 /* Return -EAGAIN which is our way of signalling the
3427 * app to retry.
3428 */
3429 job->reply->result = -EAGAIN;
3430 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3431 job->job_done(job);
3432 break;
3433 case TYPE_MBOX:
3434 mbox = &dd_data->context_un.mbox;
3435 /* this mbox has no job anymore */
3436 mbox->set_job = NULL;
3437 job->dd_data = NULL;
3438 job->reply->reply_payload_rcv_len = 0;
3439 job->reply->result = -EAGAIN;
3440 /* the mbox completion handler can now be run */
3441 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3442 job->job_done(job);
3443 break;
3444 case TYPE_MENLO:
3445 menlo = &dd_data->context_un.menlo;
3446 cmdiocb = menlo->cmdiocbq;
3447 /* hint to completion handler that the job timed out */
3448 job->reply->result = -EAGAIN;
3449 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3450 /* this will call our completion handler */
3451 spin_lock_irq(&phba->hbalock);
3452 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
3453 spin_unlock_irq(&phba->hbalock);
3454 break;
3455 default:
3456 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3457 break;
3458 }
3459
3460 /* scsi transport fc fc_bsg_job_timeout expects a zero return code;
3461 * otherwise an error message will be displayed on the console,
3462 * so always return success (zero)
3463 */
3464 return 0;
3465}