[SCSI] lpfc 8.3.8: Add code to display logical link speed
[deliverable/linux.git] / drivers / scsi / lpfc / lpfc_bsg.c
CommitLineData
f1c3b0fc
JS
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#include <linux/interrupt.h>
22#include <linux/mempool.h>
23#include <linux/pci.h>
24
25#include <scsi/scsi.h>
26#include <scsi/scsi_host.h>
27#include <scsi/scsi_transport_fc.h>
28#include <scsi/scsi_bsg_fc.h>
6a9c52cf 29#include <scsi/fc/fc_fs.h>
f1c3b0fc
JS
30
31#include "lpfc_hw4.h"
32#include "lpfc_hw.h"
33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
35#include "lpfc_nl.h"
36#include "lpfc_disc.h"
37#include "lpfc_scsi.h"
38#include "lpfc.h"
39#include "lpfc_logmsg.h"
40#include "lpfc_crtn.h"
41#include "lpfc_vport.h"
42#include "lpfc_version.h"
43
/**
 * lpfc_bsg_rport_ct - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 *
 * Builds a GEN_REQUEST64_CR iocb whose buffer descriptor list (held in
 * @bmp) maps the bsg request payload followed by the reply payload,
 * issues it synchronously on the ELS ring, and converts the completion
 * status into an errno in job->reply->result.  Always completes the job
 * via job->job_done() and returns 0 to the transport.
 */
static int
lpfc_bsg_rport_ct(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* hold a node reference for the lifetime of the command */
	if (!lpfc_nlp_get(ndlp)) {
		job->reply->result = -ENODEV;
		return 0;
	}

	/* an ELS command is outstanding to this node; do not mix in CT */
	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp_exit;
	}

	spin_lock_irq(shost->host_lock);
	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		spin_unlock_irq(shost->host_lock);
		goto free_ndlp_exit;
	}
	cmd = &cmdiocbq->iocb;

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	spin_unlock_irq(shost->host_lock);

	rsp = &rspiocbq->iocb;

	/* bmp will hold the BDE list describing both payloads */
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		/* the error labels below expect the host lock to be held */
		spin_lock_irq(shost->host_lock);
		goto free_rspiocbq;
	}

	/*
	 * NOTE(review): lpfc_mbuf_alloc() is called with the host lock
	 * held here - confirm it cannot sleep in this configuration.
	 */
	spin_lock_irq(shost->host_lock);
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	spin_unlock_irq(shost->host_lock);

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;

	/* map the request payload and describe it with output BDEs */
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* map the reply payload; its input BDEs follow the request BDEs */
	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* build the GEN_REQUEST64_CR iocb pointing at the BDE list */
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;

	timeout = phba->fc_ratov * 2;
	/* expose the iocb so lpfc_bsg_timeout() can abort it */
	job->dd_data = cmdiocbq;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
					timeout + LPFC_DRVR_TIMEOUT);

	/* on timeout the firmware still owns the buffers; keep mappings */
	if (rc != IOCB_TIMEDOUT) {
		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (rc == IOCB_TIMEDOUT) {
		/* cmdiocbq and bmp are deliberately not freed here - the
		 * delayed completion still references them */
		lpfc_sli_release_iocbq(phba, rspiocbq);
		rc = -EACCES;
		goto free_ndlp_exit;
	}

	if (rc != IOCB_SUCCESS) {
		rc = -EACCES;
		goto free_outdmp;
	}

	if (rsp->ulpStatus) {
		/* map the low byte of word 4 of a local reject to an errno */
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
			goto free_outdmp;
		}
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

free_outdmp:
	spin_lock_irq(shost->host_lock);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
free_bmp:
	kfree(bmp);
free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	spin_unlock_irq(shost->host_lock);
free_ndlp_exit:
	lpfc_nlp_put(ndlp);

	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return 0;
}
227
/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 *
 * Preps an ELS iocb for the rport's node, replaces the driver-allocated
 * command/response buffers with BDEs mapping the bsg request and reply
 * payloads, issues the iocb synchronously on the ELS ring, and
 * translates the result (including LS_RJT details) into the bsg reply.
 */
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *rsp;
	uint16_t rpi = 0;
	struct lpfc_dmabuf *pcmd;
	struct lpfc_dmabuf *prsp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct ulp_bde64 *bpl;
	int iocb_status;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* hold a node reference until the iocb completes */
	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto out;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		lpfc_nlp_put(ndlp);
		rc = -ENOMEM;
		goto out;
	}

	rsp = &rspiocbq->iocb;
	rpi = ndlp->nlp_rpi;

	cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);

	if (!cmdiocbq) {
		/*
		 * NOTE(review): this path returns without job_done() and
		 * without dropping the lpfc_nlp_get() reference taken
		 * above - looks like a node reference leak; confirm.
		 */
		lpfc_sli_release_iocbq(phba, rspiocbq);
		return -EIO;
	}

	/* expose the iocb so lpfc_bsg_timeout() can abort it */
	job->dd_data = cmdiocbq;
	/* discard the payload buffers lpfc_prep_els_iocb allocated; the
	 * bsg scatterlists are used instead */
	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *) pcmd->list.next;

	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(prsp);
	cmdiocbq->context2 = NULL;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	bpl = (struct ulp_bde64 *) pbuflist->virt;

	/* map the bsg request payload as output BDEs */
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);

	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* map the bsg reply payload as input BDEs, after the request */
	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;

	iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					rspiocbq, (phba->fc_ratov * 2)
					       + LPFC_DRVR_TIMEOUT);

	/* release the new ndlp once the iocb completes */
	lpfc_nlp_put(ndlp);
	/* on timeout the firmware still owns the buffers; keep mappings */
	if (iocb_status != IOCB_TIMEDOUT) {
		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (iocb_status == IOCB_SUCCESS) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			job->reply->reply_payload_rcv_len =
				rsp->un.elsreq64.bdl.bdeSize;
			rc = 0;
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			struct fc_bsg_ctels_reply *els_reply;
			/* LS_RJT data returned in word 4 */
			uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];

			/* an LS_RJT is a successful transport exchange;
			 * pass the reject details up instead of an errno */
			els_reply = &job->reply->reply_data.ctels_reply;
			job->reply->result = 0;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[0];
			els_reply->rjt_data.reason_code = rjt_data[1];
			els_reply->rjt_data.reason_explanation = rjt_data[2];
			els_reply->rjt_data.vendor_unique = rjt_data[3];
		} else
			rc = -EIO;
	} else
		rc = -EIO;

	if (iocb_status != IOCB_TIMEDOUT)
		lpfc_els_free_iocb(phba, cmdiocbq);

	lpfc_sli_release_iocbq(phba, rspiocbq);

out:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return 0;
}
381
/* A process waiting (via the SET_CT_EVENT bsg command) for unsolicited
 * CT events.  Linked on phba->ct_ev_waiters; list and refcount are
 * protected by phba->ct_event_mutex.
 */
struct lpfc_ct_event {
	struct list_head node;
	int ref;		/* freed when the count drops below zero */
	wait_queue_head_t wq;	/* woken when an event is queued */

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;	/* CT FsType this waiter matches */
	uint32_t reg_id;	/* registration id chosen by userspace */

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;	/* delivered, awaiting GET_EVENT */
	struct list_head events_to_see;	/* received, not yet handed over */
};
400
/* One received unsolicited event, queued on an lpfc_ct_event's lists */
struct event_data {
	struct list_head node;
	uint32_t type;		/* FC_REG_CT_EVENT */
	uint32_t immed_dat;	/* ulpContext, or ct_ctx slot index on SLI4 */
	void *data;		/* kzalloc'd copy of the received payload */
	uint32_t len;		/* byte length of data */
};
408
409static struct lpfc_ct_event *
410lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
411{
412 struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
413 if (!evt)
414 return NULL;
415
416 INIT_LIST_HEAD(&evt->events_to_get);
417 INIT_LIST_HEAD(&evt->events_to_see);
418 evt->req_id = ev_req_id;
419 evt->reg_id = ev_reg_id;
420 evt->wait_time_stamp = jiffies;
421 init_waitqueue_head(&evt->wq);
422
423 return evt;
424}
425
426static void
427lpfc_ct_event_free(struct lpfc_ct_event *evt)
428{
429 struct event_data *ed;
430
431 list_del(&evt->node);
432
433 while (!list_empty(&evt->events_to_get)) {
434 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
435 list_del(&ed->node);
436 kfree(ed->data);
437 kfree(ed);
438 }
439
440 while (!list_empty(&evt->events_to_see)) {
441 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
442 list_del(&ed->node);
443 kfree(ed->data);
444 kfree(ed);
445 }
446
447 kfree(evt);
448}
449
450static inline void
451lpfc_ct_event_ref(struct lpfc_ct_event *evt)
452{
453 evt->ref++;
454}
455
456static inline void
457lpfc_ct_event_unref(struct lpfc_ct_event *evt)
458{
459 if (--evt->ref < 0)
460 lpfc_ct_event_free(evt);
461}
462
/* Emulex vendor-unique CT FsType marking loopback diagnostic frames */
#define SLI_CT_ELX_LOOPBACK 0x10

/* Command codes carried in loopback CT frames */
enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};
469
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: pointer to the HBA that received the command
 * @pring: ring the command arrived on
 * @piocbq: unsolicited iocb, possibly heading a list of continuations
 *
 * This function is called when an unsolicited CT command is received.  It
 * forwards the event to any processes registerd to receive CT events:
 * the full payload is copied into a freshly allocated event_data, queued
 * on each matching waiter's events_to_see list, and the waiter is woken.
 */
void
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_ct_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;

	/* chain a local head in front of piocbq so the whole iocb
	 * sequence can be walked below */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	/* locate the buffer holding the CT request header */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}

	/*
	 * NOTE(review): dmabuf is dereferenced without a NULL check;
	 * confirm lpfc_sli_ringpostbuf_get() cannot fail on this path.
	 */
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	/* hand a copy of the event to every waiter with a matching id */
	mutex_lock(&phba->ct_event_mutex);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->req_id != evt_req_id)
			continue;

		lpfc_ct_event_ref(evt);

		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (!evt_dat) {
			lpfc_ct_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		mutex_unlock(&phba->ct_event_mutex);

		/* compute total payload length across the iocb sequence */
		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (!evt_dat->data) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			mutex_lock(&phba->ct_event_mutex);
			lpfc_ct_event_unref(evt);
			mutex_unlock(&phba->ct_event_mutex);
			goto error_ct_unsol_exit;
		}

		/* copy every BDE's buffer contents into evt_dat->data */
		list_for_each_entry(iocbq, &head, list) {
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				int size = 0;
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					/* never copy past the computed total */
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					mutex_lock(&phba->ct_event_mutex);
					lpfc_ct_event_unref(evt);
					mutex_unlock(&phba->ct_event_mutex);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				/* recycle or free the receive buffer */
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_XRI_SETUP:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						else
							lpfc_in_buf_free(phba,
									dmabuf);
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		mutex_lock(&phba->ct_event_mutex);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			/* SLI4: remember oxid/SID in the next ct_ctx slot
			 * and pass the slot index to userspace instead.
			 * NOTE(review): the hard-coded 64-entry wrap assumes
			 * old contexts are consumed in time - confirm.
			 */
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
						piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].SID =
					piocbq->iocb.un.rcvels.remoteID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		wake_up_interruptible(&evt->wq);
		lpfc_ct_event_unref(evt);
		/* loopback frames are delivered to a single waiter only */
		if (evt_req_id == SLI_CT_ELX_LOOPBACK)
			break;
	}
	mutex_unlock(&phba->ct_event_mutex);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);

	return;
}
667
/**
 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 *
 * Registers (or re-arms) an event waiter for the caller's registration
 * id, then sleeps until lpfc_bsg_ct_unsol_event() queues an event.  The
 * received event is moved from events_to_see to events_to_get, where a
 * later GET_EVENT request will fetch it.  Sets -EINTR in the job result
 * if the sleep is interrupted by a signal.
 */
static int
lpfc_bsg_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_ct_event *evt;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		return -EINVAL;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	/* reuse an existing waiter for this registration id if present */
	mutex_lock(&phba->ct_event_mutex);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_ct_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	mutex_unlock(&phba->ct_event_mutex);

	/* loop ran to completion without a match: create a new waiter */
	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_ct_event_new(event_req->ev_reg_id,
					event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			return -ENOMEM;
		}

		mutex_lock(&phba->ct_event_mutex);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_ct_event_ref(evt);
		mutex_unlock(&phba->ct_event_mutex);
	}

	evt->waiting = 1;
	/* sleep until lpfc_bsg_ct_unsol_event() queues an event */
	if (wait_event_interruptible(evt->wq,
				     !list_empty(&evt->events_to_see))) {
		mutex_lock(&phba->ct_event_mutex);
		lpfc_ct_event_unref(evt); /* release ref */
		lpfc_ct_event_unref(evt); /* delete */
		mutex_unlock(&phba->ct_event_mutex);
		rc = -EINTR;
		goto set_event_out;
	}

	evt->wait_time_stamp = jiffies;
	evt->waiting = 0;

	/* hand the oldest seen event over to the events_to_get list */
	mutex_lock(&phba->ct_event_mutex);
	list_move(evt->events_to_see.prev, &evt->events_to_get);
	lpfc_ct_event_unref(evt); /* release ref */
	mutex_unlock(&phba->ct_event_mutex);

set_event_out:
	/* set_event carries no reply payload */
	job->reply->reply_payload_rcv_len = 0;
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return 0;
}
748
749/**
750 * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
751 * @job: GET_EVENT fc_bsg_job
752 */
753static int
754lpfc_bsg_get_event(struct fc_bsg_job *job)
755{
756 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
757 struct lpfc_hba *phba = vport->phba;
758 struct get_ct_event *event_req;
759 struct get_ct_event_reply *event_reply;
760 struct lpfc_ct_event *evt;
761 struct event_data *evt_dat = NULL;
762 int rc = 0;
763
764 if (job->request_len <
765 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
766 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
767 "2613 Received GET_CT_EVENT request below "
768 "minimum size\n");
769 return -EINVAL;
770 }
771
772 event_req = (struct get_ct_event *)
773 job->request->rqst_data.h_vendor.vendor_cmd;
774
775 event_reply = (struct get_ct_event_reply *)
776 job->reply->reply_data.vendor_reply.vendor_rsp;
777
778 mutex_lock(&phba->ct_event_mutex);
779 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
780 if (evt->reg_id == event_req->ev_reg_id) {
781 if (list_empty(&evt->events_to_get))
782 break;
783 lpfc_ct_event_ref(evt);
784 evt->wait_time_stamp = jiffies;
785 evt_dat = list_entry(evt->events_to_get.prev,
786 struct event_data, node);
787 list_del(&evt_dat->node);
788 break;
789 }
790 }
791 mutex_unlock(&phba->ct_event_mutex);
792
793 if (!evt_dat) {
794 job->reply->reply_payload_rcv_len = 0;
795 rc = -ENOENT;
796 goto error_get_event_exit;
797 }
798
799 if (evt_dat->len > job->reply_payload.payload_len) {
800 evt_dat->len = job->reply_payload.payload_len;
801 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
802 "2618 Truncated event data at %d "
803 "bytes\n",
804 job->reply_payload.payload_len);
805 }
806
807 event_reply->immed_data = evt_dat->immed_dat;
808
809 if (evt_dat->len > 0)
810 job->reply->reply_payload_rcv_len =
811 sg_copy_from_buffer(job->reply_payload.sg_list,
812 job->reply_payload.sg_cnt,
813 evt_dat->data, evt_dat->len);
814 else
815 job->reply->reply_payload_rcv_len = 0;
816 rc = 0;
817
818 if (evt_dat)
819 kfree(evt_dat->data);
820 kfree(evt_dat);
821 mutex_lock(&phba->ct_event_mutex);
822 lpfc_ct_event_unref(evt);
823 mutex_unlock(&phba->ct_event_mutex);
824
825error_get_event_exit:
826 /* make error code available to userspace */
827 job->reply->result = rc;
828 /* complete the job back to userspace */
829 job->job_done(job);
830
831 return rc;
832}
833
834/**
835 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
836 * @job: fc_bsg_job to handle
837 */
838static int
839lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
840{
841 int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
842
843 switch (command) {
844 case LPFC_BSG_VENDOR_SET_CT_EVENT:
845 return lpfc_bsg_set_event(job);
846 break;
847
848 case LPFC_BSG_VENDOR_GET_CT_EVENT:
849 return lpfc_bsg_get_event(job);
850 break;
851
852 default:
853 return -EINVAL;
854 }
855}
856
857/**
858 * lpfc_bsg_request - handle a bsg request from the FC transport
859 * @job: fc_bsg_job to handle
860 */
861int
862lpfc_bsg_request(struct fc_bsg_job *job)
863{
864 uint32_t msgcode;
865 int rc = -EINVAL;
866
867 msgcode = job->request->msgcode;
868
869 switch (msgcode) {
870 case FC_BSG_HST_VENDOR:
871 rc = lpfc_bsg_hst_vendor(job);
872 break;
873 case FC_BSG_RPT_ELS:
874 rc = lpfc_bsg_rport_els(job);
875 break;
876 case FC_BSG_RPT_CT:
877 rc = lpfc_bsg_rport_ct(job);
878 break;
879 default:
880 break;
881 }
882
883 return rc;
884}
885
886/**
887 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
888 * @job: fc_bsg_job that has timed out
889 *
890 * This function just aborts the job's IOCB. The aborted IOCB will return to
891 * the waiting function which will handle passing the error back to userspace
892 */
893int
894lpfc_bsg_timeout(struct fc_bsg_job *job)
895{
896 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
897 struct lpfc_hba *phba = vport->phba;
898 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
899 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
900
901 if (cmdiocb)
902 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
903
904 return 0;
905}
This page took 0.17653 seconds and 5 git commands to generate.