/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/*
 * This function is called with no lock held when there is a resource
 * error in the driver or in the firmware.
 */
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
	     WORKER_RAMP_DOWN_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return;
}

/*
 * This function is called with no lock held when there is a successful
 * SCSI command completion.
 */
static inline void
lpfc_rampup_queue_depth(struct lpfc_hba *phba,
			struct scsi_device *sdev)
{
	unsigned long flags;

	atomic_inc(&phba->num_cmd_success);

	if (phba->cfg_lun_queue_depth <= sdev->queue_depth)
		return;

	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
	     WORKER_RAMP_UP_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

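/*
 * Worker-thread handler for WORKER_RAMP_DOWN_QUEUE: lower every LUN
 * queue depth in proportion to the recent resource-error rate.
 */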
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
							  new_queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

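/*
 * Worker-thread handler for WORKER_RAMP_UP_QUEUE: bump every LUN queue
 * depth by one and restart the error/success counters.
 */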
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
			}
		}
	lpfc_destroy_vport_work_array(vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
		       sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

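/*
 * Take a scsi buffer from the driver's free list, if one is available.
 * Returns NULL when the list is empty.  seg_cnt and nonsg_phys are
 * cleared here because the buffer is recycled across commands.
 */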
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

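/*
 * Return a scsi buffer to the tail of the free list.  Clearing pCmd
 * also tells the abort handler that the command has completed.
 */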
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

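/*
 * Map the command's data buffers for DMA and build one BPL entry per
 * scatter-gather segment.  Returns 0 on success, 1 if the mapping fails
 * or exceeds the configured segment limit.
 */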
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			num_bde++;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
	return 0;
}

static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}

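/*
 * Decode the FCP response for a failed command: copy back sense data,
 * validate the response length, and convert underrun/overrun conditions
 * into a midlayer host status.
 */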
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = vport->phba;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t vpi = vport->vpi;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_log(phba, KERN_WARNING, logit,
			"%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
			"Data: x%x x%x x%x x%x x%x\n",
			phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
			be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d (%d):0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n",
				phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
				scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				cmnd->underflow);

		/*
		 * If there is an under run, check if the under run reported
		 * by the storage array is the same as the under run reported
		 * by the HBA.  If they differ, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_log(phba, KERN_WARNING,
					LOG_FCP | LOG_FCP_ERROR,
					"%d (%d):0735 FCP Read Check Error "
					"and Underrun Data: x%x x%x x%x x%x\n",
					phba->brd_no, vpi,
					be32_to_cpu(fcpcmd->fcpDl),
					scsi_get_resid(cmnd), fcpi_parm,
					cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d (%d):0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n",
					phba->brd_no, vpi, cmnd->cmnd[0],
					scsi_bufflen(cmnd),
					scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d (%d):0720 FCP command x%x residual "
				"overrun error. Data: x%x x%x\n",
				phba->brd_no, vpi, cmnd->cmnd[0],
				scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).  Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				"%d (%d):0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n",
				phba->brd_no, vpi,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}

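/*
 * Completion handler for FCP command IOCBs.  Translates the IOCB status
 * into a SCSI result, completes the command to the midlayer, and ramps
 * the LUN queue depth up or down based on success and queue-full events.
 */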
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
			? lpfc_cmd->cur_iocbq.vport->vpi
			: 0);
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d (%d):0729 FCP cmd x%x failed <%d/%d> "
				"status: x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, vpi, cmd->cmnd[0],
				cmd->device ? cmd->device->id : 0xffff,
				cmd->device ? cmd->device->lun : 0xffff,
				lpfc_cmd->status, lpfc_cmd->result,
				pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d (%d):0710 Iodone <%d/%d> cmd %p, error "
				"x%x SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, vpi, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries,
				scsi_get_resid(cmd));
	}

	result = cmd->result;
	sdev = cmd->device;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(phba, sdev);

	if (!result && pnode != NULL &&
	    ((jiffies - pnode->last_ramp_up_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
							MSG_ORDERED_TAG,
							tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
							MSG_SIMPLE_TAG,
							tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
						      tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
					"%d (%d):0711 detected queue full - "
					"lun queue depth adjusted to %d.\n",
					phba->brd_no, vpi, depth);
		}
	}

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

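/*
 * Build the FCP_CMND payload and IOCB for a midlayer command: LUN, CDB,
 * task attributes, and the read/write/control IOCB variant selected by
 * the data direction.
 */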
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer.
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

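/*
 * Build a task management IOCB (e.g. target reset) for the given LUN.
 * Returns 1 on success and 0 if the node is not in a usable state.
 */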
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return 1;
}

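/*
 * Default completion for task management IOCBs that timed out: just
 * release the scsi buffer when the late completion finally arrives.
 */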
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

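/*
 * Issue an FCP target reset to tgt_id and wait for its completion.
 */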
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	if (!rdata->pnode)
		return FAILED;

	lpfc_cmd->rdata = rdata;
	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					   FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d (%d):0702 Issue Target Reset to TGT %d "
			"Data: x%x x%x\n",
			phba->brd_no, vport->vpi, tgt_id,
			rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);

	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		if (ret == IOCB_TIMEDOUT)
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

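/*
 * scsi_host_template info() handler: format a description of the HBA
 * model, PCI location, and port name into a static buffer.
 */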
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384 - len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

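/*
 * FCP ring polling support: rearm the poll timer only while commands
 * remain outstanding on the txcmplq.
 */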
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba *phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

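/*
 * queuecommand entry point.  Allocates a scsi buffer, maps the command
 * for DMA, builds the IOCB, and posts it to the FCP ring.  Returns
 * SCSI_MLQUEUE_HOST_BUSY so the midlayer retries when resources are
 * exhausted.
 */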
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_adjust_queue_depth(phba);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d (%d):0707 driver's buffer pool is empty, "
				"IO busied\n",
				phba->brd_no, vport->vpi);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

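/*
 * Sleep in one-second intervals until the remote port leaves the
 * BLOCKED state, so error handling does not race with rediscovery.
 */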
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}

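/*
 * eh_abort_handler: abort one in-flight command with an ABORT_XRI (or
 * CLOSE_XRI when the link is down) IOCB, then poll until the completion
 * handler clears pCmd or the devloss window expires.
 */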
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	unsigned int loop_count = 0;
	int ret = SUCCESS;

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If the pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command can not be in the txq and it is in flight because
	 * the pCmd is still pointing at the SCSI command we have to abort.
	 * There is no need to search the txcmplq.  Just send an abort to the
	 * FW.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring(phba);

	/* Wait for abort to complete */
	while (lpfc_cmd->pCmd == cmnd) {
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_sli_poll_fcp_ring(phba);

		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
		if (++loop_count
		    > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
			break;
	}

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d (%d):0748 abort handler timed out waiting "
				"for abort to complete: ret %#x, ID %d, "
				"LUN %d, snum %#lx\n",
				phba->brd_no, vport->vpi, ret,
				cmnd->device->id, cmnd->device->lun,
				cmnd->serial_number);
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d (%d):0749 SCSI Layer I/O Abort Request "
			"Status x%x ID %d LUN %d snum %#lx\n",
			phba->brd_no, vport->vpi, ret, cmnd->device->id,
			cmnd->device->lun, cmnd->serial_number);

	return ret;
}

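/*
 * eh_device_reset_handler: send an FCP target reset for the device's
 * target, then flush any I/O the target failed to abort on its own.
 */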
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int iocb_status = IOCB_SUCCESS;
	int cnt, loopcnt;

	lpfc_block_error_handler(cmnd);
	loopcnt = 0;
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	while (1) {
		if (!pnode)
			goto out;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			loopcnt++;
			rdata = cmnd->device->hostdata;
			if (!rdata ||
			    (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
						"%d (%d):0721 LUN Reset rport "
						"failure: cnt x%x rdata x%p\n",
						phba->brd_no, vport->vpi,
						loopcnt, rdata);
				goto out;
			}
			pnode = rdata->pnode;
			if (!pnode)
				goto out;
		}
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
					   FCP_TARGET_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d (%d):0703 Issue target reset to TGT %d LUN %d "
			"rpi x%x nlp_flag x%x\n",
			phba->brd_no, vport->vpi, cmnd->device->id,
			cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);

	iocb_status = lpfc_sli_issue_iocb_wait(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				iocbq, iocbqrsp, lpfc_cmd->timeout);

	if (iocb_status == IOCB_TIMEDOUT)
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	if (iocb_status == IOCB_SUCCESS)
		ret = SUCCESS;
	else
		ret = iocb_status;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(phba,
				    &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    0, LPFC_CTX_LUN);
	loopcnt = 0;
	while (cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					cmnd->device->id, cmnd->device->lun,
					LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d (%d):0719 device reset I/O flush failure: "
				"cnt x%x\n",
				phba->brd_no, vport->vpi, cnt);
		ret = FAILED;
	}

out_free_scsi_buf:
	if (iocb_status != IOCB_TIMEDOUT) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d (%d):0713 SCSI layer issued device reset (%d, %d) "
			"return x%x status x%x result x%x\n",
			phba->brd_no, vport->vpi, cmnd->device->id,
			cmnd->device->lun, ret, cmd_status, cmd_result);
out:
	return ret;
}

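/*
 * eh_bus_reset_handler: reset every mapped target known to the driver,
 * then flush any I/O still outstanding on the FCP ring.
 */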
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf *lpfc_cmd;

	lpfc_block_error_handler(cmnd);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		return FAILED;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    i == ndlp->nlp_sid &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
					  cmnd->device->lun,
					  ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d (%d):0700 Bus Reset on target %d "
					"failed\n",
					phba->brd_no, vport->vpi, i);
			err_count++;
			break;
		}
	}

	if (ret != IOCB_TIMEDOUT)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	if (err_count == 0)
		ret = SUCCESS;
	else
		ret = FAILED;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while (cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d (%d):0715 Bus Reset I/O flush failure: "
				"cnt x%x left x%x\n",
				phba->brd_no, vport->vpi, cnt, i);
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, vport->vpi, ret);

	return ret;
}

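/*
 * slave_alloc: called by the midlayer for each new scsi_device.
 * Preallocates lun_queue_depth + 2 scsi buffers into the global free
 * list, capped so some exchanges always remain for discovery.
 */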
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the
	 * driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = phba->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d (%d):0704 At limitation of %d "
				"preallocated command buffers\n",
				phba->brd_no, vport->vpi, total);
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d (%d):0705 Allocation request of %d "
				"command buffers will exceed max of %d.  "
				"Reducing allocation request to %d.\n",
				phba->brd_no, vport->vpi, num_to_alloc,
				phba->cfg_hba_queue_depth,
				(phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(vport);
		if (!scsi_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d (%d):0706 Failed to allocate "
					"command buffer\n",
					phba->brd_no, vport->vpi);
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

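/*
 * slave_configure: set the tagged queue depth and the transport's
 * dev_loss_tmo for the new scsi_device.
 */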
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
};