/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}
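		/* The one-time pass above (guarded by init_key) converts the
		 * ASCII license string into big-endian 32-bit words before it
		 * is copied into the READ_NVPARM request below; on big-endian
		 * hosts cpu_to_be32() is a no-op.
		 */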
		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;
	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}
	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));
	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
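	/* Each DUMP mailbox above returns the next chunk of the VPD region;
	 * a mailbox error forces word_cnt to 0, which (like the natural end
	 * of the region) terminates the loop, and offset accumulates how
	 * much data was staged into lpfc_vpd_data for lpfc_parse_vpd().
	 */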

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure asynchronous
 * event mailbox command to the device. If the mailbox command returns
 * successfully, it will set the internal async event support flag to 1;
 * otherwise, it will set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *	None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}
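	/* The loop above renders the six IEEE bytes of the WWNN as twelve
	 * lowercase hex digits: each pass consumes one byte, emitting the
	 * high nibble and then (after the inner i++) the low nibble, with
	 * 0-9 mapped from 0x30 ('0') and 10-15 from 0x61 ('a').
	 */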
	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;
	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);
	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
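	/* The write above leaves mailbox (MBINT), error (ERINT) and link
	 * (LAINT) attentions enabled unconditionally, plus one ring
	 * interrupt per configured ring; ring 0 (FCP) is masked again when
	 * the driver is configured to poll that ring instead.
	 */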
	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
					KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}
	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board:%d "
			"Reset link speed to auto.\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
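	/* Each LPFC_USER_LINK_SPEED_* setting is honored only when the
	 * matching LMT_* capability bit reported by the adapter is set;
	 * any speed this board cannot run falls back to autonegotiation
	 * above before INIT_LINK is built.
	 */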
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
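/* Mailbox buffer ownership in the routine above: on MBX_NOWAIT success the
 * buffer is released later by lpfc_sli_def_mbox_cmpl; for MBX_POLL, or on
 * error, it is freed before returning.
 */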
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_lock_irq(&pring->ring_lock);
		else
			spin_lock_irq(&phba->hbalock);
		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_unlock_irq(&pring->ring_lock);
		else
			spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	return 0;
}
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
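/* lpfc_hba_down_post is one of the per-SLI-revision entries in the driver's
 * API jump table, set up at attach time, so callers dispatch to
 * lpfc_hba_down_post_s3 or lpfc_hba_down_post_s4 without testing sli_rev.
 */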
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}
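/* Together with lpfc_hb_timeout_handler below, this completion forms a
 * simple watchdog: a completed heart-beat clears hb_outstanding and re-arms
 * the timer at the short LPFC_HB_MBOX_INTERVAL, while the handler escalates
 * to the longer LPFC_HB_MBOX_TIMEOUT window once a command is in flight.
 */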
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	}
}
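/* Heartbeat policy implemented above: a recent I/O completion counts as
 * proof of life and suppresses the mailbox; an idle adapter with no other
 * mailbox activity gets an explicit heart-beat command; and with a
 * heart-beat already outstanding the timer is simply extended so the
 * command can complete or time out on its own.
 */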
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
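/* In lpfc_handle_deferred_eratt() above, the final SLIM reads at offsets
 * 0xa8 and 0xac refresh work_status[] so the follow-up error handling
 * reports the adapter state observed after ER1 cleared.
 */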
/**
 * lpfc_board_errevt_to_mgmt - Send board error event to management application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post a board error event to the management
 * application.
 **/
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	/*
	 * On error status condition, driver needs to wait for port
	 * ready before performing reset.
	 */
	rc = lpfc_sli4_pdev_status_reg_wait(phba);
	if (!rc) {
		/* need reset: attempt for port recovery */
		if (en_rn_msg)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Reset Needed: Attempting Port "
					"Recovery...\n");
		lpfc_offline_prep(phba, mbx_action);
		lpfc_offline(phba);
		/* release interrupt for possible resource change */
		lpfc_sli4_disable_intr(phba);
		lpfc_sli_brdrestart(phba);
		/* request and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3175 Failed to enable interrupt\n");
			return -EIO;
		}
		phba->intr_mode = intr_mode;
		rc = lpfc_online(phba);
		if (rc == 0)
			lpfc_unblock_mgmt_io(phba);
	}
	return rc;
}
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			break;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
				en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
		}
		finished = 0;
		break;
		case 0x78:
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}
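/* VPD layout handled by lpfc_parse_vpd() above: resource tags 0x82/0x91 are
 * skipped over, tag 0x90 opens the read-only section whose 16-bit length
 * arrives little-endian (lenlo then lenhi), keyword records "SN" and
 * "V1"-"V4" populate SerialNumber, ModelDesc, ModelName, ProgramType and
 * Port, and tag 0x78 terminates the walk.
 */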
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/*
	 * oneConnect HBAs require special processing; they are all
	 * initiators and we put the port number on the end.
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator %s",
				m.name, m.function,
				phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				"Emulex %s %s %s ",
				m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else
			mp2 = NULL;

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
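/*
 * Design note on lpfc_post_buffer(): up to two receive buffers are packed
 * per QUE_RING_BUF64 command (ulpBdeCount is 1 or 2), so each successfully
 * issued IOCB drains cnt by the number of BDEs it carries. Any buffers
 * that could not be posted are remembered in pring->missbufcnt and retried
 * on the next call.
 */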
/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
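/*
 * Note: S(N, V) is a 32-bit rotate-left of V by N bits; for example,
 * S(5, A) expands to ((A << 5) | (A >> 27)). It is the rotation
 * primitive used by the SHA-1 style hashing routines below.
 */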
/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}
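/*
 * The five words above are the standard SHA-1 initial hash values
 * (H0-H4 from FIPS 180-1); the authentication hash below is a SHA-1
 * style transform over the challenge material.
 */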
/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by @HashResultPointer
 * with the values from the working hash table pointed to by
 * @HashWorkingPointer. The results are put back into the initial hash table,
 * returned through the @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}
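/*
 * The two do/while loops above follow the SHA-1 compression function:
 * words 16-79 of the message schedule come from the rotate-by-one
 * recurrence, and the 80 rounds apply the usual round functions and
 * constants (Ch/0x5A827999, Parity/0x6ED9EBA1, Maj/0x8F1BBCDC,
 * Parity/0xCA62C1D6) in blocks of 20 rounds each.
 */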
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}
/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}
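/*
 * Putting the pieces together (a sketch of the flow, not a spec): the
 * two WWNN words seed entries 0/1 and 78/79 of the 80-word working
 * array, entries 0-6 are XORed with the adapter's RandomData challenge
 * via lpfc_challenge_key(), and a single lpfc_sha_init() plus
 * lpfc_sha_iterate() pass condenses the result into the 5-word hbainit
 * digest handed back to the caller.
 */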
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not already be in memory free mode */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* take care of nodes in unused state before the state
		 * machine taking action.
		 */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RM);
	}

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Lets wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				"0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						LOG_NODE,
						"0282 did:x%x ndlp:x%p "
						"usgmap:x%x refcnt:%d\n",
						ndlp->nlp_DID, (void *)ndlp,
						ndlp->nlp_usg_map,
						atomic_read(
							&ndlp->kref.refcount));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->fc_fdmitmo);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}
/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all the user space access to
 * the HBA, whether from the sysfs interface or the libdfc interface, will
 * be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2813 Mgmt IO is Blocked %x "
				"- mbox cmd %x still active\n",
				phba->sli.sli_flag, actcmd);
			break;
		}
	}
}
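/*
 * Note on the wait above: the blocking flag is raised first so that no new
 * management command can start, then the routine polls every 2 ms for the
 * in-flight mailbox command, bounded by that command's own mailbox timeout
 * (via lpfc_mbox_tmo_val) rather than a single fixed constant.
 */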
/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fix up the temporary rpi assignments.
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;

			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				if (NLP_CHK_NODE_ACT(ndlp))
					ndlp->nlp_rpi =
						lpfc_sli4_alloc_rpi(phba);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (!lpfc_sli_queue_setup(phba)) {
		lpfc_unblock_mgmt_io(phba);
		return 1;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);
	} else {
		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
/**
 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all the user space
 * access to the HBA, whether from the sysfs interface or the libdfc
 * interface, will be allowed. The HBA is set to block the management
 * interface when the driver prepares the HBA interface for online or
 * offline and then set to unblock the management interface afterwards.
 **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It performs
 * unregistration login to all the nodes on all vports and flushes the mailbox
 * queue to make it ready to be brought offline.
 **/
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba, mbx_action);

	lpfc_linkdown(phba);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			shost =	lpfc_shost_from_vport(vports[i]);
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				if (!NLP_CHK_NODE_ACT(ndlp))
					continue;
				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
					continue;
				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RM);
				}
				spin_lock_irq(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(shost->host_lock);
				/*
				 * Whenever an SLI4 port goes offline, free the
				 * RPI. Get a new RPI when the adapter port
				 * comes back online.
				 */
				if (phba->sli_rev == LPFC_SLI_REV4)
					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
				lpfc_unreg_rpi(vports[i], ndlp);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
}
/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup. The HBA is offline
	   now. */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(&phba->hbalock);
}
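/*
 * Lock nesting in lpfc_scsi_free() mirrors the rest of the driver:
 * hbalock is the outer, IRQ-disabling lock, and the per-list get/put
 * locks nest inside it, which is why the inner locks use the plain
 * spin_lock()/spin_unlock() forms.
 */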
/**
 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
	LIST_HEAD(els_sgl_list);
	LIST_HEAD(scsi_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock_irq(&phba->hbalock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrank */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
		spin_unlock_irq(&phba->hbalock);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_mbuf_free(phba, sglq_entry->virt,
					       sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}

	/*
	 * update on pci function's allocated scsi xri-sgl list
	 */
	phba->total_scsi_bufs = 0;

	/* maximum number of xris available for scsi buffers */
	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
				      els_xri_cnt;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2401 Current allocated SCSI xri-sgl count:%d, "
			"maximum SCSI xri count:%d\n",
			phba->sli4_hba.scsi_xri_cnt,
			phba->sli4_hba.scsi_xri_max);

	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
		/* max scsi xri shrank below the allocated scsi buffers */
		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
					phba->sli4_hba.scsi_xri_max;
		/* release the extra allocated scsi buffers */
		for (i = 0; i < scsi_xri_cnt; i++) {
			list_remove_head(&scsi_sgl_list, psb,
					 struct lpfc_scsi_buf, list);
			if (psb) {
				pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					      psb->data, psb->dma_handle);
				kfree(psb);
			}
		}
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}

	/* update xris associated to remaining allocated scsi buffers */
	psb = NULL;
	psb_next = NULL;
	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2560 Failed to allocate xri for "
					"scsi buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	lpfc_scsi_free(phba);
	return rc;
}
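/*
 * In short, the routine above rebalances both SGL pools after a port
 * function reset: the ELS list is grown or shrunk to match the new
 * els_xri_cnt, the SCSI pool is capped at max_xri minus the ELS share,
 * and every surviving entry is restamped with a freshly allocated
 * logical/physical XRI pair.
 */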
/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates a FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;
	int error = 0;

	if (dev != &phba->pcidev->dev)
		shost = scsi_host_alloc(&lpfc_vport_template,
					sizeof(struct lpfc_vport));
	else
		shost = scsi_host_alloc(&lpfc_template,
					sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;

	lpfc_get_vport_cfgparam(vport);
	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;

	init_timer(&vport->fc_fdmitmo);
	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
	vport->fc_fdmitmo.data = (unsigned long)vport;

	init_timer(&vport->els_tmofunc);
	vport->els_tmofunc.function = lpfc_els_timeout;
	vport->els_tmofunc.data = (unsigned long)vport;

	init_timer(&vport->delayed_disc_tmo);
	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
	vport->delayed_disc_tmo.data = (unsigned long)vport;

	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}
/**
 * destroy_port - destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys a FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_cleanup(vport);
	return;
}
/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
 * uses the kernel idr facility to perform the task.
 *
 * Return codes
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int ret;

	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}
/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
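/*
 * The scan heuristics above, in order: give up waiting after 30 s no
 * matter what; after 15 s with the link still down, assume there is no
 * fabric to scan; and once the vport is ready, an empty map table that
 * is under 2 s old is treated as "still settling" before the scan is
 * declared complete.
 */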
/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
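/*
 * A note on the fc4s arrays set above: they are the 32-byte FC-4 TYPEs
 * bitmask objects from FC-GS. Under that object's big-endian word
 * layout, byte 2, bit 0 appears to correspond to FC-4 type 0x08
 * (SCSI-FCP) and byte 7, bit 0 to type 0x20 (CT), i.e. the host
 * advertises FCP and CT as both supported and active.
 */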
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}
/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}
/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);
}
/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine is invoked when waiting for FCF table rediscover has been
 * timed out. If new FCF record(s) has (have) been discovered during the
 * wait period, a new FCF event shall be added to the FCOE async event
 * list, and then the worker thread shall be woken up for processing from
 * the worker thread context.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
 *
 * Return: Link-attention status in terms of base driver's coding.
 **/
static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}
/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}
/**
 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link speed and translate
 * it into the base driver's link-attention link speed coding.
 *
 * Return: Link-attention link speed in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
				struct lpfc_acqe_link *acqe_link)
{
	uint8_t link_speed;

	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
	case LPFC_ASYNC_LINK_SPEED_ZERO:
	case LPFC_ASYNC_LINK_SPEED_10MBPS:
	case LPFC_ASYNC_LINK_SPEED_100MBPS:
		link_speed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	case LPFC_ASYNC_LINK_SPEED_1GBPS:
		link_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_ASYNC_LINK_SPEED_10GBPS:
		link_speed = LPFC_LINK_SPEED_10GHZ;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0483 Invalid link-attention link speed: x%x\n",
				bf_get(lpfc_acqe_link_speed, acqe_link));
		link_speed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	}
	return link_speed;
}
/**
 * lpfc_sli_port_speed_get - Convert sli3 link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	switch (phba->fc_linkspeed) {
	case LPFC_LINK_SPEED_1GHZ:
		link_speed = 1000;
		break;
	case LPFC_LINK_SPEED_2GHZ:
		link_speed = 2000;
		break;
	case LPFC_LINK_SPEED_4GHZ:
		link_speed = 4000;
		break;
	case LPFC_LINK_SPEED_8GHZ:
		link_speed = 8000;
		break;
	case LPFC_LINK_SPEED_10GHZ:
		link_speed = 10000;
		break;
	case LPFC_LINK_SPEED_16GHZ:
		link_speed = 16000;
		break;
	default:
		link_speed = 0;
		break;
	}
	return link_speed;
}
/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Mbps for the link speed.
 *
 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_EVT_CODE_LINK_NO_LINK:
			port_speed = 0;
			break;
		case LPFC_EVT_CODE_LINK_10_MBIT:
			port_speed = 10;
			break;
		case LPFC_EVT_CODE_LINK_100_MBIT:
			port_speed = 100;
			break;
		case LPFC_EVT_CODE_LINK_1_GBIT:
			port_speed = 1000;
			break;
		case LPFC_EVT_CODE_LINK_10_GBIT:
			port_speed = 10000;
			break;
		default:
			port_speed = 0;
			break;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_EVT_CODE_FC_NO_LINK:
			port_speed = 0;
			break;
		case LPFC_EVT_CODE_FC_1_GBAUD:
			port_speed = 1000;
			break;
		case LPFC_EVT_CODE_FC_2_GBAUD:
			port_speed = 2000;
			break;
		case LPFC_EVT_CODE_FC_4_GBAUD:
			port_speed = 4000;
			break;
		case LPFC_EVT_CODE_FC_8_GBAUD:
			port_speed = 8000;
			break;
		case LPFC_EVT_CODE_FC_10_GBAUD:
			port_speed = 10000;
			break;
		case LPFC_EVT_CODE_FC_16_GBAUD:
			port_speed = 16000;
			break;
		default:
			port_speed = 0;
			break;
		}
		break;
	default:
		port_speed = 0;
		break;
	}
	return port_speed;
}
/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
3868 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3869 * @phba: pointer to lpfc hba data structure.
3870 * @acqe_fc: pointer to the async fc completion queue entry.
3872 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3873 * that the event was received and then issue a read_topology mailbox command so
3874 * that the rest of the driver will treat it the same as SLI3.
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
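/*
 * Note: the error handling above follows the file's usual unwind pattern:
 * resources are released in reverse order of allocation (lpfc_dmabuf, then
 * the mailbox back to its mempool), so a failure at any stage jumps to the
 * label that frees exactly what was set up before it.
 */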
/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	struct lpfc_acqe_misconfigured_event *misconfigured;

	/* special case misconfigured event as it contains data for all ports */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	     LPFC_SLI_INTF_IF_TYPE_2) ||
	    (bf_get(lpfc_trailer_type, acqe_sli) !=
	     LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2901 Async SLI event - Event Data1:x%08x Event Data2:"
				"x%08x SLI Event Type:%d\n",
				acqe_sli->event_data1, acqe_sli->event_data2,
				bf_get(lpfc_trailer_type, acqe_sli));
		return;
	}

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* get port name is empty */

	misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

	/* fetch the status for this port */
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		status = bf_get(lpfc_sli_misconfigured_port0,
					&misconfigured->theEvent);
		break;
	case LPFC_LINK_NUMBER_1:
		status = bf_get(lpfc_sli_misconfigured_port1,
					&misconfigured->theEvent);
		break;
	case LPFC_LINK_NUMBER_2:
		status = bf_get(lpfc_sli_misconfigured_port2,
					&misconfigured->theEvent);
		break;
	case LPFC_LINK_NUMBER_3:
		status = bf_get(lpfc_sli_misconfigured_port3,
					&misconfigured->theEvent);
		break;
	default:
		status = ~LPFC_SLI_EVENT_STATUS_VALID;
		break;
	}

	switch (status) {
	case LPFC_SLI_EVENT_STATUS_VALID:
		return; /* no message if the sfp is okay */
	case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
		sprintf(message, "Optics faulted/incorrectly installed/not "
			"installed - Reseat optics, if issue not "
			"resolved, replace.");
		break;
	case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
		sprintf(message,
			"Optics of two types installed - Remove one optic or "
			"install matching pair of optics.");
		break;
	case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
		sprintf(message, "Incompatible optics - Replace with "
			"compatible optics for card to function.");
		break;
	default:
		/* firmware is reporting a status we don't know about */
		sprintf(message, "Unknown event status x%02x", status);
		break;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"3176 Misconfigured Physical Port - "
			"Port Name %c %s\n", port_name, message);
}
/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
		(phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
		&& (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}
/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2548 FCF Table full count 0x%x tag 0x%x\n",
			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
			acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2549 FCF (x%x) disconnected from network, "
			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2718 Clear Virtual Link Received for VPI 0x%x"
			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		if (active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"fail through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be retry on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0288 Unknown FCoE event type 0x%x event tag "
			"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}
/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}
/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}
/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}
/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}
/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars = 0;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device, bars:x%x\n", bars);
	return -ENODEV;
}
/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Release PCI resource and disable PCI device */
	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);

	return;
}
/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * any outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}
/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV extended capability of the physical
 * function to determine the total number of virtual functions the device
 * supports.
 *
 * Returns the number of supported virtual functions, or 0 if the SR-IOV
 * capability is not present.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}
/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
 * API call is not considered an error condition for most devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}
/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	int rc;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.ring)
		phba->sli.ring = (struct lpfc_sli_ring *)
			kzalloc(LPFC_SLI3_MAX_RING *
			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (!phba->sli.ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	/* Initialize the host templates with the configured values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}
/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}
/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_vector_map_info *cpup;
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mboxq;
	int rc, i, hbq_count, max_buf_size;
	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
	struct lpfc_mqe *mqe;
	int longs;
	int fof_vectors = 0;

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Before proceed, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;
	init_timer(&phba->rrq_tmr);
	phba->rrq_tmr.function = lpfc_rrq_timeout;
	phba->rrq_tmr.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;
	/* FCF rediscover timer */
	init_timer(&phba->fcf.redisc_wait);
	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
	phba->fcf.redisc_wait.data = (unsigned long)phba;

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
	 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
	 */
	if (!phba->sli.ring)
		phba->sli.ring = kzalloc(
			(LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (!phba->sli.ring)
		return -ENOMEM;

	/*
	 * It doesn't matter what family our adapter is in, we are
	 * limited to 2 Pages, 512 SGEs, for our SGL.
	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
	 */
	max_buf_size = (2 * SLI4_PAGE_SIZE);
	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a SGE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a SGE for each, and a SGE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
		/*
		 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
		 * to post 1 page for the SGL.
		 */
	}

	/* Initialize the host templates with the updated values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	/* Initialize buffer queue management fields */
	hbq_count = lpfc_sli_hbq_count();
	for (i = 0; i < hbq_count; ++i)
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
	INIT_LIST_HEAD(&phba->rb_pend_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */
	/* Initialize the Abort scsi buffer list used by driver */
	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Fast-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Initialize the driver internal SLI layer lists. */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			return -ENODEV;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;
	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
	lpfc_supported_pages(mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (!rc) {
		mqe = &mboxq->u.mqe;
		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
		       LPFC_MAX_SUPPORTED_PAGES);
		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
			switch (pn_page[i]) {
			case LPFC_SLI4_PARAMETERS:
				phba->sli4_hba.pc_sli4_params.supported = 1;
				break;
			default:
				break;
			}
		}
		/* Read the port's SLI4 Parameters capabilities if supported. */
		if (phba->sli4_hba.pc_sli4_params.supported)
			rc = lpfc_pc_sli4_params_get(phba, mboxq);
		if (rc) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}

	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters come
	 * back in conflict.
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2999 Unsupported SLI4 Parameters "
				"Extents and RPI headers enabled.\n");
			goto out_free_bsmbx;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);
	if (phba->cfg_fof)
		fof_vectors = 1;

	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}

	phba->sli4_hba.fcp_eq_hdl =
			kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
			    (fof_vectors + phba->cfg_fcp_io_channel)),
			    GFP_KERNEL);
	if (!phba->sli4_hba.fcp_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2572 Failed allocate memory for "
				"fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}

	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
				      (fof_vectors +
				       phba->cfg_fcp_io_channel)), GFP_KERNEL);
	if (!phba->sli4_hba.msix_entries) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2573 Failed allocate memory for msi-x "
				"interrupt vector entries\n");
		rc = -ENOMEM;
		goto out_free_fcp_eq_hdl;
	}

	phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
					 phba->sli4_hba.num_present_cpu),
					 GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3327 Failed allocate memory for msi-x "
				"interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_msix;
	}
	if (lpfc_used_cpu == NULL) {
		lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
					 GFP_KERNEL);
		if (!lpfc_used_cpu) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3335 Failed allocate memory for msi-x "
					"interrupt vector mapping\n");
			kfree(phba->sli4_hba.cpu_map);
			rc = -ENOMEM;
			goto out_free_msix;
		}
		for (i = 0; i < lpfc_present_cpu; i++)
			lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
	}

	/* Initialize io channels for round robin */
	cpup = phba->sli4_hba.cpu_map;
	rc = 0;
	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
		cpup->channel_id = rc;
		rc++;
		if (rc >= phba->cfg_fcp_io_channel)
			rc = 0;
		cpup++;
	}

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

out_free_msix:
	kfree(phba->sli4_hba.msix_entries);
out_free_fcp_eq_hdl:
	kfree(phba->sli4_hba.fcp_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}
/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.num_online_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Free memory allocated for msi-x interrupt vector entries */
	kfree(phba->sli4_hba.msix_entries);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.fcp_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}
/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	INIT_LIST_HEAD(&phba->port_list);
	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	/* Initialize the scsi buffer list used by driver for scsi IO */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	return 0;
}
/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}
/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	/* Stop kernel worker thread */
	kthread_stop(phba->worker_thread);
}
/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host.  */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, LPFC_IOCB_LIST_CNT);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}
/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
static void
lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;

	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
}
/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(sglq_list);

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	lpfc_free_sgl_list(phba, &sglq_list);
}
/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}
/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}
/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 **/
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* scsi xri-buffer book keeping */
	phba->sli4_hba.scsi_xri_cnt = 0;
}
/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents.  This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE modulo 64 rpi context headers.  This is an initialization routine
 * and should be called only when interrupts are disabled.
 *
 * Return codes
 * 	0 - successful
 *	-ERROR - otherwise.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba.  This single region
 * provides support for up to 64 rpis.  The region is used globally
 * by the driver.
 *
 * Returns:
 *   A valid rpi hdr on success.
 *   A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;
	uint32_t rpi_count;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required. Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
	phba->sli4_hba.max_cfg_param.max_rpi - 1;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block.  The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The port has a limited number of rpis. The increment here
	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
	 * and to allow the full max_rpi range per port.
	 */
	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
		rpi_count = rpi_limit - curr_rpi_range;
	else
		rpi_count = LPFC_RPI_HDR_COUNT;

	if (!rpi_count)
		return NULL;
	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * The next_rpi stores the next logical module-64 rpi value used
	 * to post physical rpis in subsequent rpi postings.
	 */
	phba->sli4_hba.next_rpi += rpi_count;
	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;

	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}
 exit:
	/* There are no rpis available to the port now. */
	phba->sli4_hba.next_rpi = 0;
}
/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *      pointer to @phba - successful
 *      NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}
/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
5913 lpfc_hba_free(struct lpfc_hba
*phba
)
5915 /* Release the driver assigned board number */
5916 idr_remove(&lpfc_hba_index
, phba
->brd_no
);
5918 /* Free memory allocated with sli rings */
5919 kfree(phba
->sli
.ring
);
5920 phba
->sli
.ring
= NULL
;

/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;
	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	return 0;
}

/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy the physical port associated with the SCSI host */
	destroy_port(vport);

	return;
}

/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	uint32_t old_mask;
	uint32_t old_guard;

	int pagecnt = 10;
	if (lpfc_prot_mask && lpfc_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");

		old_mask = lpfc_prot_mask;
		old_guard = lpfc_prot_guard;

		/* Only allow supported values */
		lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
				   SHOST_DIX_TYPE0_PROTECTION |
				   SHOST_DIX_TYPE1_PROTECTION);
		lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);

		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
		if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
			lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;

		if (lpfc_prot_mask && lpfc_prot_guard) {
			if ((old_mask != lpfc_prot_mask) ||
			    (old_guard != lpfc_prot_guard))
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1475 Registering BlockGuard with the "
					"SCSI layer: mask %d guard %d\n",
					lpfc_prot_mask, lpfc_prot_guard);

			scsi_host_set_prot(shost, lpfc_prot_mask);
			scsi_host_set_guard(shost, lpfc_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1479 Not Registering BlockGuard with the SCSI "
				"layer, Bad protection parameters: %d %d\n",
				old_mask, old_guard);
	}

	if (!_dump_buf_data) {
		while (pagecnt) {
			spin_lock_init(&_dump_buf_lock);
			_dump_buf_data =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9043 BLKGRD: allocated %d pages for "
					"_dump_buf_data at 0x%p\n",
					(1 << pagecnt), _dump_buf_data);
				_dump_buf_data_order = pagecnt;
				memset(_dump_buf_data, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_data_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9044 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
			"\n", _dump_buf_data);
	if (!_dump_buf_dif) {
		while (pagecnt) {
			_dump_buf_dif =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_dif) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9046 BLKGRD: allocated %d pages for "
					"_dump_buf_dif at 0x%p\n",
					(1 << pagecnt), _dump_buf_dif);
				_dump_buf_dif_order = pagecnt;
				memset(_dump_buf_dif, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_dif_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9047 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
			_dump_buf_dif);
}

/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}
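
/*
 * Note: the "- 10" applied to can_queue above keeps a small reserve of
 * hba_queue_depth slots away from the SCSI midlayer (the exact margin is
 * a driver heuristic, not a documented SLI requirement), so
 * driver-internal commands can still obtain resources under full load.
 */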

/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
					       SLI2_SLIM_SIZE,
					       &phba->slim2p.phys,
					       GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	INIT_LIST_HEAD(&phba->rb_pend_list);

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
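
/*
 * Note on the DMA mask setup above: this is the usual 64-then-32-bit
 * fallback - try the full 64-bit streaming and coherent masks first and
 * retry with 32-bit masks only if the platform rejects them. A sketch of
 * the equivalent single call on later kernels (illustrative only; this
 * file keeps the explicit pci_set_*_dma_mask() pairs):
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return error;
 */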

/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}

/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			       &portsmphr_reg.word0) ||
		    (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid. Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process. Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
			/* Final checks. The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				       &reg_data.word0) ||
			    (bf_get(lpfc_sliport_status_err, &reg_data) &&
			     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}
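
/*
 * Timing note: the POST poll above is at most 3000 passes with a 10ms
 * sleep per pass, which is where the "up to 30 seconds" budget in the
 * comment comes from; the if_type 2 reset path in lpfc_pci_function_reset
 * below uses the same pattern with 1000 passes for its 10-second RDY wait.
 */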

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.EQCQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_SLIPORT_IF0_SMPHR;
	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISR0;
	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_IMR0;
	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISCR0;
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
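
/*
 * Layout note: each virtual function owns one LPFC_VFR_PAGE_SIZE chunk
 * of the BAR2 doorbell space, so every register above is computed as
 * base + vf * LPFC_VFR_PAGE_SIZE + fixed_offset. For example (sizes
 * illustrative), with a 4KB VF page, vf 2's WQ doorbell would sit at
 * drbl_regs_memmap_p + 2 * 4096 + LPFC_ULP0_WQ_DOORBELL.
 */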

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec. This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs. No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  bmbx_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, bmbx_size);

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later. The mailbox dma address is required
	 * to be 16-byte aligned. Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now. The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses. The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
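
/*
 * Worked example of the split above (illustrative address): with the
 * 16-byte-aligned aphys = 0x0002456789A0,
 *
 *	addr_lo = (((aphys >> 4) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_LO
 *	addr_hi = (((aphys >> 34) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_HI
 *
 * i.e. bits 33..4 land in the low word and bits 63..34 in the high word.
 * The 16-byte alignment guarantees bits 3..0 are zero, which is why they
 * can be dropped from the 30-bit low field, and the tag bit in each
 * posted word marks it as the high or low half.
 */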

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources. This routine requires
 * the caller to ensure all mailbox commands recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}

/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union  lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint32_t desc_count;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2012 Mailbox failed, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI(Count:%d)\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi);
	}

	if (rc)
		goto read_cfg_out;

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
			&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed, mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
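
/*
 * Note: max_vpi reported by READ_CONFIG includes the VPI used by the
 * physical port itself, which is presumably why max_vports is derived
 * above as (max_vpi - 1) rather than max_vpi.
 */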

/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0. This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}
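
/*
 * Note: only if_type 0 ports need this handshake. The two HOST_ENDIAN
 * magic words are recognizable to the port in either byte order, which
 * lets it infer the host's endianness from the raw mailbox image; that
 * is why the command must carry no payload beyond those two words.
 */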

/**
 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs and
 * CQs. After this routine is called the counts will be set to valid values
 * that adhere to the constraints of the system's interrupt vectors and the
 * port's queue resources.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	int cfg_fcp_io_channel;
	uint32_t cpu;
	uint32_t i = 0;
	int fof_vectors = phba->cfg_fof ? 1 : 0;

	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on HBA EQ parameters */
	cfg_fcp_io_channel = phba->cfg_fcp_io_channel;

	/* It doesn't make sense to have more io channels than online CPUs */
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			i++;
	}
	phba->sli4_hba.num_online_cpu = i;
	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
	phba->sli4_hba.curr_disp_cpu = 0;

	if (i < cfg_fcp_io_channel) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"3188 Reducing IO channels to match number of "
				"online CPUs: from %d to %d\n",
				cfg_fcp_io_channel, i);
		cfg_fcp_io_channel = i;
	}

	if (cfg_fcp_io_channel + fof_vectors >
	    phba->sli4_hba.max_cfg_param.max_eq) {
		if (phba->sli4_hba.max_cfg_param.max_eq <
		    LPFC_FCP_IO_CHAN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2574 Not enough EQs (%d) from the "
					"pci function for supporting FCP "
					"EQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_eq,
					phba->cfg_fcp_io_channel);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2575 Reducing IO channels to match number of "
				"available EQs: from %d to %d\n",
				cfg_fcp_io_channel,
				phba->sli4_hba.max_cfg_param.max_eq);
		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
			fof_vectors;
	}

	/* The actual number of FCP event queues adopted */
	phba->cfg_fcp_io_channel = cfg_fcp_io_channel;

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	return 0;
out_error:
	return -ENOMEM;
}
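
/*
 * Worked example (illustrative numbers): with 8 online CPUs, a
 * cfg_fcp_io_channel of 16 is first clamped to 8; if the port then
 * reports max_eq = 6 and cfg_fof reserves one vector, the count drops
 * again to 6 - 1 = 5, and that final value is what
 * lpfc_sli4_queue_create consumes.
 */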

/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int idx;

	/*
	 * Create HBA Record arrays.
	 */
	if (!phba->cfg_fcp_io_channel)
		return -ERANGE;

	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;

	phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2576 Failed allocate memory for "
				"fast-path EQ record array\n");
		goto out_error;
	}

	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2577 Failed allocate memory for fast-path "
				"CQ record array\n");
		goto out_error;
	}

	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2578 Failed allocate memory for fast-path "
				"WQ record array\n");
		goto out_error;
	}

	/*
	 * Since the first EQ can have multiple CQs associated with it,
	 * this array is used to quickly see if we have a FCP fast-path
	 * CQ match.
	 */
	phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2545 Failed allocate memory for fast-path "
				"CQ map\n");
		goto out_error;
	}

	/*
	 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
	 * how many EQs to create.
	 */
	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {

		/* Create EQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate EQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.hba_eq[idx] = qdesc;

		/* Create Fast Path FCP CQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.fcp_cq[idx] = qdesc;

		/* Create Fast Path FCP WQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.fcp_wq[idx] = qdesc;
	}

	/*
	 * Create Slow Path Completion Queues (CQs)
	 */

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	phba->sli4_hba.els_cq = qdesc;

	/*
	 * Create Slow Path Work Queues (WQs)
	 */

	/* Create Mailbox Command Queue */

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create ELS Work Queues
	 */

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	phba->sli4_hba.els_wq = qdesc;

	/*
	 * Create Receive Queue (RQ)
	 */

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	/* Create the Queues needed for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_create(phba);
	return 0;

out_error:
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}
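
/*
 * Topology note: the code above allocates one EQ/CQ/WQ triple per FCP io
 * channel (fast path) plus single shared slow-path resources: the MBX
 * CQ/MQ pair, the ELS CQ/WQ pair and one header/data RQ pair.
 * lpfc_sli4_queue_setup below attaches the slow-path CQs to EQ 0 and
 * each fast-path CQ to its own per-channel EQ.
 */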

/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int idx;

	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);

	if (phba->sli4_hba.hba_eq != NULL) {
		/* Release HBA event queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.hba_eq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.hba_eq[idx]);
				phba->sli4_hba.hba_eq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.hba_eq);
		phba->sli4_hba.hba_eq = NULL;
	}

	if (phba->sli4_hba.fcp_cq != NULL) {
		/* Release FCP completion queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_cq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_cq[idx]);
				phba->sli4_hba.fcp_cq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_cq);
		phba->sli4_hba.fcp_cq = NULL;
	}

	if (phba->sli4_hba.fcp_wq != NULL) {
		/* Release FCP work queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_wq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_wq[idx]);
				phba->sli4_hba.fcp_wq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_wq);
		phba->sli4_hba.fcp_wq = NULL;
	}

	/* Release FCP CQ mapping array */
	if (phba->sli4_hba.fcp_cq_map != NULL) {
		kfree(phba->sli4_hba.fcp_cq_map);
		phba->sli4_hba.fcp_cq_map = NULL;
	}

	/* Release mailbox command work queue */
	if (phba->sli4_hba.mbx_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
		phba->sli4_hba.mbx_wq = NULL;
	}

	/* Release ELS work queue */
	if (phba->sli4_hba.els_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
		phba->sli4_hba.els_wq = NULL;
	}

	/* Release unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
		phba->sli4_hba.hdr_rq = NULL;
	}
	if (phba->sli4_hba.dat_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
		phba->sli4_hba.dat_rq = NULL;
	}

	/* Release ELS complete queue */
	if (phba->sli4_hba.els_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
		phba->sli4_hba.els_cq = NULL;
	}

	/* Release mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
		phba->sli4_hba.mbx_cq = NULL;
	}

	return;
}

/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	int rc = -ENOMEM;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int fcp_cq_index = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	LPFC_MBOXQ_t *mboxq;
	uint32_t length;

	/* Check for dual-ULP support */
	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3249 Unable to allocate memory for "
				"QUERY_FW_CFG mailbox command\n");
		return -ENOMEM;
	}
	length = (sizeof(struct lpfc_mbx_query_fw_config) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3250 QUERY_FW_CFG mailbox failed with status "
				"x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		rc = -ENXIO;
		goto out_error;
	}

	phba->sli4_hba.fw_func_mode =
			mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);

	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	/*
	 * Set up HBA Event Queues (EQs)
	 */

	/* Set up HBA event queue */
	if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3147 Fast-path EQs not allocated\n");
		rc = -ENOMEM;
		goto out_error;
	}
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
		if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", fcp_eqidx);
			rc = -ENOMEM;
			goto out_destroy_hba_eq;
		}
		rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
			 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
			goto out_destroy_hba_eq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2584 HBA EQ setup: "
				"queue[%d]-id=%d\n", fcp_eqidx,
				phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
	}

	/* Set up fast-path FCP Response Complete Queue */
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3148 Fast-path FCP CQ array not "
				"allocated\n");
		rc = -ENOMEM;
		goto out_destroy_hba_eq;
	}

	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0526 Fast-path FCP CQ (%d) not "
					"allocated\n", fcp_cqidx);
			rc = -ENOMEM;
			goto out_destroy_fcp_cq;
		}
		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
			phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0527 Failed setup of fast-path FCP "
					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
			goto out_destroy_fcp_cq;
		}

		/* Setup fcp_cq_map for fast lookup */
		phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2588 FCP CQ setup: cq[%d]-id=%d, "
				"parent seq[%d]-id=%d\n",
				fcp_cqidx,
				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
				fcp_cqidx,
				phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
	}

	/* Set up fast-path FCP Work Queue */
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3149 Fast-path FCP WQ array not "
				"allocated\n");
		rc = -ENOMEM;
		goto out_destroy_fcp_cq;
	}

	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0534 Fast-path FCP WQ (%d) not "
					"allocated\n", fcp_wqidx);
			rc = -ENOMEM;
			goto out_destroy_fcp_wq;
		}
		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
				    phba->sli4_hba.fcp_cq[fcp_wqidx],
				    LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0535 Failed setup of fast-path FCP "
					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
			goto out_destroy_fcp_wq;
		}

		/* Bind this WQ to the next FCP ring */
		pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
		pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
		phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2591 FCP WQ setup: wq[%d]-id=%d, "
				"parent cq[%d]-id=%d\n",
				fcp_wqidx,
				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
				fcp_cq_index,
				phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
	}
	/*
	 * Set up Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX Complete Queue as the first CQ */
	if (!phba->sli4_hba.mbx_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 Mailbox CQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_fcp_wq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
			phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0529 Failed setup of slow-path mailbox CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_wq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.mbx_cq->queue_id,
			phba->sli4_hba.hba_eq[0]->queue_id);

	/* Set up slow-path ELS Complete Queue */
	if (!phba->sli4_hba.els_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS CQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_mbx_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
			phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0531 Failed setup of slow-path ELS CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.els_cq->queue_id,
			phba->sli4_hba.hba_eq[0]->queue_id);

	/*
	 * Set up all the Work Queues (WQs)
	 */

	/* Set up Mailbox Command Queue */
	if (!phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0538 Slow-path MQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_els_cq;
	}
	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0539 Failed setup of slow-path MQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_els_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);

	/* Set up slow-path ELS Work Queue */
	if (!phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0536 Slow-path ELS WQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_mbx_wq;
	}
	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
			    phba->sli4_hba.els_cq, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0537 Failed setup of slow-path ELS WQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_wq;
	}

	/* Bind this WQ to the ELS ring */
	pring = &psli->ring[LPFC_ELS_RING];
	pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
	phba->sli4_hba.els_cq->pring = pring;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	/*
	 * Create Receive Queue (RQ)
	 */
	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0540 Receive Queue not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_els_wq;
	}

	lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
	lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);

	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_wq;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_fof) {
		rc = lpfc_fof_queue_setup(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0549 Failed setup of FOF Queues: "
					"rc = 0x%x\n", rc);
			goto out_destroy_els_rq;
		}
	}
	return 0;

out_destroy_els_rq:
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
out_destroy_els_wq:
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
out_destroy_els_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
out_destroy_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
out_destroy_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
out_destroy_hba_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
		lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
out_error:
	return rc;
}
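
/*
 * The error labels above unwind strictly in reverse creation order, and
 * each indexed loop pre-decrements its index so the queue whose create
 * just failed is skipped; only queues that were fully set up get
 * destroyed.
 */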

/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
 * operation.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Unset the queues created for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);
	/* Unset mailbox command work queue */
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
	/* Unset ELS work queue */
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
	/* Unset unsolicited receive queue */
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
	/* Unset FCP work queue */
	if (phba->sli4_hba.fcp_wq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
	}
	/* Unset mailbox command complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
	/* Unset ELS complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	/* Unset FCP response complete queue */
	if (phba->sli4_hba.fcp_cq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
	}
	/* Unset fast-path event queue */
	if (phba->sli4_hba.hba_eq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
	}
}

/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * CQE. For now, this pool is used for the interrupt service routine to queue
 * the following HBA completion queue events for the worker thread to process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that, it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}

/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}

/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
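
/*
 * Usage sketch (illustrative, not driver code): the ISR side draws an
 * event with the locked allocator, copies the CQE into it and queues it
 * for the worker thread, which hands it back with the locked release:
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (cq_event) {
 *		// fill from the CQE, add to a slow-path work queue
 *	}
 *	...
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 *
 * The __lpfc_sli4_* variants are for callers that already hold
 * phba->hbalock.
 */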

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the pending completion-queue
 * events back into the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		for (num_resets = 0;
		     num_resets < MAX_IF_TYPE_2_RESETS;
		     num_resets++) {
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);
			/*
			 * Poll the Port Status Register and wait for RDY for
			 * up to 10 seconds. If the port doesn't respond, treat
			 * it as an error. If the port responds with RN, start
			 * the loop again, but only after a delay.
			 */
			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
				msleep(10);
				if (lpfc_readl(phba->sli4_hba.u.if_type2.
					       STATUSregaddr, &reg_data.word0)) {
					rc = -ENODEV;
					goto out;
				}
				if (bf_get(lpfc_sliport_status_rn, &reg_data))
					reset_again++;
				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
					break;
			}

			/*
			 * If the port responds to the init request with
			 * reset needed, delay for a bit and restart the loop.
			 */
			if (reset_again && (rdy_chk < 1000)) {
				msleep(10);
				reset_again = 0;
				continue;
			}

			/* Detect any port errors. */
			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
			    (rdy_chk >= 1000)) {
				phba->work_status[0] = readl(
					phba->sli4_hba.u.if_type2.ERR1regaddr);
				phba->work_status[1] = readl(
					phba->sli4_hba.u.if_type2.ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2890 Port error detected during port "
					"reset(%d): wait_tmo:%d ms, "
					"port status reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					num_resets, rdy_chk*10,
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
				rc = -ENODEV;
			}

			/*
			 * Terminate the outer loop provided the Port indicated
			 * ready within 10 seconds.
			 */
			if (rdy_chk < 1000)
				break;
		}
		/* delay driver action following IF_TYPE_2 function reset */
		msleep(100);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (num_resets >= MAX_IF_TYPE_2_RESETS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3317 HBA not functional: IP Reset Failed "
				"after (%d) retries, try: "
				"echo fw_reset > board_mode\n", num_resets);
		rc = -ENODEV;
	}

	return rc;
}
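
/*
 * Timing note (not in the original source): the IF_TYPE_2 ready poll
 * above runs up to 1000 iterations with a 10 ms sleep per iteration,
 * i.e. a 10 second budget per reset attempt:
 *
 *	1000 iterations * 10 ms = 10,000 ms = 10 s
 *
 * which is why the "2890" message reports the wait as rdy_chk * 10 ms.
 */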

/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error = -ENODEV;
	uint32_t if_type;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return error;
	}

	/* There is no SLI3 failback for SLI4 devices. */
	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2894 SLI_INTF reg contents invalid "
				"sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return error;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BARs regions is dependent on the type of
	 * SLI4 device.
	 */
	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);

		/*
		 * Map SLI4 PCI Config Space Register base to a kernel virtual
		 * address.
		 */
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
		/* Set up BAR0 PCI config space register memory map */
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	} else {
		phba->pci_bar0_map = pci_resource_start(pdev, 1);
		bar0map_len = pci_resource_len(pdev, 1);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
			goto out;
		}
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
		/*
		 * Map SLI4 if type 0 HBA Control Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
		phba->sli4_hba.ctrl_regs_memmap_p =
			ioremap(phba->pci_bar1_map, bar1map_len);
		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA control registers.\n");
			goto out_iounmap_conf;
		}
		phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
		lpfc_sli4_bar1_register_memmap(phba);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
		/*
		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA doorbell registers.\n");
			goto out_iounmap_ctrl;
		}
		phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
		if (error)
			goto out_iounmap_all;
	}

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 * invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later when the device is unloading, the driver should always call
 * free_irq() on all MSI-X vectors it has done request_irq() on before
 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
 * the device will be left with MSI-X enabled, leaking its vectors.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc, i;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		phba->msix_entries[i].entry = i;

	/* Configure MSI-X capability structure */
	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
			     ARRAY_SIZE(phba->msix_entries));
	if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0477 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", i,
				phba->msix_entries[i].vector,
				phba->msix_entries[i].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(phba->msix_entries[0].vector,
			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(phba->msix_entries[1].vector,
			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[1].vector, phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);

vec_fail_out:
	return rc;
}

/**
 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-3 interface spec.
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);
}
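
/*
 * Note (illustrative, not in the original source): the enable/disable pair
 * above must stay symmetric -- every vector that was request_irq()'d has
 * to be free_irq()'d before pci_disable_msix(), or the kernel hits the
 * BUG_ON() described in the lpfc_sli_enable_msix() header. The minimal
 * correct teardown is the one implemented above:
 *
 *	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
 *		free_irq(phba->msix_entries[i].vector, phba);
 *	pci_disable_msix(phba->pcidev);
 */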

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to
 * do so results in a BUG_ON() and the device will be left with MSI
 * enabled, leaking its vector.
 **/
static void
lpfc_sli_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode configured to the driver.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured in the driver, it will
 * try to fall back from the configured interrupt mode to an interrupt mode
 * which is supported by the platform, kernel, and device, in the order:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}
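
/*
 * Usage sketch (illustrative only): lpfc_sli_enable_intr() encodes the
 * requested mode as cfg_mode (2 = MSI-X, 1 = MSI, 0 = INTx) and returns
 * the mode actually achieved, or LPFC_INTR_ERROR. The probe path calls
 * it in a retry loop, stepping the requested mode down when the active
 * interrupt test fails:
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		goto out_error;	// hypothetical label
 *	...
 *	lpfc_sli_disable_intr(phba);
 *	cfg_mode = --intr_mode;	// try the next, less capable mode
 */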

/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}

/**
 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
 * @phba: pointer to lpfc hba data structure.
 * @phys_id: physical processor id to match.
 *
 * Find next available CPU to use for IRQ to CPU affinity.
 **/
static int
lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
{
	struct lpfc_vector_map_info *cpup;
	int cpu;

	cpup = phba->sli4_hba.cpu_map;
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
		/* CPU must be online */
		if (cpu_online(cpu)) {
			if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
			    (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
			    (cpup->phys_id == phys_id)) {
				return cpu;
			}
		}
		cpup++;
	}

	/*
	 * If we get here, we have used ALL CPUs for the specific
	 * phys_id. Now we need to clear out lpfc_used_cpu and start
	 * reusing CPUs.
	 */
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
		if (lpfc_used_cpu[cpu] == phys_id)
			lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
	}

	cpup = phba->sli4_hba.cpu_map;
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
		/* CPU must be online */
		if (cpu_online(cpu)) {
			if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
			    (cpup->phys_id == phys_id)) {
				return cpu;
			}
		}
		cpup++;
	}
	return LPFC_VECTOR_MAP_EMPTY;
}
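
/*
 * Behavior note with a worked example (not in the original source):
 * lpfc_find_next_cpu() makes two passes. Pass one skips CPUs already
 * consumed (lpfc_used_cpu[cpu] != LPFC_VECTOR_MAP_EMPTY); if every online
 * CPU on the requested socket is consumed, it clears the used marks for
 * that phys_id and scans again, so CPUs get reused round robin. E.g. with
 * 4 online CPUs on phys_id 0 and 6 vectors to place, the selections run
 * 0, 1, 2, 3, then the used marks reset and CPUs 0 and 1 are picked again.
 */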

/**
 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of HBA vectors
 *
 * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
 * affinization across multiple physical CPUs (numa nodes).
 * In addition, this routine will assign an IO channel for each CPU
 * to use when issuing I/Os.
 */
static int
lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
{
	int i, idx, saved_chann, used_chann, cpu, phys_id;
	int max_phys_id, min_phys_id;
	int num_io_channel, first_cpu, chan;
	struct lpfc_vector_map_info *cpup;
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo;
#endif
	struct cpumask *mask;
	uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];

	/* If there is no mapping, just return */
	if (!phba->cfg_fcp_cpu_map)
		return 1;

	/* Init cpu_map array */
	memset(phba->sli4_hba.cpu_map, 0xff,
	       (sizeof(struct lpfc_vector_map_info) *
		phba->sli4_hba.num_present_cpu));

	max_phys_id = 0;
	min_phys_id = 0xff;
	phys_id = 0;
	num_io_channel = 0;
	first_cpu = LPFC_VECTOR_MAP_EMPTY;

	/* Update CPU map with physical id and core id of each CPU */
	cpup = phba->sli4_hba.cpu_map;
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
#ifdef CONFIG_X86
		cpuinfo = &cpu_data(cpu);
		cpup->phys_id = cpuinfo->phys_proc_id;
		cpup->core_id = cpuinfo->cpu_core_id;
#else
		/* No distinction between CPUs for other platforms */
		cpup->phys_id = 0;
		cpup->core_id = 0;
#endif

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3328 CPU physid %d coreid %d\n",
				cpup->phys_id, cpup->core_id);

		if (cpup->phys_id > max_phys_id)
			max_phys_id = cpup->phys_id;
		if (cpup->phys_id < min_phys_id)
			min_phys_id = cpup->phys_id;
		cpup++;
	}

	phys_id = min_phys_id;
	/* Now associate the HBA vectors with specific CPUs */
	for (idx = 0; idx < vectors; idx++) {
		cpup = phba->sli4_hba.cpu_map;
		cpu = lpfc_find_next_cpu(phba, phys_id);
		if (cpu == LPFC_VECTOR_MAP_EMPTY) {

			/* Try for all phys_id's */
			for (i = 1; i < max_phys_id; i++) {
				phys_id++;
				if (phys_id > max_phys_id)
					phys_id = min_phys_id;
				cpu = lpfc_find_next_cpu(phba, phys_id);
				if (cpu == LPFC_VECTOR_MAP_EMPTY)
					continue;
				goto found;
			}

			/* Use round robin for scheduling */
			phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
			chan = 0;
			cpup = phba->sli4_hba.cpu_map;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				cpup->channel_id = chan;
				cpup++;
				chan++;
				if (chan >= phba->cfg_fcp_io_channel)
					chan = 0;
			}

			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3329 Cannot set affinity:"
					"Error mapping vector %d (%d)\n",
					idx, vectors);
			return 0;
		}
found:
		cpup += cpu;
		if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
			lpfc_used_cpu[cpu] = phys_id;

		/* Associate vector with selected CPU */
		cpup->irq = phba->sli4_hba.msix_entries[idx].vector;

		/* Associate IO channel with selected CPU */
		cpup->channel_id = idx;
		num_io_channel++;

		if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
			first_cpu = cpu;

		/* Now affinitize to the selected CPU */
		mask = &cpup->maskbits;
		cpumask_clear(mask);
		cpumask_set_cpu(cpu, mask);
		i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
					  vector, mask);

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3330 Set Affinity: CPU %d channel %d "
				"irq %d (%x)\n",
				cpu, cpup->channel_id,
				phba->sli4_hba.msix_entries[idx].vector, i);

		/* Spread vector mapping across multiple physical CPU nodes */
		phys_id++;
		if (phys_id > max_phys_id)
			phys_id = min_phys_id;
	}

	/*
	 * Finally fill in the IO channel for any remaining CPUs.
	 * At this point, all IO channels have been assigned to a specific
	 * MSIx vector, mapped to a specific CPU.
	 * Base the remaining IO channel assigned, to IO channels already
	 * assigned to other CPUs on the same phys_id.
	 */
	for (i = min_phys_id; i <= max_phys_id; i++) {
		/*
		 * If there are no io channels already mapped to
		 * this phys_id, just round robin thru the io_channels.
		 * Setup chann[] for round robin.
		 */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
			chann[idx] = idx;

		saved_chann = 0;
		used_chann = 0;

		/*
		 * First build a list of IO channels already assigned
		 * to this phys_id before reassigning the same IO
		 * channels to the remaining CPUs.
		 */
		cpup = phba->sli4_hba.cpu_map;
		cpu = first_cpu;
		cpup += cpu;
		for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
		     idx++) {
			if (cpup->phys_id == i) {
				/*
				 * Save any IO channels that are
				 * already mapped to this phys_id.
				 */
				if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
					chann[saved_chann] =
						cpup->channel_id;
					saved_chann++;
					goto out;
				}

				/* See if we are using round-robin */
				if (saved_chann == 0)
					saved_chann =
						phba->cfg_fcp_io_channel;

				/* Associate next IO channel with CPU */
				cpup->channel_id = chann[used_chann];
				num_io_channel++;
				used_chann++;
				if (used_chann == saved_chann)
					used_chann = 0;

				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"3331 Set IO_CHANN "
						"CPU %d channel %d\n",
						idx, cpup->channel_id);
			}
out:
			cpu++;
			if (cpu >= phba->sli4_hba.num_present_cpu) {
				cpup = phba->sli4_hba.cpu_map;
				cpu = 0;
			} else {
				cpup++;
			}
		}
	}

	if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
		cpup = phba->sli4_hba.cpu_map;
		for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
			if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
				cpup->channel_id = 0;
				num_io_channel++;

				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"3332 Assign IO_CHANN "
						"CPU %d channel %d\n",
						idx, cpup->channel_id);
			}
			cpup++;
		}
	}

	/* Sanity check */
	if (num_io_channel != phba->sli4_hba.num_present_cpu)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3333 Set affinity mismatch:"
				"%d chann != %d cpus: %d vectors\n",
				num_io_channel, phba->sli4_hba.num_present_cpu,
				vectors);

	/* Enable using cpu affinity for scheduling */
	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
	return 1;
}

/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
 * enables either all or nothing, depending on the current availability of
 * PCI vector resources. The device driver is responsible for calling the
 * individual request_irq() to register each MSI-X vector with an interrupt
 * handler, which is done in this function. Note that later when the device
 * is unloading, the driver should always call free_irq() on all MSI-X
 * vectors it has done request_irq() on before calling pci_disable_msix().
 * Failure to do so results in a BUG_ON() and the device will be left with
 * MSI-X enabled, leaking its vectors.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;

	/* Set up MSI-X multi-message vectors */
	for (index = 0; index < phba->cfg_fcp_io_channel; index++)
		phba->sli4_hba.msix_entries[index].entry = index;

	/* Configure MSI-X capability structure */
	vectors = phba->cfg_fcp_io_channel;
	if (phba->cfg_fof) {
		phba->sli4_hba.msix_entries[index].entry = index;
		vectors++;
	}
enable_msix_vectors:
	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
			     vectors);
	if (rc > 1) {
		vectors = rc;
		goto enable_msix_vectors;
	} else if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}

	/* Log MSI-X vector assignment */
	for (index = 0; index < vectors; index++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0489 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", index,
				phba->sli4_hba.msix_entries[index].vector,
				phba->sli4_hba.msix_entries[index].entry);

	/* Assign MSI-X vectors to interrupt handlers */
	for (index = 0; index < vectors; index++) {
		memset(&phba->sli4_hba.handler_name[index], 0, 16);
		sprintf((char *)&phba->sli4_hba.handler_name[index],
			LPFC_DRIVER_HANDLER_NAME"%d", index);

		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
		atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
		if (phba->cfg_fof && (index == (vectors - 1)))
			rc = request_irq(
				phba->sli4_hba.msix_entries[index].vector,
				&lpfc_sli4_fof_intr_handler, IRQF_SHARED,
				(char *)&phba->sli4_hba.handler_name[index],
				&phba->sli4_hba.fcp_eq_hdl[index]);
		else
			rc = request_irq(
				phba->sli4_hba.msix_entries[index].vector,
				&lpfc_sli4_hba_intr_handler, IRQF_SHARED,
				(char *)&phba->sli4_hba.handler_name[index],
				&phba->sli4_hba.fcp_eq_hdl[index]);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}
	}

	if (phba->cfg_fof)
		vectors--;

	if (vectors != phba->cfg_fcp_io_channel) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3238 Reducing IO channels to match number of "
				"MSI-X vectors, requested %d got %d\n",
				phba->cfg_fcp_io_channel, vectors);
		phba->cfg_fcp_io_channel = vectors;
	}

	lpfc_sli4_set_affinity(phba, vectors);
	return rc;

cfg_fail_out:
	/* free the irq already requested */
	for (--index; index >= 0; index--) {
		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
				      vector, NULL);
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
	}

vec_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

/**
 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_disable_msix(struct lpfc_hba *phba)
{
	int index;

	/* Free up MSI-X multi-message vectors */
	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
				      vector, NULL);
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
	}
	if (phba->cfg_fof) {
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
	}
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);
}

/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}

	if (phba->cfg_fof) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}
	return 0;
}

/**
 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to
 * do so results in a BUG_ON() and the device will be left with MSI
 * enabled, leaking its vector.
 **/
static void
lpfc_sli4_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
}

/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode configured to the driver.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured in the
 * driver, it will try to fall back from the configured interrupt mode to
 * an interrupt mode which is supported by the platform, kernel, and device
 * in the order: MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, index;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
			for (index = 0; index < phba->cfg_fcp_io_channel;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
					   fcp_eq_in_use, 1);
			}
			if (phba->cfg_fof) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
					   fcp_eq_in_use, 1);
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli4_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli4_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);
}

/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of device's XRIs exchange busy. It will check the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it will check the XRI exchange busy on outstanding FCP and ELS
 * I/Os every 30 seconds, log error message, and wait forever. Only when
 * all XRI exchange busy complete, the driver unload shall proceed with
 * invoking the function reset ioctl mailbox command to the CNA and the
 * rest of the driver unload resource release.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	int wait_time = 0;
	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	while (!fcp_xri_cmpl || !els_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!fcp_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2877 FCP XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
		fcp_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}
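
/*
 * Timing sketch (derived from the kernel-doc above, not new behavior):
 * the wait runs in two phases. Until LPFC_XRI_EXCH_BUSY_WAIT_TMO (the
 * 10 second budget) it polls every LPFC_XRI_EXCH_BUSY_WAIT_T1 (10 ms);
 * after the timeout it logs the "2877"/"2878" messages and backs off to
 * LPFC_XRI_EXCH_BUSY_WAIT_T2 (30 second) polls, waiting forever. For
 * example, an exchange that stays busy for 70 seconds is polled roughly
 * 1000 times in the first 10 seconds and only twice more in the
 * following 60 seconds.
 */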

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return rc;
}

/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings. The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return 0;
}
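
/*
 * Pattern note (illustrative): both SLI4 parameter readers above issue
 * their mailbox command with the same poll-vs-wait split, which recurs
 * throughout this file -- poll when interrupts are not yet enabled (early
 * probe), otherwise block-wait with the command's computed timeout:
 *
 *	if (!phba->sli4_hba.intr_enable)
 *		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	else {
 *		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
 *		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
 *	}
 */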

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it can
 * support this kind of device. If the match is successful, the driver core
 * invokes this routine. If this routine determines it can claim the HBA, it
 * does all the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 *      0 - driver can claim the device
 *      negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bringing the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for the
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver will
 * fully reinitialize its device during resume() method call, the driver will
 * set device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 *      0 - driver suspended the device
 *      Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 *      0 - driver resumed the device
 *      Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
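
/*
 * Ordering note (illustrative, not in the original source): suspend and
 * resume are mirror images. Suspend quiesces top-down -- offline prep,
 * stop the worker thread, disable interrupts, then pci_save_state() and
 * PCI_D3hot. Resume rebuilds bottom-up -- PCI_D0 and pci_restore_state()
 * first, then the worker thread, then interrupts, and only then
 * lpfc_sli_brdrestart() and lpfc_online(). Re-saving state after
 * pci_restore_state() is required because that call clears the device's
 * saved_state flag, as the comment above notes.
 */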

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
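
/*
 * For reference, the PCI error recovery callbacks in this file are driven
 * by the PCI core (AER/EEH) in a fixed sequence: error_detected() first,
 * then -- when NEED_RESET was returned -- slot_reset() after the link or
 * slot has been reset, and finally resume() once recovery succeeds. The
 * three lpfc_sli_prep_dev_for_*() helpers above map each reported channel
 * state onto the matching amount of driver quiescing.
 */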
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}
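
/*
 * Example: with the tier values above, a port reporting max_xri = 1024
 * reserves 100 IOCBs for ELS/CT traffic, and one reporting max_xri = 3000
 * falls past the last tier and reserves 250. The reservation tracks
 * roughly a tenth of the XRI pool so that login and CT traffic cannot be
 * starved by SCSI I/O on large configurations.
 */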
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to lpfc hba data structure.
 *
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	INIT_LIST_HEAD(&dma_buffer_list);
	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
	     LPFC_FILE_TYPE_GROUP) ||
	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
	    (be32_to_cpu(image->size) != fw->size)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3022 Invalid FW image found. "
				"Magic:%x Type:%x ID:%x\n",
				be32_to_cpu(image->magic_number),
				bf_get_be32(lpfc_grp_hdr_file_type, image),
				bf_get_be32(lpfc_grp_hdr_id, image));
		rc = -EINVAL;
		goto release_out;
	}
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc)
				goto release_out;
		}
		rc = offset;
	}

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3024 Firmware update done: %d.\n", rc);
}
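
/*
 * The download loop above stages the image through a fixed set of
 * LPFC_MBX_WR_CONFIG_MAX_BDE coherent DMA pages: each pass fills the page
 * list from fw->data starting at the running offset (with the final page
 * truncated to the bytes that remain), then hands the list to
 * lpfc_wr_object(), which writes it to flash and advances the offset. For
 * instance, a 1 MB image with 4 KB pages takes 256 page fills spread over
 * however many passes the per-command BDE limit allows.
 */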
/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: which firmware update path to use.
 *
 * This routine is called to perform Linux generic firmware upgrade on device
 * that supports such feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					file_name, &phba->pcidev->dev,
					GFP_KERNEL, (void *)phba,
					lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
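
/*
 * Usage: for a model name of, say, "LPe16000" (an example value), the
 * routine asks the kernel firmware loader for a file named "LPe16000.grp".
 * INT_FW_UPGRADE requests it asynchronously at probe time via
 * request_firmware_nowait() so that probing never blocks on user space,
 * while RUN_FW_UPGRADE, used for explicit user-initiated updates, loads it
 * synchronously and calls lpfc_write_firmware() directly.
 */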
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see whether the driver can support
 * this kind of device. If the match is successful, the driver core invokes
 * this routine. If this routine determines it can claim the HBA, it does all
 * the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error, ret;
	uint32_t cfg_mode, intr_mode;
	int adjusted_fcp_io_channel;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1411 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Initialize and populate the iocb list per host */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2821 initialize iocb list %d.\n",
			phba->cfg_iocb_cnt*1024);
	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1413 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	lpfc_stop_port(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}
	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX)
		adjusted_fcp_io_channel = 1;
	else
		adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
	phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_disable_intr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);
	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
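
/*
 * The probe path above follows the usual kernel unwind idiom: each setup
 * step has a matching out_* label, and a failure at step N jumps to the
 * label that tears down steps N-1..1 in reverse order. Adding a new setup
 * step therefore means adding exactly one goto target between the two
 * labels that bracket it.
 */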
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);

	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);
}
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and
 * bringing the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the driver will set device to PCI_D3hot state in PCI config space instead
 * of setting it according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see whether
 * the driver can support this kind of device. If the match is successful,
 * the driver core invokes this routine. This routine dispatches the action
 * to the proper SLI-3 or SLI-4 device probing routine, which will do all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
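
/*
 * Dispatch note: the SLI_INTF config register read above is what steers
 * every Emulex HBA to the right code path -- hardware that reports a valid
 * SLI-4 interface word gets the s4 probe (and, through pci_dev_grp, the s4
 * variant of every other entry point below), while everything else falls
 * back to the legacy SLI-3 probe.
 */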
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{

	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_EnableXLane = 0;
		if (phba->device_data_mem_pool)
			mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}
/**
 * lpfc_fof_queue_setup - Set up all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the fof queues for the FC HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
int
lpfc_fof_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
	if (rc)
		return -ENOMEM;

	if (phba->cfg_EnableXLane) {

		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
		if (rc)
			goto out_oas_cq;

		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
				    phba->sli4_hba.oas_cq, LPFC_FCP);
		if (rc)
			goto out_oas_wq;

		phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
		phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
	}

	return 0;

out_oas_wq:
	if (phba->cfg_EnableXLane)
		lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
out_oas_cq:
	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
	return rc;
}
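
/*
 * Design note: as with the other SLI4 queues in this file, allocation and
 * hardware setup are split -- lpfc_fof_queue_create() below only allocates
 * queue memory, while lpfc_fof_queue_setup() above issues the EQ/CQ/WQ
 * create mailbox commands that bind the queues to the port. The split lets
 * the driver tear down and re-create the hardware side of the queues
 * (for example across a reset) without reallocating the memory.
 */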
/**
 * lpfc_fof_queue_create - Create all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the fof queues for the FC HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as placeholder.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;

	/* Create FOF EQ */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc)
		goto out_error;

	phba->sli4_hba.fof_eq = qdesc;

	if (phba->cfg_EnableXLane) {

		/* Create OAS CQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_cq = qdesc;

		/* Create OAS WQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_wq = qdesc;

	}
	return 0;

out_error:
	lpfc_fof_queue_destroy(phba);
	return -ENOMEM;
}
/**
 * lpfc_fof_queue_destroy - Destroy all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FC HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 **/
int
lpfc_fof_queue_destroy(struct lpfc_hba *phba)
{
	/* Release FOF Event queue */
	if (phba->sli4_hba.fof_eq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
		phba->sli4_hba.fof_eq = NULL;
	}

	/* Release OAS Completion queue */
	if (phba->sli4_hba.oas_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
		phba->sli4_hba.oas_cq = NULL;
	}

	/* Release OAS Work queue */
	if (phba->sli4_hba.oas_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
		phba->sli4_hba.oas_wq = NULL;
	}
	return 0;
}
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);
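
/*
 * Every ASIC the driver supports must appear in the table above: the PCI
 * core matches each newly discovered function against these vendor/device
 * pairs and only calls lpfc_pci_probe_one() on a hit, while
 * MODULE_DEVICE_TABLE() exports the same list as module aliases so that
 * udev can autoload the lpfc module when matching hardware shows up.
 */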
static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler    = &lpfc_err_handler,
};
static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 */
static int __init
lpfc_init(void)
{
	int cpu;
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = 0;
	for_each_present_cpu(cpu)
		lpfc_present_cpu++;

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}
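
/*
 * Initialization order matters here: the FC transport template(s) must
 * exist before pci_register_driver() is called, because probe can run
 * synchronously from registration and shost creation needs the template.
 * Conversely, lpfc_exit() below unregisters the PCI driver before
 * releasing the transports.
 */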
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
);