/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
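/*
 * Illustrative sketch, not from the original driver: the polled mailbox
 * lifecycle used throughout this file reduces to the pattern below --
 * allocate the command from the mempool, build it, issue it with
 * MBX_POLL, and free it on every exit path. example_poll_read_rev() is
 * a hypothetical name added for exposition.
 */
#if 0
static int
example_poll_read_rev(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	lpfc_read_rev(phba, pmb);			/* build the command */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);	/* issue and wait */
	mempool_free(pmb, phba->mbox_mem_pool);		/* always release */
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif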
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
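/*
 * Illustrative sketch, not from the original driver: decoding mailbox
 * word 7 through struct prog_id just reinterprets the 32-bit word as
 * bit fields. For example, fields decoding to ver=5, rev=1, lev=1,
 * dist=3 (-> 'x'), num=4 would render as "5.11x4"; buf below is a
 * hypothetical destination.
 */
#if 0
	char buf[32];
	uint32_t word = pmboxq->u.mb.un.varWords[7];	/* option ROM word */
	struct prog_id *p = (struct prog_id *)&word;

	snprintf(buf, sizeof(buf), "%d.%d%d%c%d",
		 p->ver, p->rev, p->lev, "nabx"[p->dist], p->num);
#endif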
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
static void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}
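/*
 * Illustrative sketch, not from the original driver: u64_to_wwn() and
 * wwn_to_u64() (from the scsi_transport_fc/lpfc headers) are inverses
 * over the 8-byte big-endian WWN encoding, which is why a soft WWPN
 * configured as a u64 round-trips cleanly into fc_sparam above and back
 * out to the fc_host attributes below. The value here is hypothetical.
 */
#if 0
	u64 soft = 0x10000000c9000001ULL;
	u8 wwn[8];

	u64_to_wwn(soft, wwn);		/* wwn[0] = 0x10 ... wwn[7] = 0x01 */
	BUG_ON(wwn_to_u64(wwn) != soft);
#endif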
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
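/*
 * Illustrative sketch, not from the original driver: the serial-number
 * fallback above maps each nibble of the WWNN's IEEE bytes to ASCII --
 * 0..9 become '0'..'9' (0x30 + j) and 10..15 become 'a'..'f'-range
 * letters (0x61 + j - 10) -- so an IEEE byte of 0xc9 emits the two
 * characters "c9". example_nibble_to_ascii() is a hypothetical helper.
 */
#if 0
static char example_nibble_to_ascii(unsigned int j)
{
	return (j <= 9) ? (char)('0' + j) : (char)('a' + (j - 10));
}
#endif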
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
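/*
 * Illustrative sketch, not from the original driver: each clause of the
 * speed check above pairs a user-requested LPFC_USER_LINK_SPEED_* value
 * with the corresponding LMT_* capability bit in phba->lmt; a
 * table-driven equivalent would look like this (example_speed_tbl is a
 * hypothetical name).
 */
#if 0
static const struct { uint32_t cfg; uint32_t lmt; } example_speed_tbl[] = {
	{ LPFC_USER_LINK_SPEED_1G,  LMT_1Gb  },
	{ LPFC_USER_LINK_SPEED_2G,  LMT_2Gb  },
	{ LPFC_USER_LINK_SPEED_4G,  LMT_4Gb  },
	{ LPFC_USER_LINK_SPEED_8G,  LMT_8Gb  },
	{ LPFC_USER_LINK_SPEED_10G, LMT_10Gb },
	{ LPFC_USER_LINK_SPEED_16G, LMT_16Gb },
	{ LPFC_USER_LINK_SPEED_32G, LMT_32Gb },
};
#endif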
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2522 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_lock_irq(&pring->ring_lock);
		else
			spin_lock_irq(&phba->hbalock);
		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;

		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_unlock_irq(&pring->ring_lock);
		else
			spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}
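/*
 * Illustrative sketch, not from the original driver: the cleanup
 * helpers above all use the same idiom -- splice the shared list onto a
 * private head while holding the lock, then walk the private list
 * lock-free. example_shared_list is a hypothetical list head.
 */
#if 0
	LIST_HEAD(local);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&example_shared_list, &local);	/* shared list is now empty */
	spin_unlock_irq(&phba->hbalock);
	/* ... iterate 'local' without holding hbalock ... */
#endif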
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	pring = &psli->ring[LPFC_ELS_RING];

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	spin_lock(&pring->ring_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&pring->ring_lock);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

	lpfc_sli4_free_sp_events(phba);
	return 0;
}
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
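/*
 * Illustrative sketch, not from the original driver: lpfc_hba_down_post
 * (and lpfc_handle_eratt further below) dispatch through per-revision
 * function pointers that the driver binds once at attach time, roughly
 * like this:
 */
#if 0
	if (phba->sli_rev < LPFC_SLI_REV4)
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
	else
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
#endif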
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
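/*
 * Illustrative sketch, not from the original driver: on kernels of this
 * vintage an unsigned-long-argument timer callback such as
 * lpfc_hb_timeout is wired up with setup_timer() and armed with
 * mod_timer(), e.g.:
 */
#if 0
	setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
#endif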
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_num_disc_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing: last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
		mod_timer(&phba->hb_tmofunc,
			jiffies +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}
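/*
 * Illustrative sketch, not from the original driver: the idle test at
 * the top of the handler above is the standard wrap-safe jiffies
 * comparison -- "deadline not yet reached" is written as
 * time_after(deadline, jiffies):
 */
#if 0
	unsigned long deadline = phba->last_completion_time +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL);

	if (time_after(deadline, jiffies)) {
		/* recent I/O completion seen; just re-arm the timer */
	}
#endif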
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");
	lpfc_offline_prep(phba, mbx_action);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli_brdrestart(phba);
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7623 Checking UE recoverable");

		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/*Sleep for 1Sec, before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
				     &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba,
						KERN_ERR, LOG_INIT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		lpfc_sli4_offline_eratt(phba);
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline Data: x%x x%x\n",
				reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
				en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

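/*
 * Note on the unwind labels above: each failure point in lpfc_handle_latt
 * jumps to a label that frees only what has been allocated so far, so the
 * resources are released in the reverse order of their allocation (mbuf,
 * then the lpfc_dmabuf, then the mailbox command).
 */
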
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--)
					index++;
				continue;
			}
			}
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe",
				"Obsolete, Unsupported FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe",
				"Obsolete, Unsupported FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_G6_FC:
		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator %s",
				m.name, m.function,
				phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				"Emulex %s %s %s ",
				m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}

/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
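
/*
 * S(N,V) rotates the 32-bit value V left by N bits; for example,
 * S(5, 0x80000001) == 0x00000030, since the top five bits wrap around
 * into the low end. SHA-1 uses this rotate both in its message schedule
 * and in every round of the compression function below.
 */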
/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed by @HashResultPointer
 * with the values from the working hash table pointed by @HashWorkingPointer.
 * The results are put back into the initial hash table, returned through
 * the @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
			S(1,
			  HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
			  HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}

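/*
 * Illustrative sketch of how the three SHA-1 helpers above compose; the
 * guard macro and function name here are hypothetical and exist only for
 * documentation purposes.
 */
#ifdef LPFC_DOC_EXAMPLES
static void
lpfc_example_challenge_digest(uint32_t *digest, uint32_t *work80)
{
	/* digest[0..4] <- the five SHA-1 initialization constants */
	lpfc_sha_init(digest);
	/* one 80-round compression pass over work80[0..79] */
	lpfc_sha_iterate(digest, work80);
}
#endif
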
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not be in memory free mode already */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* take care of nodes in unused state before the state
		 * machine taking action.
		 */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RM);
	}

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Lets wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				"0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						LOG_NODE,
						"0282 did:x%x ndlp:x%p "
						"usgmap:x%x refcnt:%d\n",
						ndlp->nlp_DID, (void *)ndlp,
						ndlp->nlp_usg_map,
						atomic_read(
							&ndlp->kref.refcount));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}

/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}

/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}

/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}

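/*
 * The double-underscore prefix on __lpfc_sli4_stop_fcf_redisc_wait_timer
 * follows the usual kernel locking convention: the __ variant assumes the
 * caller already holds the host lock, while the plain variant above takes
 * and releases phba->hbalock itself around the flag check and the call.
 */
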
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all the user space access to
 * the HBA, whether they are from sysfs interface or libdfc interface will
 * all be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2813 Mgmt IO is Blocked %x "
				"- mbox cmd %x still active\n",
				phba->sli.sli_flag, actcmd);
			break;
		}
	}
}

/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fixup the temporary rpi assignments.
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;

			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				if (NLP_CHK_NODE_ACT(ndlp)) {
					ndlp->nlp_rpi =
						lpfc_sli4_alloc_rpi(phba);
					lpfc_printf_vlog(ndlp->vport, KERN_INFO,
							 LOG_NODE,
							 "0009 rpi:%x DID:%x "
							 "flg:%x map:%x %p\n",
							 ndlp->nlp_rpi,
							 ndlp->nlp_DID,
							 ndlp->nlp_flag,
							 ndlp->nlp_usg_map,
							 ndlp);
				}
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (!lpfc_sli_queue_setup(phba)) {
		lpfc_unblock_mgmt_io(phba);
		return 1;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);
	} else {
		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}

/**
 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all the user space
 * access to the HBA, whether they are from sysfs interface or libdfc
 * interface will be allowed. The HBA is set to block the management interface
 * when the driver prepares the HBA interface for online or offline and then
 * set to unblock the management interface afterwards.
 **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It performs
 * unregistration login to all the nodes on all vports and flushes the mailbox
 * queue to make it ready to be brought offline.
 **/
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba, mbx_action);

	lpfc_linkdown(phba);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			shost = lpfc_shost_from_vport(vports[i]);
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				if (!NLP_CHK_NODE_ACT(ndlp))
					continue;
				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
					continue;
				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RM);
				}
				spin_lock_irq(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(shost->host_lock);
				/*
				 * Whenever an SLI4 port goes offline, free the
				 * RPI. Get a new RPI when the adapter port
				 * comes back online.
				 */
				if (phba->sli_rev == LPFC_SLI_REV4) {
					lpfc_printf_vlog(ndlp->vport,
							 KERN_INFO, LOG_NODE,
							 "0011 lpfc_offline: "
							 "ndlp:x%p did %x "
							 "usgmap:x%x rpi:%x\n",
							 ndlp, ndlp->nlp_DID,
							 ndlp->nlp_usg_map,
							 ndlp->nlp_rpi);

					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
				}
				lpfc_unreg_rpi(vports[i], ndlp);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
}

/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup. The HBA is offline
	   now. */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
	LIST_HEAD(els_sgl_list);
	LIST_HEAD(scsi_sgl_list);
	int rc;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&pring->ring_lock);
		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock(&pring->ring_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&pring->ring_lock);
		list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
		spin_unlock(&pring->ring_lock);
		spin_unlock_irq(&phba->hbalock);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_mbuf_free(phba, sglq_entry->virt,
					       sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&pring->ring_lock);
		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock(&pring->ring_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}

	/*
	 * update on pci function's allocated scsi xri-sgl list
	 */
	phba->total_scsi_bufs = 0;

	/* maximum number of xris available for scsi buffers */
	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
				      els_xri_cnt;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2401 Current allocated SCSI xri-sgl count:%d, "
			"maximum SCSI xri count:%d\n",
			phba->sli4_hba.scsi_xri_cnt,
			phba->sli4_hba.scsi_xri_max);

	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
		/* max scsi xri shrunk below the allocated scsi buffers */
		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
					phba->sli4_hba.scsi_xri_max;
		/* release the extra allocated scsi buffers */
		for (i = 0; i < scsi_xri_cnt; i++) {
			list_remove_head(&scsi_sgl_list, psb,
					 struct lpfc_scsi_buf, list);
			if (psb) {
				pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					      psb->data, psb->dma_handle);
				kfree(psb);
			}
		}
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}

	/* update xris associated to remaining allocated scsi buffers */
	psb = NULL;
	psb_next = NULL;
	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2560 Failed to allocate xri for "
					"scsi buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	lpfc_scsi_free(phba);
	return rc;
}

/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates a FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;
	int error = 0;

	if (dev != &phba->pcidev->dev) {
		shost = scsi_host_alloc(&lpfc_vport_template,
					sizeof(struct lpfc_vport));
	} else {
		if (phba->sli_rev == LPFC_SLI_REV4)
			shost = scsi_host_alloc(&lpfc_template,
					sizeof(struct lpfc_vport));
		else
			shost = scsi_host_alloc(&lpfc_template_s3,
					sizeof(struct lpfc_vport));
	}
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;

	lpfc_get_vport_cfgparam(vport);
	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	shost->nr_hw_queues = phba->cfg_fcp_io_channel;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;

	init_timer(&vport->els_tmofunc);
	vport->els_tmofunc.function = lpfc_els_timeout;
	vport->els_tmofunc.data = (unsigned long)vport;

	init_timer(&vport->delayed_disc_tmo);
	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
	vport->delayed_disc_tmo.data = (unsigned long)vport;

	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}

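/*
 * Illustrative sketch (hypothetical guard and name): the lpfc_vport lives
 * in the hostdata area that scsi_host_alloc() reserves right behind the
 * Scsi_Host, which is why lpfc_create_port can cast shost->hostdata.
 */
#ifdef LPFC_DOC_EXAMPLES
static struct lpfc_vport *
lpfc_example_vport_from_shost(struct Scsi_Host *shost)
{
	return (struct lpfc_vport *)shost->hostdata;
}
#endif
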
/**
 * destroy_port -  destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys a FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_cleanup(vport);
	return;
}

/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
 * uses the kernel idr facility to perform the task.
 *
 * Return codes
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int ret;

	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}

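/*
 * Illustrative sketch (hypothetical guard and name) of the idr pairing
 * behind lpfc_get_instance: ids handed out by idr_alloc() are returned
 * to the lpfc_hba_index pool with idr_remove() on teardown.
 */
#ifdef LPFC_DOC_EXAMPLES
static void
lpfc_example_instance_cycle(void)
{
	int id = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);

	if (id >= 0)
		idr_remove(&lpfc_hba_index, id);
}
#endif
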
/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_32Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}

/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);
}

/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine is invoked when waiting for FCF table rediscover has been
 * timed out. If new FCF record(s) has (have) been discovered during the
 * wait period, a new FCF event shall be added to the FCOE async event
 * list, and then the worker thread shall be woken up for processing from
 * the worker thread context.
 **/
void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

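/*
 * Illustrative sketch (hypothetical guard and name) of the arm/cancel
 * handshake around the FCF rediscovery wait timer: FCF_REDISC_PEND is the
 * token that decides whether the expiry callback above posts an event.
 */
#ifdef LPFC_DOC_EXAMPLES
static void
lpfc_example_fcf_redisc_cycle(struct lpfc_hba *phba)
{
	/* arm: sets FCF_REDISC_PEND and starts the wait timer */
	lpfc_fcf_redisc_wait_start_timer(phba);

	/* cancel: clears FCF_REDISC_PEND under the host lock, so a racing
	 * expiry sees the flag clear and returns without posting an event
	 */
	lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
}
#endif
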
/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
 *
 * Return: Link-attention status in terms of base driver's coding.
 **/
static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

/**
 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			link_speed = 16000;
			break;
		default:
			link_speed = 0;
		}
	} else {
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
				phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}

/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Mbps for the link speed.
 *
 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_ASYNC_LINK_SPEED_ZERO:
			port_speed = 0;
			break;
		case LPFC_ASYNC_LINK_SPEED_10MBPS:
			port_speed = 10;
			break;
		case LPFC_ASYNC_LINK_SPEED_100MBPS:
			port_speed = 100;
			break;
		case LPFC_ASYNC_LINK_SPEED_1GBPS:
			port_speed = 1000;
			break;
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			port_speed = 10000;
			break;
		case LPFC_ASYNC_LINK_SPEED_20GBPS:
			port_speed = 20000;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			port_speed = 25000;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			port_speed = 40000;
			break;
		default:
			port_speed = 0;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_FC_LA_SPEED_UNKNOWN:
			port_speed = 0;
			break;
		case LPFC_FC_LA_SPEED_1G:
			port_speed = 1000;
			break;
		case LPFC_FC_LA_SPEED_2G:
			port_speed = 2000;
			break;
		case LPFC_FC_LA_SPEED_4G:
			port_speed = 4000;
			break;
		case LPFC_FC_LA_SPEED_8G:
			port_speed = 8000;
			break;
		case LPFC_FC_LA_SPEED_10G:
			port_speed = 10000;
			break;
		case LPFC_FC_LA_SPEED_16G:
			port_speed = 16000;
			break;
		case LPFC_FC_LA_SPEED_32G:
			port_speed = 32000;
			break;
		default:
			port_speed = 0;
		}
		break;
	default:
		port_speed = 0;
	}
	return port_speed;
}

/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
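/*
 * Pattern note (sketch, not driver code): lpfc_sli4_async_link_evt() above
 * uses the classic kernel error-unwind ladder - each allocation failure
 * jumps to a label that frees only what was already allocated, in reverse
 * order. A minimal skeleton of that shape, with hypothetical names:
 */
#if 0
	a = alloc_a();
	if (!a)
		return;
	b = alloc_b();
	if (!b)
		goto out_free_a;
	c = alloc_c();
	if (!c)
		goto out_free_b;
	/* ... use a, b and c ... */
	return;
out_free_b:
	free_b(b);
out_free_a:
	free_a(a);
#endif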
/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
		/* Parse and translate status field */
		mb = &pmb->u.mb;
		mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
							   (void *)acqe_fc);

		/* Parse and translate link attention fields */
		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
		la->eventTag = acqe_fc->event_tag;
		bf_set(lpfc_mbx_read_top_att_type, la,
		       LPFC_FC_LA_TYPE_LINK_DOWN);

		/* Invoke the mailbox command callback function */
		lpfc_mbx_cmpl_read_topology(phba, pmb);

		return;
	}

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
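/*
 * Design note (sketch): when the FC link-attention status is not LINK_UP,
 * lpfc_sli4_async_fc_evt() above skips the mailbox round-trip entirely -
 * it fills the READ_TOPOLOGY fields from the ACQE and calls the completion
 * handler directly, so the rest of the driver sees the same flow as a real
 * mailbox completion:
 */
#if 0
	la->eventTag = acqe_fc->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, LPFC_FC_LA_TYPE_LINK_DOWN);
	lpfc_mbx_cmpl_read_topology(phba, pmb);	/* invoked inline */
#endif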
/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host  *shost;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
			"x%08x SLI Event Type:%d\n",
			acqe_sli->event_data1, acqe_sli->event_data2,
			evt_type);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* get port name is empty */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? "" : " not");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? "" : " not");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}
		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Async SLI event - Event Data1:x%08x Event Data2:"
				"x%08x SLI Event Type:%d\n",
				acqe_sli->event_data1, acqe_sli->event_data2,
				evt_type);
		break;
	}
}
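/*
 * Hardening sketch (not in the driver): the status strings above are
 * written with sprintf() into a fixed buffer (declared here as
 * message[128], sized for the longest string). A bounds-checked variant
 * would use snprintf(), e.g.:
 */
#if 0
	snprintf(message, sizeof(message),
		 "Unknown event status x%02x", status);
#endif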
/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}
/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
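/*
 * Pattern note (sketch): lpfc_create_vport_work_array() returns a
 * NULL-terminated array of referenced vports (or NULL on failure), which
 * is why the loop above guards on both the max_vports bound and a NULL
 * entry, and why lpfc_destroy_vport_work_array() must always be called
 * afterwards to drop the references. Generic shape:
 */
#if 0
	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			do_something(vports[i]);	/* hypothetical */
	lpfc_destroy_vport_work_array(phba, vports);
#endif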
/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
					active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}
/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change.  The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}
/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
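/*
 * Pattern note (sketch, hypothetical names): the drain loop above takes
 * hbalock only long enough to detach one event at a time, then processes
 * it unlocked. That keeps lock hold times short while new events can
 * still be queued concurrently by the interrupt path:
 */
#if 0
	while (!list_empty(&queue)) {
		spin_lock_irq(&lock);
		list_remove_head(&queue, item, struct item_type, list);
		spin_unlock_irq(&lock);
		process(item);		/* may take other locks */
		release(item);		/* return to free pool */
	}
#endif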
/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}
/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}
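/*
 * Design note (sketch): each helper called above fills function pointers
 * in struct lpfc_hba keyed by dev_grp, giving ordinary C polymorphism -
 * the implementation is picked once at setup, and callers invoke the
 * pointer uniformly. A minimal analogue of what a table-setup helper
 * does (names taken from lpfc_init_api_table_setup() below):
 */
#if 0
	if (dev_grp == LPFC_PCI_DEV_OC)		/* SLI-4 device */
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
	else					/* SLI-3 device */
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
	/* callers later do: phba->lpfc_hba_down_post(phba); */
#endif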
/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}
/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
}
/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Release PCI resource and disable PCI device */
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);

	return;
}
/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * on outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	else
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}
/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV extended capability to determine the
 * number of virtual functions the physical function supports. It returns
 * 0 when the device does not support SR-IOV.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}
/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
 * API call is not considered an error condition for most devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}
/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	int rc;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.ring)
		phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING *
					 sizeof(struct lpfc_sli_ring),
					 GFP_KERNEL);
	if (!phba->sli.ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	/* Initialize the host templates with the configured values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt;

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}
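/*
 * Worked example (illustrative, assumed sizes): for the non-T10-DIF SLI-3
 * path above, with a hypothetical sizeof(struct fcp_cmnd) of 32,
 * sizeof(struct fcp_rsp) of 24 and sizeof(struct ulp_bde64) of 12, a
 * cfg_sg_seg_cnt of 64 yields
 *
 *	sg_dma_buf_size = 32 + 24 + (64 + 2) * 12 = 848 bytes
 *
 * i.e. the DMA buffer holds the FCP command, the FCP response, one BDE
 * for each of those two, and one BDE per data segment.
 */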
/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}
/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_vector_map_info *cpup;
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mboxq;
	int rc, i, hbq_count, max_buf_size;
	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
	struct lpfc_mqe *mqe;
	int longs;
	int fof_vectors = 0;

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Before proceeding, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;
	init_timer(&phba->rrq_tmr);
	phba->rrq_tmr.function = lpfc_rrq_timeout;
	phba->rrq_tmr.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;
	/* FCF rediscover timer */
	init_timer(&phba->fcf.redisc_wait);
	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
	phba->fcf.redisc_wait.data = (unsigned long)phba;

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
	 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
	 */
	if (!phba->sli.ring)
		phba->sli.ring = kzalloc(
			(LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (!phba->sli.ring)
		return -ENOMEM;

	/*
	 * It doesn't matter what family our adapter is in, we are
	 * limited to 2 Pages, 512 SGEs, for our SGL.
	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
	 */
	max_buf_size = (2 * SLI4_PAGE_SIZE);
	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a SGE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a SGE for each, and a SGE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
		/*
		 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
		 * to post 1 page for the SGL.
		 */
	}

	/* Initialize the host templates with the updated values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	/* Initialize buffer queue management fields */
	hbq_count = lpfc_sli_hbq_count();
	for (i = 0; i < hbq_count; ++i)
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
	INIT_LIST_HEAD(&phba->rb_pend_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */
	/* Initialize the Abort scsi buffer list used by driver */
	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Fast-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* initialize optic_state to 0xFF */
	phba->sli4_hba.lnk_info.optic_state = 0xff;

	/* Initialize the driver internal SLI layer lists. */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			return -ENODEV;
		phba->temp_sensor_support = 1;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;
	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
	lpfc_supported_pages(mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (!rc) {
		mqe = &mboxq->u.mqe;
		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
		       LPFC_MAX_SUPPORTED_PAGES);
		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
			switch (pn_page[i]) {
			case LPFC_SLI4_PARAMETERS:
				phba->sli4_hba.pc_sli4_params.supported = 1;
				break;
			default:
				break;
			}
		}
		/* Read the port's SLI4 Parameters capabilities if supported. */
		if (phba->sli4_hba.pc_sli4_params.supported)
			rc = lpfc_pc_sli4_params_get(phba, mboxq);
		if (rc) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}

	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters come
	 * back in conflict.
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2999 Unsupported SLI4 Parameters "
					"Extents and RPI headers enabled.\n");
			goto out_free_bsmbx;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);
	if (phba->cfg_fof)
		fof_vectors = 1;

	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}

	phba->sli4_hba.fcp_eq_hdl =
			kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
			    (fof_vectors + phba->cfg_fcp_io_channel)),
			    GFP_KERNEL);
	if (!phba->sli4_hba.fcp_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2572 Failed allocate memory for "
				"fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}

	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
				      (fof_vectors +
				       phba->cfg_fcp_io_channel)), GFP_KERNEL);
	if (!phba->sli4_hba.msix_entries) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2573 Failed allocate memory for msi-x "
				"interrupt vector entries\n");
		rc = -ENOMEM;
		goto out_free_fcp_eq_hdl;
	}

	phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
					 phba->sli4_hba.num_present_cpu),
					 GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3327 Failed allocate memory for msi-x "
				"interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_msix;
	}
	if (lpfc_used_cpu == NULL) {
		lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
					 GFP_KERNEL);
		if (!lpfc_used_cpu) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3335 Failed allocate memory for msi-x "
					"interrupt vector mapping\n");
			kfree(phba->sli4_hba.cpu_map);
			rc = -ENOMEM;
			goto out_free_msix;
		}
		for (i = 0; i < lpfc_present_cpu; i++)
			lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
	}

	/* Initialize io channels for round robin */
	cpup = phba->sli4_hba.cpu_map;
	rc = 0;
	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
		cpup->channel_id = rc;
		rc++;
		if (rc >= phba->cfg_fcp_io_channel)
			rc = 0;
		cpup++;
	}

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

out_free_msix:
	kfree(phba->sli4_hba.msix_entries);
out_free_fcp_eq_hdl:
	kfree(phba->sli4_hba.fcp_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}
/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.num_online_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Free memory allocated for msi-x interrupt vector entries */
	kfree(phba->sli4_hba.msix_entries);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.fcp_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}
/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	INIT_LIST_HEAD(&phba->port_list);
	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	/* Initialize the scsi buffer list used by driver for scsi IO */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	return 0;
}
/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}
/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	/* Stop kernel worker thread */
	kthread_stop(phba->worker_thread);
}
/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
static void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host.  */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, LPFC_IOCB_LIST_CNT);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}
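/*
 * Design note (sketch): lpfc_init_iocb_list() pre-allocates the whole iocb
 * pool at attach time so the I/O path never has to allocate. On a partial
 * failure it reuses lpfc_free_iocb_list() to unwind, which is safe because
 * every successfully built entry is already on phba->lpfc_iocb_list before
 * the next iteration:
 */
#if 0
	for (i = 0; i < iocb_count; i++) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_free_iocbq;	/* frees all listed entries */
		list_add(&entry->list, &phba->lpfc_iocb_list);
	}
#endif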
/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
void
lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;

	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
}
/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(sglq_list);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	lpfc_free_sgl_list(phba, &sglq_list);
}
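
/*
 * Note on lock ordering above: hbalock is taken with IRQs disabled before
 * the per-ring ring_lock, matching the nesting used elsewhere in the SLI4
 * path. Splicing the entries onto a private list keeps the locked section
 * short and lets lpfc_free_sgl_list() run without holding either lock.
 */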
/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}
/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}
/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 **/
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* scsi xri-buffer book keeping */
	phba->sli4_hba.scsi_xri_cnt = 0;
}
/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents. This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
 * and should be called only when interrupts are disabled.
 *
 * Return codes
 *	0 - successful
 *	-ERROR - otherwise.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba. This single region
 * provides support for up to 64 rpis. The region is used globally
 * by the device.
 *
 * Returns:
 *	A valid rpi hdr on success.
 *	A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;
	uint32_t rpi_count;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required. Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
		    phba->sli4_hba.max_cfg_param.max_rpi - 1;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block. The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The port has a limited number of rpis. The increment here
	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
	 * and to allow the full max_rpi range per port.
	 */
	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
		rpi_count = rpi_limit - curr_rpi_range;
	else
		rpi_count = LPFC_RPI_HDR_COUNT;

	/*
	 * First allocate the protocol header region for the port. The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
					   LPFC_HDR_TEMPLATE_SIZE,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * The next_rpi stores the next logical modulo-64 rpi value used
	 * to post physical rpis in subsequent rpi postings.
	 */
	phba->sli4_hba.next_rpi += rpi_count;
	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
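
/*
 * Worked example of the rpi_count clamp above (hypothetical numbers):
 * with rpi_base = 0, max_rpi = 100 and LPFC_RPI_HDR_COUNT = 64,
 * rpi_limit = 99. The first header block starts at next_rpi = 0 and
 * posts 64 rpis (0..63); the second starts at 64, and since
 * 64 + 63 > 99 it is clamped to rpi_count = 99 - 64 = 35.
 */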
/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;

	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}
 exit:
	/* There are no rpis available to the port now. */
	phba->sli4_hba.next_rpi = 0;
}
/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}
/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli rings */
	kfree(phba->sli.ring);
	phba->sli.ring = NULL;

	kfree(phba);
}
/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;
	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * At this point we are fully registered with PSA. In addition,
	 * any initial discovery should be completed.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}
/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);
}
/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	uint32_t old_mask;
	uint32_t old_guard;

	int pagecnt = 10;

	if (lpfc_prot_mask && lpfc_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");

		old_mask = lpfc_prot_mask;
		old_guard = lpfc_prot_guard;

		/* Only allow supported values */
		lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
				   SHOST_DIX_TYPE0_PROTECTION |
				   SHOST_DIX_TYPE1_PROTECTION);
		lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);

		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
		if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
			lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;

		if (lpfc_prot_mask && lpfc_prot_guard) {
			if ((old_mask != lpfc_prot_mask) ||
			    (old_guard != lpfc_prot_guard))
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1475 Registering BlockGuard with the "
					"SCSI layer: mask %d guard %d\n",
					lpfc_prot_mask, lpfc_prot_guard);

			scsi_host_set_prot(shost, lpfc_prot_mask);
			scsi_host_set_guard(shost, lpfc_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1479 Not Registering BlockGuard with the SCSI "
				"layer, Bad protection parameters: %d %d\n",
				old_mask, old_guard);
	}

	if (!_dump_buf_data) {
		while (pagecnt) {
			spin_lock_init(&_dump_buf_lock);
			_dump_buf_data =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9043 BLKGRD: allocated %d pages for "
					"_dump_buf_data at 0x%p\n",
					(1 << pagecnt), _dump_buf_data);
				_dump_buf_data_order = pagecnt;
				memset(_dump_buf_data, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_data_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9044 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
			"\n", _dump_buf_data);
	if (!_dump_buf_dif) {
		while (pagecnt) {
			_dump_buf_dif =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_dif) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9046 BLKGRD: allocated %d pages for "
					"_dump_buf_dif at 0x%p\n",
					(1 << pagecnt), _dump_buf_dif);
				_dump_buf_dif_order = pagecnt;
				memset(_dump_buf_dif, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_dif_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9047 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
			_dump_buf_dif);
}
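
/*
 * Note on the dump buffer sizing above: pagecnt is used as a buddy
 * allocator order, so each attempt asks for 2^pagecnt contiguous pages
 * and the loop retries with a smaller order until the allocation
 * succeeds. With 4KB pages the initial order of 10 requests
 * (1 << 12) << 10 = 4MB; the "%d pages" figure in the 9043/9046
 * messages is likewise (1 << pagecnt).
 */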
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
}
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
						&phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	INIT_LIST_HEAD(&phba->rb_pend_list);

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
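
/*
 * Note on the DMA mask fallback above: the driver first asks for full
 * 64-bit streaming and coherent masks and only falls back to 32-bit
 * addressing if the platform refuses. On later kernels this pair of
 * calls is commonly collapsed into a single
 * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) (equivalent
 * sketch, not this file's code).
 */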
/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);
}
/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			       &portsmphr_reg.word0) ||
		    (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid. Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process. Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
			/* Final checks. The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				       &reg_data.word0) ||
			    (bf_get(lpfc_sliport_status_err, &reg_data) &&
			     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}
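
/*
 * Timing note for the POST poll loop above (assuming the elided
 * per-iteration delay is the usual 10 ms sleep restored here): 3000
 * iterations * 10 ms = 30 s, matching the budget called out in the
 * loop's comment before the port is declared failed.
 */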
/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.EQCQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_SLIPORT_IF0_SMPHR;
	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISR0;
	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_IMR0;
	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISCR0;
}
/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
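
/*
 * Layout note for the doorbell map above: each virtual function owns a
 * LPFC_VFR_PAGE_SIZE-sized window in BAR2, so every register is located
 * as base + vf * LPFC_VFR_PAGE_SIZE + fixed offset. For example
 * (hypothetical values), with a 4KB VF page, vf = 2 places the ULP0 RQ
 * doorbell at drbl_regs_memmap_p + 0x2000 + LPFC_ULP0_RQ_DOORBELL.
 */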
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec. This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs. No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later. The mailbox dma address is required
	 * to be 16-byte aligned. Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now. The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses. The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
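
/*
 * Note on the address split above: aphys is 16-byte aligned, so its low
 * four bits are zero. addr_lo carries address bits 33:4 and addr_hi
 * carries bits 63:34; each 30-bit field is shifted left two bits so the
 * LPFC_BMBX_BIT1_ADDR_LO/HI markers in the low bits tell the port which
 * half of the bootstrap mailbox address it is seeing.
 */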
/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources. This routine requires
 * the caller to ensure all mailbox commands recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union  lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2012 Mailbox failed, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi);
	}

	if (rc)
		goto read_cfg_out;

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed, mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0. This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}
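
/*
 * Note on the endian handshake above: HOST_ENDIAN_LOW_WORD0 and
 * HOST_ENDIAN_HIGH_WORD1 are recognizable constants written as the first
 * two mailbox words; by observing how those bytes arrive, an if_type 0
 * port can deduce the host's byte order for subsequent mailbox traffic,
 * which is why no other payload is allowed in this command.
 */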
/**
 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs and
 * CQs. After this routine is called the counts will be set to valid values
 * that adhere to the constraints of the system's interrupt vectors and the
 * port's queue resources.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	int cfg_fcp_io_channel;
	uint32_t cpu;
	uint32_t i = 0;
	int fof_vectors = phba->cfg_fof ? 1 : 0;

	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on HBA EQ parameters */
	cfg_fcp_io_channel = phba->cfg_fcp_io_channel;

	/* It doesn't make sense to have more io channels than online CPUs */
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			i++;
	}
	phba->sli4_hba.num_online_cpu = i;
	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
	phba->sli4_hba.curr_disp_cpu = 0;

	if (i < cfg_fcp_io_channel) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"3188 Reducing IO channels to match number of "
				"online CPUs: from %d to %d\n",
				cfg_fcp_io_channel, i);
		cfg_fcp_io_channel = i;
	}

	if (cfg_fcp_io_channel + fof_vectors >
	    phba->sli4_hba.max_cfg_param.max_eq) {
		if (phba->sli4_hba.max_cfg_param.max_eq <
		    LPFC_FCP_IO_CHAN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2574 Not enough EQs (%d) from the "
					"pci function for supporting FCP "
					"EQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_eq,
					phba->cfg_fcp_io_channel);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2575 Reducing IO channels to match number of "
				"available EQs: from %d to %d\n",
				cfg_fcp_io_channel,
				phba->sli4_hba.max_cfg_param.max_eq);
		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
			fof_vectors;
	}

	/* The actual number of FCP event queues adopted */
	phba->cfg_fcp_io_channel = cfg_fcp_io_channel;

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	return 0;
out_error:
	return -ENOMEM;
}
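
/*
 * Worked example of the clamping above (hypothetical values): with 16
 * online CPUs, cfg_fcp_io_channel = 32, max_eq = 8 and cfg_fof = 1, the
 * channel count first drops to 16 to match the online CPUs, then to
 * max_eq - fof_vectors = 7 so one EQ remains for the FOF vector.
 */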
/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int idx;

	/*
	 * Create HBA Record arrays.
	 */
	if (!phba->cfg_fcp_io_channel)
		return -ERANGE;

	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;

	phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2576 Failed allocate memory for "
				"fast-path EQ record array\n");
		goto out_error;
	}

	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2577 Failed allocate memory for fast-path "
				"CQ record array\n");
		goto out_error;
	}

	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2578 Failed allocate memory for fast-path "
				"WQ record array\n");
		goto out_error;
	}

	/*
	 * Since the first EQ can have multiple CQs associated with it,
	 * this array is used to quickly see if we have a FCP fast-path
	 * CQ match.
	 */
	phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2545 Failed allocate memory for fast-path "
				"CQ map\n");
		goto out_error;
	}

	/*
	 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
	 * how many EQs to create.
	 */
	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {

		/* Create EQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate EQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.hba_eq[idx] = qdesc;

		/* Create Fast Path FCP CQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.fcp_cq[idx] = qdesc;

		/* Create Fast Path FCP WQs */
		if (phba->fcp_embed_io) {
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_WQE128_SIZE,
						      LPFC_WQE128_DEF_COUNT);
		} else {
			qdesc = lpfc_sli4_queue_alloc(phba,
						      phba->sli4_hba.wq_esize,
						      phba->sli4_hba.wq_ecount);
		}
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.fcp_wq[idx] = qdesc;
	}

	/*
	 * Create Slow Path Completion Queues (CQs)
	 */

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	phba->sli4_hba.els_cq = qdesc;

	/*
	 * Create Slow Path Work Queues (WQs)
	 */

	/* Create Mailbox Command Queue */

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create ELS Work Queues
	 */

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	phba->sli4_hba.els_wq = qdesc;

	/*
	 * Create Receive Queue (RQ)
	 */

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	/* Create the Queues needed for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_create(phba);
	return 0;

out_error:
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}
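
/*
 * Topology note for the allocations above: each of the
 * cfg_fcp_io_channel fast-path channels gets its own EQ/CQ/WQ triple,
 * while the slow path (mailbox and ELS CQs) is serviced through EQ 0 at
 * setup time; the fcp_cq_map array records each FCP CQ id so the
 * interrupt handler can match a completion to its channel without
 * walking the queue list.
 */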
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int idx;

	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);

	if (phba->sli4_hba.hba_eq != NULL) {
		/* Release HBA event queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.hba_eq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.hba_eq[idx]);
				phba->sli4_hba.hba_eq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.hba_eq);
		phba->sli4_hba.hba_eq = NULL;
	}

	if (phba->sli4_hba.fcp_cq != NULL) {
		/* Release FCP completion queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_cq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_cq[idx]);
				phba->sli4_hba.fcp_cq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_cq);
		phba->sli4_hba.fcp_cq = NULL;
	}

	if (phba->sli4_hba.fcp_wq != NULL) {
		/* Release FCP work queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_wq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_wq[idx]);
				phba->sli4_hba.fcp_wq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_wq);
		phba->sli4_hba.fcp_wq = NULL;
	}

	/* Release FCP CQ mapping array */
	if (phba->sli4_hba.fcp_cq_map != NULL) {
		kfree(phba->sli4_hba.fcp_cq_map);
		phba->sli4_hba.fcp_cq_map = NULL;
	}

	/* Release mailbox command work queue */
	if (phba->sli4_hba.mbx_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
		phba->sli4_hba.mbx_wq = NULL;
	}

	/* Release ELS work queue */
	if (phba->sli4_hba.els_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
		phba->sli4_hba.els_wq = NULL;
	}

	/* Release unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
		phba->sli4_hba.hdr_rq = NULL;
	}
	if (phba->sli4_hba.dat_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
		phba->sli4_hba.dat_rq = NULL;
	}

	/* Release ELS complete queue */
	if (phba->sli4_hba.els_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
		phba->sli4_hba.els_cq = NULL;
	}

	/* Release mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
		phba->sli4_hba.mbx_cq = NULL;
	}
}
7553 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
7554 * @phba: pointer to lpfc hba data structure.
7556 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
7561 * -ENOMEM - No available memory
7562 * -EIO - The mailbox failed to complete successfully.
7565 lpfc_sli4_queue_setup(struct lpfc_hba
*phba
)
7567 struct lpfc_sli
*psli
= &phba
->sli
;
7568 struct lpfc_sli_ring
*pring
;
7570 int fcp_eqidx
, fcp_cqidx
, fcp_wqidx
;
7571 int fcp_cq_index
= 0;
7572 uint32_t shdr_status
, shdr_add_status
;
7573 union lpfc_sli4_cfg_shdr
*shdr
;
7574 LPFC_MBOXQ_t
*mboxq
;
7577 /* Check for dual-ULP support */
7578 mboxq
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
7580 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
7581 "3249 Unable to allocate memory for "
7582 "QUERY_FW_CFG mailbox command\n");
7585 length
= (sizeof(struct lpfc_mbx_query_fw_config
) -
7586 sizeof(struct lpfc_sli4_cfg_mhdr
));
7587 lpfc_sli4_config(phba
, mboxq
, LPFC_MBOX_SUBSYSTEM_COMMON
,
7588 LPFC_MBOX_OPCODE_QUERY_FW_CFG
,
7589 length
, LPFC_SLI4_MBX_EMBED
);
7591 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
7593 shdr
= (union lpfc_sli4_cfg_shdr
*)
7594 &mboxq
->u
.mqe
.un
.sli4_config
.header
.cfg_shdr
;
7595 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
7596 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
7597 if (shdr_status
|| shdr_add_status
|| rc
) {
7598 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
7599 "3250 QUERY_FW_CFG mailbox failed with status "
7600 "x%x add_status x%x, mbx status x%x\n",
7601 shdr_status
, shdr_add_status
, rc
);
7602 if (rc
!= MBX_TIMEOUT
)
7603 mempool_free(mboxq
, phba
->mbox_mem_pool
);
7608 phba
->sli4_hba
.fw_func_mode
=
7609 mboxq
->u
.mqe
.un
.query_fw_cfg
.rsp
.function_mode
;
7610 phba
->sli4_hba
.ulp0_mode
= mboxq
->u
.mqe
.un
.query_fw_cfg
.rsp
.ulp0_mode
;
7611 phba
->sli4_hba
.ulp1_mode
= mboxq
->u
.mqe
.un
.query_fw_cfg
.rsp
.ulp1_mode
;
7612 phba
->sli4_hba
.physical_port
=
7613 mboxq
->u
.mqe
.un
.query_fw_cfg
.rsp
.physical_port
;
7614 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
7615 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
7616 "ulp1_mode:x%x\n", phba
->sli4_hba
.fw_func_mode
,
7617 phba
->sli4_hba
.ulp0_mode
, phba
->sli4_hba
.ulp1_mode
);
7619 if (rc
!= MBX_TIMEOUT
)
7620 mempool_free(mboxq
, phba
->mbox_mem_pool
);
7623 * Set up HBA Event Queues (EQs)
7626 /* Set up HBA event queue */
7627 if (phba
->cfg_fcp_io_channel
&& !phba
->sli4_hba
.hba_eq
) {
7628 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
7629 "3147 Fast-path EQs not allocated\n");
7633 for (fcp_eqidx
= 0; fcp_eqidx
< phba
->cfg_fcp_io_channel
; fcp_eqidx
++) {
7634 if (!phba
->sli4_hba
.hba_eq
[fcp_eqidx
]) {
7635 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
7636 "0522 Fast-path EQ (%d) not "
7637 "allocated\n", fcp_eqidx
);
7639 goto out_destroy_hba_eq
;
7641 rc
= lpfc_eq_create(phba
, phba
->sli4_hba
.hba_eq
[fcp_eqidx
],
7642 (phba
->cfg_fcp_imax
/ phba
->cfg_fcp_io_channel
));
7644 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
7645 "0523 Failed setup of fast-path EQ "
7646 "(%d), rc = 0x%x\n", fcp_eqidx
,
7648 goto out_destroy_hba_eq
;
7650 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
7651 "2584 HBA EQ setup: "
7652 "queue[%d]-id=%d\n", fcp_eqidx
,
7653 phba
->sli4_hba
.hba_eq
[fcp_eqidx
]->queue_id
);
7656 /* Set up fast-path FCP Response Complete Queue */
7657 if (!phba
->sli4_hba
.fcp_cq
) {
7658 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
7659 "3148 Fast-path FCP CQ array not "
7662 goto out_destroy_hba_eq
;
7665 for (fcp_cqidx
= 0; fcp_cqidx
< phba
->cfg_fcp_io_channel
; fcp_cqidx
++) {
7666 if (!phba
->sli4_hba
.fcp_cq
[fcp_cqidx
]) {
7667 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
7668 "0526 Fast-path FCP CQ (%d) not "
7669 "allocated\n", fcp_cqidx
);
7671 goto out_destroy_fcp_cq
;
7673 rc
= lpfc_cq_create(phba
, phba
->sli4_hba
.fcp_cq
[fcp_cqidx
],
7674 phba
->sli4_hba
.hba_eq
[fcp_cqidx
], LPFC_WCQ
, LPFC_FCP
);
7676 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
7677 "0527 Failed setup of fast-path FCP "
7678 "CQ (%d), rc = 0x%x\n", fcp_cqidx
,
7680 goto out_destroy_fcp_cq
;
7683 /* Setup fcp_cq_map for fast lookup */
7684 phba
->sli4_hba
.fcp_cq_map
[fcp_cqidx] =
				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2588 FCP CQ setup: cq[%d]-id=%d, "
				"parent seq[%d]-id=%d\n",
				fcp_cqidx,
				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
				fcp_cqidx,
				phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
	}

	/* Set up fast-path FCP Work Queue */
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3149 Fast-path FCP WQ array not "
				"allocated\n");
		rc = -ENOMEM;
		goto out_destroy_fcp_cq;
	}

	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0534 Fast-path FCP WQ (%d) not "
					"allocated\n", fcp_wqidx);
			rc = -ENOMEM;
			goto out_destroy_fcp_wq;
		}
		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
				    phba->sli4_hba.fcp_cq[fcp_wqidx],
				    LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0535 Failed setup of fast-path FCP "
					"WQ (%d), rc = 0x%x\n", fcp_wqidx,
					(uint32_t)rc);
			goto out_destroy_fcp_wq;
		}

		/* Bind this WQ to the next FCP ring */
		pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
		pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
		phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2591 FCP WQ setup: wq[%d]-id=%d, "
				"parent cq[%d]-id=%d\n",
				fcp_wqidx,
				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
				fcp_wqidx,
				phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
	}

	/*
	 * Set up Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX Complete Queue as the first CQ */
	if (!phba->sli4_hba.mbx_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 Mailbox CQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_fcp_wq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
			phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0529 Failed setup of slow-path mailbox CQ: "
				"rc = 0x%x\n", (uint32_t)rc);
		goto out_destroy_fcp_wq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.mbx_cq->queue_id,
			phba->sli4_hba.hba_eq[0]->queue_id);

	/* Set up slow-path ELS Complete Queue */
	if (!phba->sli4_hba.els_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS CQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_mbx_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
			phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0531 Failed setup of slow-path ELS CQ: "
				"rc = 0x%x\n", (uint32_t)rc);
		goto out_destroy_mbx_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.els_cq->queue_id,
			phba->sli4_hba.hba_eq[0]->queue_id);

	/*
	 * Set up all the Work Queues (WQs)
	 */

	/* Set up Mailbox Command Queue */
	if (!phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0538 Slow-path MQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_els_cq;
	}
	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0539 Failed setup of slow-path MQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_els_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);

	/* Set up slow-path ELS Work Queue */
	if (!phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0536 Slow-path ELS WQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_mbx_wq;
	}
	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
			    phba->sli4_hba.els_cq, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0537 Failed setup of slow-path ELS WQ: "
				"rc = 0x%x\n", (uint32_t)rc);
		goto out_destroy_mbx_wq;
	}

	/* Bind this WQ to the ELS ring */
	pring = &psli->ring[LPFC_ELS_RING];
	pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
	phba->sli4_hba.els_cq->pring = pring;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	/*
	 * Create Receive Queue (RQ)
	 */
	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0540 Receive Queue not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_els_wq;
	}

	lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
	lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);

	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", (uint32_t)rc);
		goto out_destroy_fcp_wq;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_fof) {
		rc = lpfc_fof_queue_setup(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0549 Failed setup of FOF Queues: "
					"rc = 0x%x\n", rc);
			goto out_destroy_els_rq;
		}
	}

	/*
	 * Configure EQ delay multiplier for interrupt coalescing using
	 * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
	 */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
			fcp_eqidx += LPFC_MAX_EQ_DELAY)
		lpfc_modify_fcp_eq_delay(phba, fcp_eqidx);
	return 0;

out_destroy_els_rq:
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
out_destroy_els_wq:
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
out_destroy_els_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
out_destroy_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
out_destroy_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
out_destroy_hba_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
		lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
out_error:
	return rc;
}
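
/*
 * Editorial note, illustrative sketch only (not part of the driver): every
 * queue above is created against its parent in the EQ <- CQ <- WQ hierarchy,
 * so a failure at any level unwinds only the levels already created. The
 * hypothetical helper below restates that pattern for a single fast-path
 * channel using the driver's own lpfc_cq_create()/lpfc_wq_create() APIs.
 */
#if 0	/* example only, never compiled */
static int lpfc_example_setup_one_channel(struct lpfc_hba *phba, int idx)
{
	int rc;

	/* The CQ names the EQ as its parent... */
	rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[idx],
			    phba->sli4_hba.hba_eq[idx], LPFC_WCQ, LPFC_FCP);
	if (rc)
		return rc;
	/* ...and the WQ names that CQ as its parent. */
	rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[idx],
			    phba->sli4_hba.fcp_cq[idx], LPFC_FCP);
	if (rc)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[idx]);
	return rc;
}
#endif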
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
 * function.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Unset the queues created for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);
	/* Unset mailbox command work queue */
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
	/* Unset ELS work queue */
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
	/* Unset unsolicited receive queue */
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
	/* Unset FCP work queue */
	if (phba->sli4_hba.fcp_wq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
	}
	/* Unset mailbox command complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
	/* Unset ELS complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	/* Unset FCP response complete queue */
	if (phba->sli4_hba.fcp_cq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
	}
	/* Unset fast-path event queue */
	if (phba->sli4_hba.hba_eq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
	}
}
/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * CQE. For now, this pool is used for the interrupt service routine to queue
 * the following HBA completion queue events for the worker thread to process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}
/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that, it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}
/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}
/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}
/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}
/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
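
/*
 * Editorial note, illustrative sketch only (not part of the driver): the
 * ISR-side consumer of this pool pairs lpfc_sli4_cq_event_alloc() with
 * lpfc_sli4_cq_event_release() so every event borrowed from
 * sp_cqe_event_pool eventually returns to it. The helper name below is
 * hypothetical; the two APIs are the locked wrappers defined above.
 */
#if 0	/* example only, never compiled */
static void lpfc_example_cq_event_roundtrip(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	cq_event = lpfc_sli4_cq_event_alloc(phba);	/* borrow from pool */
	if (!cq_event)
		return;					/* pool exhausted */
	/* ... copy the hardware CQE into cq_event and queue it ... */
	lpfc_sli4_cq_event_release(phba, cq_event);	/* return to pool */
}
#endif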
/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the pending completion-queue events
 * back into the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}
/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk;
	uint32_t port_reset = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
wait:
		/*
		 * Poll the Port Status Register and wait for RDY for
		 * up to 30 seconds. If the port doesn't respond, treat
		 * it as an error.
		 */
		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
			if (lpfc_readl(phba->sli4_hba.u.if_type2.
				STATUSregaddr, &reg_data.word0)) {
				rc = -ENODEV;
				goto out;
			}
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
				break;
			msleep(20);
		}

		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
			phba->work_status[0] = readl(
				phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] = readl(
				phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2890 Port not ready, port status reg "
					"0x%x error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
			rc = -ENODEV;
			goto out;
		}

		if (!port_reset) {
			/*
			 * Reset the port now
			 */
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);

			port_reset = 1;
			msleep(20);
			goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
			rc = -ENODEV;
			goto out;
		}
		break;

	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3317 HBA not functional: IP Reset Failed "
				"try: echo fw_reset > board_mode\n");
		rc = -ENODEV;
	}

	return rc;
}
/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error = -ENODEV;
	uint32_t if_type;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return error;
	}

	/* There is no SLI3 failback for SLI4 devices. */
	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2894 SLI_INTF reg contents invalid "
				"sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return error;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BARs regions is dependent on the type of
	 * SLI4 device.
	 */
	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);

		/*
		 * Map SLI4 PCI Config Space Register base to a kernel virtual
		 * address.
		 */
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
		/* Set up BAR0 PCI config space register memory map */
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	} else {
		phba->pci_bar0_map = pci_resource_start(pdev, 1);
		bar0map_len = pci_resource_len(pdev, 1);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
			goto out;
		}
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
		/*
		 * Map SLI4 if type 0 HBA Control Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
		phba->sli4_hba.ctrl_regs_memmap_p =
			ioremap(phba->pci_bar1_map, bar1map_len);
		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA control registers.\n");
			goto out_iounmap_conf;
		}
		phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
		lpfc_sli4_bar1_register_memmap(phba);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
		/*
		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA doorbell registers.\n");
			goto out_iounmap_ctrl;
		}
		phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
		if (error)
			goto out_iounmap_all;
	}

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
out:
	return error;
}
/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs. The kernel function pci_enable_msix_exact()
 * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(),
 * once invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later when device is unloading, the driver should always call free_irq()
 * on all MSI-X vectors it has done request_irq() on before calling
 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
 * will be left with MSI-X enabled and leaks its vectors.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc, i;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		phba->msix_entries[i].entry = i;

	/* Configure MSI-X capability structure */
	rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries,
				   LPFC_MSIX_VECTORS);
	if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0477 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", i,
				phba->msix_entries[i].vector,
				phba->msix_entries[i].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(phba->msix_entries[0].vector,
			 &lpfc_sli_sp_intr_handler, 0,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(phba->msix_entries[1].vector,
			 &lpfc_sli_fp_intr_handler, 0,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[1].vector, phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);

vec_fail_out:
	return rc;
}
/**
 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-3 interface spec.
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);
}
/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler, which
 * is done in this function.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}
/**
 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
 * its vector.
 */
static void
lpfc_sli_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
}
/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode configured to the driver.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured to the driver, the driver
 * will try to fallback from the configured interrupt mode to an interrupt
 * mode which is supported by the platform, kernel, and device in the order
 * of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}
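
/*
 * Editorial note, illustrative sketch only (not part of the driver): a
 * probe-time caller walks the MSI-X -> MSI -> INTx ladder simply by handing
 * its configured mode to lpfc_sli_enable_intr() and, when the subsequent
 * active-interrupt test fails, retrying one level lower; that retry loop
 * lives in lpfc_pci_probe_one_s3() further below. Hypothetical helper:
 */
#if 0	/* example only, never compiled */
static uint32_t lpfc_example_bring_up_intr(struct lpfc_hba *phba)
{
	uint32_t cfg_mode = phba->cfg_use_msi;	/* 2=MSI-X, 1=MSI, 0=INTx */
	uint32_t intr_mode;

	intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR)
		return LPFC_INTR_ERROR;		/* nothing could be enabled */
	return intr_mode;			/* mode actually in effect */
}
#endif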
/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}
/**
 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
 * @phba: pointer to lpfc hba data structure.
 * @phys_id: physical package id to match.
 *
 * Find next available CPU to use for IRQ to CPU affinity.
 */
static int
lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
{
	struct lpfc_vector_map_info *cpup;
	int cpu;

	cpup = phba->sli4_hba.cpu_map;
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
		/* CPU must be online */
		if (cpu_online(cpu)) {
			if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
			    (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
			    (cpup->phys_id == phys_id)) {
				return cpu;
			}
		}
		cpup++;
	}

	/*
	 * If we get here, we have used ALL CPUs for the specific
	 * phys_id. Now we need to clear out lpfc_used_cpu and start
	 * reusing CPUs.
	 */

	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
		if (lpfc_used_cpu[cpu] == phys_id)
			lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
	}

	cpup = phba->sli4_hba.cpu_map;
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
		/* CPU must be online */
		if (cpu_online(cpu)) {
			if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
			    (cpup->phys_id == phys_id)) {
				return cpu;
			}
		}
		cpup++;
	}
	return LPFC_VECTOR_MAP_EMPTY;
}
/**
 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of HBA vectors
 *
 * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
 * affinization across multiple physical CPUs (numa nodes).
 * In addition, this routine will assign an IO channel for each CPU
 * to use when issuing I/Os.
 */
static int
lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
{
	int i, idx, saved_chann, used_chann, cpu, phys_id;
	int max_phys_id, min_phys_id;
	int num_io_channel, first_cpu, chan;
	struct lpfc_vector_map_info *cpup;
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo;
#endif
	uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];

	/* If there is no mapping, just return */
	if (!phba->cfg_fcp_cpu_map)
		return 1;

	/* Init cpu_map array */
	memset(phba->sli4_hba.cpu_map, 0xff,
	       (sizeof(struct lpfc_vector_map_info) *
		phba->sli4_hba.num_present_cpu));

	max_phys_id = 0;
	min_phys_id = 0xff;
	phys_id = 0;
	num_io_channel = 0;
	first_cpu = LPFC_VECTOR_MAP_EMPTY;

	/* Update CPU map with physical id and core id of each CPU */
	cpup = phba->sli4_hba.cpu_map;
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
#ifdef CONFIG_X86
		cpuinfo = &cpu_data(cpu);
		cpup->phys_id = cpuinfo->phys_proc_id;
		cpup->core_id = cpuinfo->cpu_core_id;
#else
		/* No distinction between CPUs for other platforms */
		cpup->phys_id = 0;
		cpup->core_id = 0;
#endif
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3328 CPU physid %d coreid %d\n",
				cpup->phys_id, cpup->core_id);

		if (cpup->phys_id > max_phys_id)
			max_phys_id = cpup->phys_id;
		if (cpup->phys_id < min_phys_id)
			min_phys_id = cpup->phys_id;
		cpup++;
	}

	phys_id = min_phys_id;
	/* Now associate the HBA vectors with specific CPUs */
	for (idx = 0; idx < vectors; idx++) {
		cpup = phba->sli4_hba.cpu_map;
		cpu = lpfc_find_next_cpu(phba, phys_id);
		if (cpu == LPFC_VECTOR_MAP_EMPTY) {

			/* Try for all phys_id's */
			for (i = 1; i < max_phys_id; i++) {
				phys_id++;
				if (phys_id > max_phys_id)
					phys_id = min_phys_id;
				cpu = lpfc_find_next_cpu(phba, phys_id);
				if (cpu == LPFC_VECTOR_MAP_EMPTY)
					continue;
				goto found;
			}

			/* Use round robin for scheduling */
			phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
			chan = 0;
			cpup = phba->sli4_hba.cpu_map;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				cpup->channel_id = chan;
				cpup++;
				chan++;
				if (chan >= phba->cfg_fcp_io_channel)
					chan = 0;
			}

			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3329 Cannot set affinity:"
					"Error mapping vector %d (%d)\n",
					idx, vectors);
			return 0;
		}

found:
		cpup += cpu;
		if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
			lpfc_used_cpu[cpu] = phys_id;

		/* Associate vector with selected CPU */
		cpup->irq = phba->sli4_hba.msix_entries[idx].vector;

		/* Associate IO channel with selected CPU */
		cpup->channel_id = idx;
		num_io_channel++;

		if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
			first_cpu = cpu;

		/* Now affinitize to the selected CPU */
		i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
					  vector, get_cpu_mask(cpu));

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3330 Set Affinity: CPU %d channel %d "
				"irq %d (%x)\n",
				cpu, cpup->channel_id,
				phba->sli4_hba.msix_entries[idx].vector, i);

		/* Spread vector mapping across multiple physical CPU nodes */
		phys_id++;
		if (phys_id > max_phys_id)
			phys_id = min_phys_id;
	}

	/*
	 * Finally fill in the IO channel for any remaining CPUs.
	 * At this point, all IO channels have been assigned to a specific
	 * MSIx vector, mapped to a specific CPU.
	 * Base the remaining IO channel assigned, to IO channels already
	 * assigned to other CPUs on the same phys_id.
	 */
	for (i = min_phys_id; i <= max_phys_id; i++) {
		/*
		 * If there are no io channels already mapped to
		 * this phys_id, just round robin thru the io_channels.
		 * Setup chann[] for round robin.
		 */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
			chann[idx] = idx;

		saved_chann = 0;
		used_chann = 0;

		/*
		 * First build a list of IO channels already assigned
		 * to this phys_id before reassigning the same IO
		 * channels to the remaining CPUs.
		 */
		cpup = phba->sli4_hba.cpu_map;
		cpu = first_cpu;
		cpup += cpu;
		for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
		     idx++) {
			if (cpup->phys_id == i) {
				/*
				 * Save any IO channels that are
				 * already mapped to this phys_id.
				 */
				if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
					if (saved_chann <=
					    LPFC_FCP_IO_CHAN_MAX) {
						chann[saved_chann] =
							cpup->channel_id;
						saved_chann++;
					}
					goto out;
				}

				/* See if we are using round-robin */
				if (saved_chann == 0)
					saved_chann =
						phba->cfg_fcp_io_channel;

				/* Associate next IO channel with CPU */
				cpup->channel_id = chann[used_chann];
				num_io_channel++;
				used_chann++;
				if (used_chann == saved_chann)
					used_chann = 0;

				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"3331 Set IO_CHANN "
						"CPU %d channel %d\n",
						idx, cpup->channel_id);
			}
out:
			cpu++;
			if (cpu >= phba->sli4_hba.num_present_cpu) {
				cpup = phba->sli4_hba.cpu_map;
				cpu = 0;
			} else {
				cpup++;
			}
		}
	}

	if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
		cpup = phba->sli4_hba.cpu_map;
		for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
			if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
				cpup->channel_id = 0;
				num_io_channel++;

				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"3332 Assign IO_CHANN "
						"CPU %d channel %d\n",
						idx, cpup->channel_id);
			}
			cpup++;
		}
	}

	if (num_io_channel != phba->sli4_hba.num_present_cpu)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3333 Set affinity mismatch:"
				"%d chann != %d cpus: %d vectors\n",
				num_io_channel, phba->sli4_hba.num_present_cpu,
				vectors);

	/* Enable using cpu affinity for scheduling */
	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
	return 1;
}
/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. The kernel function pci_enable_msix_range()
 * is called to enable the MSI-X vectors. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later when device is unloading, the driver should always call free_irq()
 * on all MSI-X vectors it has done request_irq() on before calling
 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
 * will be left with MSI-X enabled and leaks its vectors.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;

	/* Set up MSI-X multi-message vectors */
	for (index = 0; index < phba->cfg_fcp_io_channel; index++)
		phba->sli4_hba.msix_entries[index].entry = index;

	/* Configure MSI-X capability structure */
	vectors = phba->cfg_fcp_io_channel;
	if (phba->cfg_fof) {
		phba->sli4_hba.msix_entries[index].entry = index;
		vectors++;
	}
	rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries,
				   2, vectors);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
	vectors = rc;

	/* Log MSI-X vector assignment */
	for (index = 0; index < vectors; index++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0489 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", index,
				phba->sli4_hba.msix_entries[index].vector,
				phba->sli4_hba.msix_entries[index].entry);

	/* Assign MSI-X vectors to interrupt handlers */
	for (index = 0; index < vectors; index++) {
		memset(&phba->sli4_hba.handler_name[index], 0, 16);
		snprintf((char *)&phba->sli4_hba.handler_name[index],
			 LPFC_SLI4_HANDLER_NAME_SZ,
			 LPFC_DRIVER_HANDLER_NAME"%d", index);

		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
		atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
		if (phba->cfg_fof && (index == (vectors - 1)))
			rc = request_irq(
				phba->sli4_hba.msix_entries[index].vector,
				 &lpfc_sli4_fof_intr_handler, 0,
				 (char *)&phba->sli4_hba.handler_name[index],
				 &phba->sli4_hba.fcp_eq_hdl[index]);
		else
			rc = request_irq(
				phba->sli4_hba.msix_entries[index].vector,
				 &lpfc_sli4_hba_intr_handler, 0,
				 (char *)&phba->sli4_hba.handler_name[index],
				 &phba->sli4_hba.fcp_eq_hdl[index]);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}
	}

	if (phba->cfg_fof)
		vectors--;

	if (vectors != phba->cfg_fcp_io_channel) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3238 Reducing IO channels to match number of "
				"MSI-X vectors, requested %d got %d\n",
				phba->cfg_fcp_io_channel, vectors);
		phba->cfg_fcp_io_channel = vectors;
	}

	if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport)))
		lpfc_sli4_set_affinity(phba, vectors);
	return rc;

cfg_fail_out:
	/* free the irq already requested */
	for (--index; index >= 0; index--) {
		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
					  vector, NULL);
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
	}

	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);

vec_fail_out:
	return rc;
}
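
/*
 * Editorial note, illustrative sketch only (not part of the driver): each
 * MSI-X vector above gets its own handler name and its own fcp_eq_hdl
 * context, which is how the shared fast-path handler later recovers the EQ
 * index from its dev_id argument. Hypothetical helper wiring one vector:
 */
#if 0	/* example only, never compiled */
static int lpfc_example_wire_one_vector(struct lpfc_hba *phba, int index)
{
	/* per-vector name so /proc/interrupts shows which EQ this is */
	snprintf((char *)&phba->sli4_hba.handler_name[index],
		 LPFC_SLI4_HANDLER_NAME_SZ,
		 LPFC_DRIVER_HANDLER_NAME"%d", index);
	/* per-vector context carries the EQ index back to the handler */
	phba->sli4_hba.fcp_eq_hdl[index].idx = index;
	phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	return request_irq(phba->sli4_hba.msix_entries[index].vector,
			   &lpfc_sli4_hba_intr_handler, 0,
			   (char *)&phba->sli4_hba.handler_name[index],
			   &phba->sli4_hba.fcp_eq_hdl[index]);
}
#endif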
/**
 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_disable_msix(struct lpfc_hba *phba)
{
	int index;

	/* Free up MSI-X multi-message vectors */
	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
					  vector, NULL);
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
	}
	if (phba->cfg_fof) {
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
	}
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);
}
/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}

	if (phba->cfg_fof) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}
	return 0;
}
/**
 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
 * its vector.
 **/
static void
lpfc_sli4_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
}
/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode configured to the driver.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured to the driver,
 * the driver will try to fallback from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device in
 * the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, index;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
			for (index = 0; index < phba->cfg_fcp_io_channel;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
					fcp_eq_in_use, 1);
			}
			if (phba->cfg_fof) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
					fcp_eq_in_use, 1);
			}
		}
	}
	return intr_mode;
}
/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli4_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli4_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}
/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);
}
/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of device's XRIs exchange busy. It will check the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it will check the XRI exchange busy on outstanding FCP and ELS
 * I/Os every 30 seconds, log error message, and wait forever. Only when
 * all XRI exchange busy complete, the driver unload shall proceed with
 * invoking the function reset ioctl mailbox command to the CNA and
 * the rest of the driver unload resource release.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	int wait_time = 0;
	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	while (!fcp_xri_cmpl || !els_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!fcp_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2877 FCP XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
		fcp_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}
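
/*
 * Editorial note, illustrative sketch only (not part of the driver): the
 * wait above is a classic two-phase poll: sleep in short T1 steps until the
 * timeout budget is spent, then fall back to long T2 steps while logging,
 * waiting indefinitely. Reduced to its skeleton with a hypothetical
 * completion predicate:
 */
#if 0	/* example only, never compiled */
static void lpfc_example_two_phase_poll(int (*done)(void))
{
	int wait_time = 0;

	while (!done()) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			/* slow phase: long sleeps, keep waiting forever */
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			/* fast phase: short sleeps until budget runs out */
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
	}
}
#endif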
/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}
/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return rc;
}
/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings. The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Issue IOs with CDB embedded in WQE to minimize the number
	 * of DMAs the firmware has to do. Setting this to 1 also forces
	 * the driver to use 128 byte WQEs for FCP IOs.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;
	return 0;
}
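
/*
 * Editorial note, illustrative sketch only (not part of the driver): the
 * SLI4 parameter words above are unpacked with the driver's bf_get()
 * bit-field accessor, where each cfg_* name selects a mask/shift pair from
 * the mailbox definitions. Hypothetical example reading one capability bit:
 */
#if 0	/* example only, never compiled */
static bool lpfc_example_port_supports_oas(struct lpfc_mqe *mqe)
{
	struct lpfc_sli4_parameters *parms =
		&mqe->un.get_sli4_parameters.sli4_parameters;

	/* bf_get() extracts the cfg_oas field from the response word */
	return bf_get(cfg_oas, parms) ? true : false;
}
#endif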
9617 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
9618 * @pdev: pointer to PCI device
9619 * @pid: pointer to PCI device identifier
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information of the device and driver to see if the driver
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the
 * HBA, it does all the initialization that it needs to do to handle the
 * HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver cannot claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) for a device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements for a power-aware driver's suspend/resume support:
 * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
 * method call are treated as SUSPEND, and the driver fully reinitializes its
 * device during the resume() method call. The driver therefore sets the
 * device to the PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) for a device with SLI-3 interface spec. When
 * PM invokes this method, it restores the device's PCI config space state
 * and fully reinitializes the device and brings it online. Note that since
 * the driver implements only the minimum PM requirements (all possible PM
 * messages to the suspend() method call are treated as SUSPEND and the
 * driver fully reinitializes its device during the resume() method call),
 * the device is set to PCI_D0 directly in PCI config space before restoring
 * the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
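/*
 * Suspend and resume form a strict pair: suspend ends with pci_save_state()
 * followed by pci_set_power_state(pdev, PCI_D3hot); resume inverts the
 * order with pci_set_power_state(pdev, PCI_D0) before pci_restore_state().
 * A minimal sketch of the generic legacy PCI PM pattern (driver-specific
 * quiesce/reinit helpers are illustrative, error handling elided):
 *
 *	static int suspend(struct pci_dev *pdev, pm_message_t msg)
 *	{
 *		quiesce_device(pdev);
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, PCI_D3hot);
 *		return 0;
 *	}
 *
 *	static int resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		pci_save_state(pdev);	// restore clears saved_state
 *		return reinit_device(pdev);
 *	}
 */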
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * a device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
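/*
 * For reference, the PCI error recovery sequence driven by the PCI/AER core
 * is: ->error_detected() (the routine above), then, if it returned
 * PCI_ERS_RESULT_NEED_RESET, ->slot_reset(), and finally ->resume(). The
 * return value here therefore selects which of the later callbacks the PCI
 * core will invoke.
 */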
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-3 interface spec. This is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device and
 * enable the interrupt, but it will just put the HBA into the offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-3 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is ok to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve.
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure (passed as void *).
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	INIT_LIST_HEAD(&dma_buffer_list);
	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
	     LPFC_FILE_TYPE_GROUP) ||
	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
	    (be32_to_cpu(image->size) != fw->size)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3022 Invalid FW image found. "
				"Magic:%x Type:%x ID:%x\n",
				be32_to_cpu(image->magic_number),
				bf_get_be32(lpfc_grp_hdr_file_type, image),
				bf_get_be32(lpfc_grp_hdr_id, image));
		rc = -EINVAL;
		goto release_out;
	}
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc)
				goto release_out;
		}
	}

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3024 Firmware update done: %d.\n", rc);
}
/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: INT_FW_UPGRADE or RUN_FW_UPGRADE.
 *
 * This routine is called to perform a Linux generic firmware upgrade on
 * devices that support this feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					file_name, &phba->pcidev->dev,
					GFP_KERNEL, (void *)phba,
					lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
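/*
 * request_firmware() blocks until the image named "<ModelName>.grp" is
 * located by the firmware loader (or the lookup fails), while
 * request_firmware_nowait() returns immediately and invokes
 * lpfc_write_firmware() asynchronously once the image is available; that is
 * why lpfc_write_firmware() must tolerate a NULL fw argument. Minimal
 * generic usage of the blocking kernel API (consume() is illustrative):
 *
 *	const struct firmware *fw;
 *	if (request_firmware(&fw, "image.bin", dev) == 0) {
 *		consume(fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 */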
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to attach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information of the device and driver to see if the driver
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the
 * HBA, it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver cannot claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;
	int adjusted_fcp_io_channel;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1411 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Initialize and populate the iocb list per host */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2821 initialize iocb list %d.\n",
			phba->cfg_iocb_cnt*1024);
	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1413 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	lpfc_stop_port(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}
	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX)
		adjusted_fcp_io_channel = 1;
	else
		adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
	phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_disable_intr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
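/*
 * The error path above uses the kernel's goto-unwind idiom: each setup step
 * that can fail jumps to a label that releases everything acquired so far,
 * and the labels are ordered so execution falls through from the most
 * recent acquisition back to the earliest. A skeleton of the idiom:
 *
 *	if (step1())
 *		goto out;
 *	if (step2())
 *		goto undo1;
 *	return 0;
 * undo1:
 *	undo_step1();
 * out:
 *	return error;
 */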
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to detach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);

	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);
}
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements for a power-aware driver's suspend/resume support:
 * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
 * method call are treated as SUSPEND, and the driver fully reinitializes its
 * device during the resume() method call. The driver therefore sets the
 * device to the PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that since the
 * driver implements only the minimum PM requirements (all possible PM
 * messages to the suspend() method call are treated as SUSPEND and the
 * driver fully reinitializes its device during the resume() method call),
 * the device is set to PCI_D0 directly in PCI config space before restoring
 * the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot. During
 * the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and
 * enable the interrupt, but it will just put the HBA into the offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is ok to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device and driver to
 * see if the driver can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver cannot claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
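/*
 * The dispatch rule above: if the SLI_INTF register read from PCI config
 * space reports itself valid and names SLI revision 4, the device takes the
 * SLI-4 (PCI device group-1) probe path; every other Emulex HBA falls back
 * to the SLI-3 (group-0) path. A failed config-space read means there is no
 * usable device behind @pdev.
 */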
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{

	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		if (phba->device_data_mem_pool)
			mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}
/**
 * lpfc_fof_queue_setup - Set up all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the fof queues for the FC HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
int
lpfc_fof_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
	if (rc)
		return -ENOMEM;

	if (phba->cfg_fof) {

		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
		if (rc)
			goto out_oas_cq;

		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
				    phba->sli4_hba.oas_cq, LPFC_FCP);
		if (rc)
			goto out_oas_wq;

		phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
		phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
	}

	return 0;

out_oas_wq:
	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
out_oas_cq:
	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
	return -ENOMEM;
}
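/*
 * SLI4 queue chaining used above, from producer to consumer: work queue
 * entries complete through the completion queue, whose events are signalled
 * through the event queue:
 *
 *	oas_wq (WQ) --> oas_cq (CQ) --> fof_eq (EQ)
 *
 * Creation therefore runs EQ first, then the CQ bound to that EQ, then the
 * WQ bound to that CQ; the error path above tears down in reverse.
 */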
/**
 * lpfc_fof_queue_create - Create all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the fof queues for the FC HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as placeholder.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;

	/* Create FOF EQ */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc)
		goto out_error;

	phba->sli4_hba.fof_eq = qdesc;

	if (phba->cfg_fof) {

		/* Create OAS CQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_cq = qdesc;

		/* Create OAS WQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_wq = qdesc;

	}
	return 0;

out_error:
	lpfc_fof_queue_destroy(phba);
	return -ENOMEM;
}
/**
 * lpfc_fof_queue_destroy - Destroy all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FC HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 **/
int
lpfc_fof_queue_destroy(struct lpfc_hba *phba)
{
	/* Release FOF Event queue */
	if (phba->sli4_hba.fof_eq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
		phba->sli4_hba.fof_eq = NULL;
	}

	/* Release OAS Completion queue */
	if (phba->sli4_hba.oas_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
		phba->sli4_hba.oas_cq = NULL;
	}

	/* Release OAS Work queue */
	if (phba->sli4_hba.oas_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
		phba->sli4_hba.oas_wq = NULL;
	}
	return 0;
}
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
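/*
 * misc_register() on lpfc_mgmt_dev (called from lpfc_init() below) creates
 * a character device node, typically /dev/lpfcmgmt, with a dynamically
 * assigned minor number. The file operations carry only an .owner, so the
 * node mainly pins the module while open and gives management tools a
 * stable device name to probe for.
 */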
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 **/
static int __init
lpfc_init(void)
{
	int cpu;
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		return -ENOMEM;
	}

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = 0;
	for_each_present_cpu(cpu)
		lpfc_present_cpu++;

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
	idr_destroy(&lpfc_hba_index);
}
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);