[SCSI] lpfc 8.3.28: Miscellaneous fixes in sysfs and mgmt interfaces
[deliverable/linux.git] / drivers / scsi / lpfc / lpfc_init.c
CommitLineData
dea3101e 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
792581de 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
c44ce173 5 * EMULEX and SLI are trademarks of Emulex. *
dea3101e 6 * www.emulex.com *
c44ce173 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea3101e 8 * *
9 * This program is free software; you can redistribute it and/or *
c44ce173
JSEC
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
dea3101e 20 *******************************************************************/
21
dea3101e 22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
acf3368f 27#include <linux/module.h>
dea3101e 28#include <linux/kthread.h>
29#include <linux/pci.h>
30#include <linux/spinlock.h>
92d7f7b0 31#include <linux/ctype.h>
0d878419 32#include <linux/aer.h>
5a0e3ad6 33#include <linux/slab.h>
52d52440 34#include <linux/firmware.h>
dea3101e 35
91886523 36#include <scsi/scsi.h>
dea3101e 37#include <scsi/scsi_device.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_transport_fc.h>
40
da0436e9 41#include "lpfc_hw4.h"
dea3101e 42#include "lpfc_hw.h"
43#include "lpfc_sli.h"
da0436e9 44#include "lpfc_sli4.h"
ea2151b4 45#include "lpfc_nl.h"
dea3101e 46#include "lpfc_disc.h"
47#include "lpfc_scsi.h"
48#include "lpfc.h"
49#include "lpfc_logmsg.h"
50#include "lpfc_crtn.h"
92d7f7b0 51#include "lpfc_vport.h"
dea3101e 52#include "lpfc_version.h"
53
81301a9b
JS
54char *_dump_buf_data;
55unsigned long _dump_buf_data_order;
56char *_dump_buf_dif;
57unsigned long _dump_buf_dif_order;
58spinlock_t _dump_buf_lock;
59
dea3101e 60static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
61static int lpfc_post_rcv_buf(struct lpfc_hba *);
5350d872 62static int lpfc_sli4_queue_verify(struct lpfc_hba *);
da0436e9
JS
63static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
64static int lpfc_setup_endian_order(struct lpfc_hba *);
65static int lpfc_sli4_read_config(struct lpfc_hba *);
66static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
67static void lpfc_free_sgl_list(struct lpfc_hba *);
68static int lpfc_init_sgl_list(struct lpfc_hba *);
69static int lpfc_init_active_sgl_array(struct lpfc_hba *);
70static void lpfc_free_active_sgl(struct lpfc_hba *);
71static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
72static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
73static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
74static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
75static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
dea3101e 76
77static struct scsi_transport_template *lpfc_transport_template = NULL;
92d7f7b0 78static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
dea3101e 79static DEFINE_IDR(lpfc_hba_index);
80
e59058c4 81/**
3621a710 82 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
e59058c4
JS
83 * @phba: pointer to lpfc hba data structure.
84 *
85 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
86 * mailbox command. It retrieves the revision information from the HBA and
87 * collects the Vital Product Data (VPD) about the HBA for preparing the
88 * configuration of the HBA.
89 *
90 * Return codes:
91 * 0 - success.
92 * -ERESTART - requests the SLI layer to reset the HBA and try again.
93 * Any other value - indicates an error.
94 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		/* Light-cycle HBAs need the license key written back via
		 * READ_NVPARM.  The key is byte-swapped to big-endian once
		 * per driver lifetime (init_key guards the one-shot swap).
		 */
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		/* Cache the node/port WWNs returned by READ_NVPARM. */
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* An SLI-3 port must acknowledge v3 support in the READ_REV reply. */
	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	/* Pull the VPD region out of flash in DUMP_MEMORY-sized chunks,
	 * polling one mailbox command per chunk until the firmware reports
	 * no more data or the local buffer is full.
	 */
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	/* NOTE(review): a kmalloc failure above still returns 0 here —
	 * VPD is treated as optional; confirm callers rely on that.
	 */
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
245
e59058c4 246/**
3621a710 247 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
e59058c4
JS
248 * @phba: pointer to lpfc hba data structure.
249 * @pmboxq: pointer to the driver internal queue element for mailbox command.
250 *
251 * This is the completion handler for driver's configuring asynchronous event
252 * mailbox command to the device. If the mailbox command returns successfully,
253 * it will set internal async event support flag to 1; otherwise, it will
254 * set internal async event support flag to 0.
255 **/
57127f15
JS
256static void
257lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
258{
04c68496 259 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
57127f15
JS
260 phba->temp_sensor_support = 1;
261 else
262 phba->temp_sensor_support = 0;
263 mempool_free(pmboxq, phba->mbox_mem_pool);
264 return;
265}
266
97207482 267/**
3621a710 268 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
97207482
JS
269 * @phba: pointer to lpfc hba data structure.
270 * @pmboxq: pointer to the driver internal queue element for mailbox command.
271 *
272 * This is the completion handler for dump mailbox command for getting
273 * wake up parameters. When this command complete, the response contain
274 * Option rom version of the HBA. This function translate the version number
275 * into a human readable string and store it in OptionROMVersion.
276 **/
277static void
278lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
279{
280 struct prog_id *prg;
281 uint32_t prog_id_word;
282 char dist = ' ';
283 /* character array used for decoding dist type. */
284 char dist_char[] = "nabx";
285
04c68496 286 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
9f1e1b50 287 mempool_free(pmboxq, phba->mbox_mem_pool);
97207482 288 return;
9f1e1b50 289 }
97207482
JS
290
291 prg = (struct prog_id *) &prog_id_word;
292
293 /* word 7 contain option rom version */
04c68496 294 prog_id_word = pmboxq->u.mb.un.varWords[7];
97207482
JS
295
296 /* Decode the Option rom version word to a readable string */
297 if (prg->dist < 4)
298 dist = dist_char[prg->dist];
299
300 if ((prg->dist == 3) && (prg->num == 0))
301 sprintf(phba->OptionROMVersion, "%d.%d%d",
302 prg->ver, prg->rev, prg->lev);
303 else
304 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
305 prg->ver, prg->rev, prg->lev,
306 dist, prg->num);
9f1e1b50 307 mempool_free(pmboxq, phba->mbox_mem_pool);
97207482
JS
308 return;
309}
310
0558056c
JS
311/**
312 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
313 * cfg_soft_wwnn, cfg_soft_wwpn
314 * @vport: pointer to lpfc vport data structure.
315 *
316 *
317 * Return codes
318 * None.
319 **/
320void
321lpfc_update_vport_wwn(struct lpfc_vport *vport)
322{
323 /* If the soft name exists then update it using the service params */
324 if (vport->phba->cfg_soft_wwnn)
325 u64_to_wwn(vport->phba->cfg_soft_wwnn,
326 vport->fc_sparam.nodeName.u.wwn);
327 if (vport->phba->cfg_soft_wwpn)
328 u64_to_wwn(vport->phba->cfg_soft_wwpn,
329 vport->fc_sparam.portName.u.wwn);
330
331 /*
332 * If the name is empty or there exists a soft name
333 * then copy the service params name, otherwise use the fc name
334 */
335 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
336 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
337 sizeof(struct lpfc_name));
338 else
339 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
340 sizeof(struct lpfc_name));
341
342 if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
343 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
344 sizeof(struct lpfc_name));
345 else
346 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
347 sizeof(struct lpfc_name));
348}
349
e59058c4 350/**
3621a710 351 * lpfc_config_port_post - Perform lpfc initialization after config port
e59058c4
JS
352 * @phba: pointer to lpfc hba data structure.
353 *
354 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
355 * command call. It performs all internal resource and state setups on the
356 * port: post IOCB buffers, enable appropriate host interrupt attentions,
357 * ELS ring timers, etc.
358 *
359 * Return codes
360 * 0 - success.
361 * Any other value - error.
362 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		/* Free the DMA buffer attached by lpfc_read_sparam as well
		 * as the mailbox itself.
		 */
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	/* Copy the returned service parameters, then release the DMA
	 * buffer; the mailbox element is reused for the commands below.
	 */
	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		/* Encode each WWNN nibble as '0'-'9' or 'a'-'f'. */
		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
					lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	/* When polling the FCP ring, ring-0 attention interrupts stay off. */
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* Link is to be brought up now; init_link allocates its own
		 * mailbox, so this one is returned first.
		 */
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
614
84d1b006
JS
615/**
616 * lpfc_hba_init_link - Initialize the FC link
617 * @phba: pointer to lpfc hba data structure.
6e7288d9 618 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
84d1b006
JS
619 *
620 * This routine will issue the INIT_LINK mailbox command call.
621 * It is available to other drivers through the lpfc_hba data
622 * structure for use as a delayed link up mechanism with the
623 * module parameter lpfc_suppress_link_up.
624 *
625 * Return code
626 * 0 - success
627 * Any other value - error
628 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	/* Validate the configured speed against the link-mode mask
	 * (phba->lmt) reported by the adapter; an unsupported speed
	 * falls back to auto-negotiation instead of failing the init.
	 */
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board:%d "
			"Reset link speed to auto.\n",
			phba->cfg_link_speed);
			phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		/* If the command is still queued (MBX_BUSY) and not polled,
		 * the completion handler owns the mailbox; otherwise it is
		 * freed here.
		 */
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	/* In polled mode nothing will complete the mailbox; free it now. */
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
693
694/**
695 * lpfc_hba_down_link - this routine downs the FC link
6e7288d9
JS
696 * @phba: pointer to lpfc hba data structure.
697 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
84d1b006
JS
698 *
699 * This routine will issue the DOWN_LINK mailbox command call.
700 * It is available to other drivers through the lpfc_hba data
701 * structure for use to stop the link.
702 *
703 * Return code
704 * 0 - success
705 * Any other value - error
706 **/
707int
6e7288d9 708lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
84d1b006
JS
709{
710 LPFC_MBOXQ_t *pmb;
711 int rc;
712
713 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
714 if (!pmb) {
715 phba->link_state = LPFC_HBA_ERROR;
716 return -ENOMEM;
717 }
718
719 lpfc_printf_log(phba,
720 KERN_ERR, LOG_INIT,
721 "0491 Adapter Link is disabled.\n");
722 lpfc_down_link(phba, pmb);
723 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6e7288d9 724 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
84d1b006
JS
725 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
726 lpfc_printf_log(phba,
727 KERN_ERR, LOG_INIT,
728 "2522 Adapter failed to issue DOWN_LINK"
729 " mbox command rc 0x%x\n", rc);
730
731 mempool_free(pmb, phba->mbox_mem_pool);
732 return -EIO;
733 }
6e7288d9
JS
734 if (flag == MBX_POLL)
735 mempool_free(pmb, phba->mbox_mem_pool);
736
84d1b006
JS
737 return 0;
738}
739
e59058c4 740/**
3621a710 741 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
e59058c4
JS
742 * @phba: pointer to lpfc HBA data structure.
743 *
744 * This routine will do LPFC uninitialization before the HBA is reset when
745 * bringing down the SLI Layer.
746 *
747 * Return codes
748 * 0 - success.
749 * Any other value - error.
750 **/
dea3101e 751int
2e0fef85 752lpfc_hba_down_prep(struct lpfc_hba *phba)
dea3101e 753{
1b32f6aa
JS
754 struct lpfc_vport **vports;
755 int i;
3772a991
JS
756
757 if (phba->sli_rev <= LPFC_SLI_REV3) {
758 /* Disable interrupts */
759 writel(0, phba->HCregaddr);
760 readl(phba->HCregaddr); /* flush */
761 }
dea3101e 762
1b32f6aa
JS
763 if (phba->pport->load_flag & FC_UNLOADING)
764 lpfc_cleanup_discovery_resources(phba->pport);
765 else {
766 vports = lpfc_create_vport_work_array(phba);
767 if (vports != NULL)
3772a991
JS
768 for (i = 0; i <= phba->max_vports &&
769 vports[i] != NULL; i++)
1b32f6aa
JS
770 lpfc_cleanup_discovery_resources(vports[i]);
771 lpfc_destroy_vport_work_array(phba, vports);
7f5f3d0d
JS
772 }
773 return 0;
dea3101e 774}
775
e59058c4 776/**
3772a991 777 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
e59058c4
JS
778 * @phba: pointer to lpfc HBA data structure.
779 *
780 * This routine will do uninitialization after the HBA is reset when bring
781 * down the SLI Layer.
782 *
783 * Return codes
af901ca1 784 * 0 - success.
e59058c4
JS
785 * Any other value - error.
786 **/
3772a991
JS
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		/* hbalock is released before cancelling/aborting —
		 * presumably because the IOCB completion callbacks invoked
		 * below may themselves take hbalock (NOTE(review): confirm).
		 */
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}
5af5eee7 831
da0436e9
JS
832/**
833 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
834 * @phba: pointer to lpfc HBA data structure.
835 *
836 * This routine will do uninitialization after the HBA is reset when bring
837 * down the SLI Layer.
838 *
839 * Return codes
af901ca1 840 * 0 - success.
da0436e9
JS
841 * Any other value - error.
842 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	/* SLI4 ports first perform the common SLI3 ring cleanup. */
	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
					/* scsl_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Outside the locks: mark the aborted SCSI buffers clean and
	 * return them to the free list.
	 */
	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}
892
893/**
894 * lpfc_hba_down_post - Wrapper func for hba down post routine
895 * @phba: pointer to lpfc HBA data structure.
896 *
897 * This routine wraps the actual SLI3 or SLI4 routine for performing
898 * uninitialization after the HBA is reset when bring down the SLI Layer.
899 *
900 * Return codes
af901ca1 901 * 0 - success.
da0436e9
JS
902 * Any other value - error.
903 **/
904int
905lpfc_hba_down_post(struct lpfc_hba *phba)
906{
907 return (*phba->lpfc_hba_down_post)(phba);
908}
41415862 909
e59058c4 910/**
3621a710 911 * lpfc_hb_timeout - The HBA-timer timeout handler
e59058c4
JS
912 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
913 *
914 * This is the HBA-timer timeout handler registered to the lpfc driver. When
915 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
916 * work-port-events bitmap and the worker thread is notified. This timeout
917 * event will be used by the worker thread to invoke the actual timeout
918 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
919 * be performed in the timeout handler and the HBA timeout event bit shall
920 * be cleared by the worker thread after it has taken the event bitmap out.
921 **/
a6ababd2 922static void
858c9f6c
JS
923lpfc_hb_timeout(unsigned long ptr)
924{
925 struct lpfc_hba *phba;
5e9d9b82 926 uint32_t tmo_posted;
858c9f6c
JS
927 unsigned long iflag;
928
929 phba = (struct lpfc_hba *)ptr;
9399627f
JS
930
931 /* Check for heart beat timeout conditions */
858c9f6c 932 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
5e9d9b82
JS
933 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
934 if (!tmo_posted)
858c9f6c
JS
935 phba->pport->work_port_events |= WORKER_HB_TMO;
936 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
937
9399627f 938 /* Tell the worker thread there is work to do */
5e9d9b82
JS
939 if (!tmo_posted)
940 lpfc_worker_wake_up(phba);
858c9f6c
JS
941 return;
942}
943
19ca7609
JS
944/**
945 * lpfc_rrq_timeout - The RRQ-timer timeout handler
946 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
947 *
948 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
949 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
950 * work-port-events bitmap and the worker thread is notified. This timeout
951 * event will be used by the worker thread to invoke the actual timeout
952 * handler routine, lpfc_rrq_handler. Any periodical operations will
953 * be performed in the timeout handler and the RRQ timeout event bit shall
954 * be cleared by the worker thread after it has taken the event bitmap out.
955 **/
956static void
957lpfc_rrq_timeout(unsigned long ptr)
958{
959 struct lpfc_hba *phba;
19ca7609
JS
960 unsigned long iflag;
961
962 phba = (struct lpfc_hba *)ptr;
963 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1151e3ec 964 phba->hba_flag |= HBA_RRQ_ACTIVE;
19ca7609 965 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1151e3ec 966 lpfc_worker_wake_up(phba);
19ca7609
JS
967}
968
e59058c4 969/**
3621a710 970 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
e59058c4
JS
971 * @phba: pointer to lpfc hba data structure.
972 * @pmboxq: pointer to the driver internal queue element for mailbox command.
973 *
974 * This is the callback function to the lpfc heart-beat mailbox command.
975 * If configured, the lpfc driver issues the heart-beat mailbox command to
976 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
977 * heart-beat mailbox command is issued, the driver shall set up heart-beat
978 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
979 * heart-beat outstanding state. Once the mailbox command comes back and
980 * no error conditions detected, the heart-beat mailbox command timer is
981 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
982 * state is cleared for the next heart-beat. If the timer expired with the
983 * heart-beat outstanding state set, the driver will put the HBA offline.
984 **/
858c9f6c
JS
985static void
986lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
987{
988 unsigned long drvr_flag;
989
990 spin_lock_irqsave(&phba->hbalock, drvr_flag);
991 phba->hb_outstanding = 0;
992 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
993
9399627f 994 /* Check and reset heart-beat timer is necessary */
858c9f6c
JS
995 mempool_free(pmboxq, phba->mbox_mem_pool);
996 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
997 !(phba->link_state == LPFC_HBA_ERROR) &&
51ef4c26 998 !(phba->pport->load_flag & FC_UNLOADING))
858c9f6c
JS
999 mod_timer(&phba->hb_tmofunc,
1000 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1001 return;
1002}
1003
e59058c4 1004/**
3621a710 1005 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
e59058c4
JS
1006 * @phba: pointer to lpfc hba data structure.
1007 *
1008 * This is the actual HBA-timer timeout handler to be invoked by the worker
1009 * thread whenever the HBA timer fired and HBA-timeout event posted. This
1010 * handler performs any periodic operations needed for the device. If such
1011 * periodic event has already been attended to either in the interrupt handler
1012 * or by processing slow-ring or fast-ring events within the HBA-timer
1013 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1014 * the timer for the next timeout period. If lpfc heart-beat mailbox command
1015 * is configured and there is no heart-beat mailbox command outstanding, a
1016 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1017 * has been a heart-beat mailbox command outstanding, the HBA shall be put
1018 * to offline.
1019 **/
858c9f6c
JS
1020void
1021lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1022{
45ed1190 1023 struct lpfc_vport **vports;
858c9f6c 1024 LPFC_MBOXQ_t *pmboxq;
0ff10d46 1025 struct lpfc_dmabuf *buf_ptr;
45ed1190 1026 int retval, i;
858c9f6c 1027 struct lpfc_sli *psli = &phba->sli;
0ff10d46 1028 LIST_HEAD(completions);
858c9f6c 1029
45ed1190
JS
1030 vports = lpfc_create_vport_work_array(phba);
1031 if (vports != NULL)
1032 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1033 lpfc_rcv_seq_check_edtov(vports[i]);
1034 lpfc_destroy_vport_work_array(phba, vports);
1035
858c9f6c 1036 if ((phba->link_state == LPFC_HBA_ERROR) ||
51ef4c26 1037 (phba->pport->load_flag & FC_UNLOADING) ||
858c9f6c
JS
1038 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1039 return;
1040
1041 spin_lock_irq(&phba->pport->work_port_lock);
858c9f6c
JS
1042
1043 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
1044 jiffies)) {
1045 spin_unlock_irq(&phba->pport->work_port_lock);
1046 if (!phba->hb_outstanding)
1047 mod_timer(&phba->hb_tmofunc,
1048 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1049 else
1050 mod_timer(&phba->hb_tmofunc,
1051 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1052 return;
1053 }
1054 spin_unlock_irq(&phba->pport->work_port_lock);
1055
0ff10d46
JS
1056 if (phba->elsbuf_cnt &&
1057 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1058 spin_lock_irq(&phba->hbalock);
1059 list_splice_init(&phba->elsbuf, &completions);
1060 phba->elsbuf_cnt = 0;
1061 phba->elsbuf_prev_cnt = 0;
1062 spin_unlock_irq(&phba->hbalock);
1063
1064 while (!list_empty(&completions)) {
1065 list_remove_head(&completions, buf_ptr,
1066 struct lpfc_dmabuf, list);
1067 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1068 kfree(buf_ptr);
1069 }
1070 }
1071 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1072
858c9f6c 1073 /* If there is no heart beat outstanding, issue a heartbeat command */
13815c83
JS
1074 if (phba->cfg_enable_hba_heartbeat) {
1075 if (!phba->hb_outstanding) {
bc73905a
JS
1076 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1077 (list_empty(&psli->mboxq))) {
1078 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1079 GFP_KERNEL);
1080 if (!pmboxq) {
1081 mod_timer(&phba->hb_tmofunc,
1082 jiffies +
1083 HZ * LPFC_HB_MBOX_INTERVAL);
1084 return;
1085 }
1086
1087 lpfc_heart_beat(phba, pmboxq);
1088 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1089 pmboxq->vport = phba->pport;
1090 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1091 MBX_NOWAIT);
1092
1093 if (retval != MBX_BUSY &&
1094 retval != MBX_SUCCESS) {
1095 mempool_free(pmboxq,
1096 phba->mbox_mem_pool);
1097 mod_timer(&phba->hb_tmofunc,
1098 jiffies +
1099 HZ * LPFC_HB_MBOX_INTERVAL);
1100 return;
1101 }
1102 phba->skipped_hb = 0;
1103 phba->hb_outstanding = 1;
1104 } else if (time_before_eq(phba->last_completion_time,
1105 phba->skipped_hb)) {
1106 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1107 "2857 Last completion time not "
1108 " updated in %d ms\n",
1109 jiffies_to_msecs(jiffies
1110 - phba->last_completion_time));
1111 } else
1112 phba->skipped_hb = jiffies;
1113
858c9f6c 1114 mod_timer(&phba->hb_tmofunc,
13815c83 1115 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
858c9f6c 1116 return;
13815c83
JS
1117 } else {
1118 /*
1119 * If heart beat timeout called with hb_outstanding set
dcf2a4e0
JS
1120 * we need to give the hb mailbox cmd a chance to
1121 * complete or TMO.
13815c83 1122 */
dcf2a4e0
JS
1123 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1124 "0459 Adapter heartbeat still out"
1125 "standing:last compl time was %d ms.\n",
1126 jiffies_to_msecs(jiffies
1127 - phba->last_completion_time));
1128 mod_timer(&phba->hb_tmofunc,
1129 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
858c9f6c 1130 }
858c9f6c
JS
1131 }
1132}
1133
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.  On return the port is left
 * in LPFC_HBA_ERROR state with management I/O unblocked.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Mark the SLI layer inactive before taking the port down */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	/* Board reset is issued under hbalock */
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	/* Post-reset cleanup, then wait for mailbox-ready state */
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}
1162
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.  The sequence mirrors the SLI3
 * path: quiesce, take offline, reset the device, clean up, then leave the
 * port in LPFC_HBA_ERROR state with management I/O unblocked.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}
1181
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		/* A failed register read means the device was unplugged */
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR ;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	/* Refresh the saved work status from SLIM for the caller */
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
1262
3772a991
JS
1263static void
1264lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1265{
1266 struct lpfc_board_event_header board_event;
1267 struct Scsi_Host *shost;
1268
1269 board_event.event_type = FC_REG_BOARD_EVENT;
1270 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1271 shost = lpfc_shost_from_vport(phba->pport);
1272 fc_host_post_vendor_event(shost, fc_get_event_number(),
1273 sizeof(board_event),
1274 (char *) &board_event,
1275 LPFC_NL_VENDOR_ID);
1276}
1277
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* Deferred errors (ER1 + other ER bit) get their own handler */
	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		/* Critical over-temperature: notify mgmt and take the
		 * port offline; no restart is attempted.
		 */
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
1406
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.  The recovery action taken depends on the SLI interface
 * type read from the sli_intf register.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet, just treated it as adaptor hardware failure
	 */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		/* Type 0: no recovery attempt, just take the port offline */
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		portstat_reg.word0 =
			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);

		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (!rc) {
			/* need reset: attempt for port recovery */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Port Error: Attempting "
					"Port Recovery\n");
			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			if (lpfc_online(phba) == 0) {
				lpfc_unblock_mgmt_io(phba);
				return;
			}
			/* fall through for not able to recover */
		}
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
}
1491
1492/**
1493 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1494 * @phba: pointer to lpfc HBA data structure.
1495 *
1496 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1497 * routine from the API jump table function pointer from the lpfc_hba struct.
1498 *
1499 * Return codes
af901ca1 1500 * 0 - success.
da0436e9
JS
1501 * Any other value - error.
1502 **/
1503void
1504lpfc_handle_eratt(struct lpfc_hba *phba)
1505{
1506 (*phba->lpfc_handle_eratt)(phba);
1507}
1508
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.  It issues a READ_TOPOLOGY mailbox command; on
 * any failure (rc 1..4 identifies the failing step in the 0300 message)
 * the goto chain unwinds the allocations, re-enables link attention
 * interrupts, and marks the port LPFC_HBA_ERROR.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Success: the mailbox completion handler finishes the work */
	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
1595
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * The tag bytes handled (0x82 identifier string, 0x90 VPD-R, 0x78 end tag)
 * follow the PCI VPD large/small resource data format; within the 0x90
 * section the two-byte keywords SN, V1, V2, V3 and V4 are extracted.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			/* Descriptor with a 16-bit little-endian length
			 * we do not decode: skip over its payload.
			 */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			/* Read-only VPD section containing the keyword
			 * fields; clamp its length to the buffer end.
			 */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
				/* When the SLI4 port name is fetched from
				 * the port itself, skip the VPD copy.
				 */
				if ((phba->sli_rev == LPFC_SLI_REV4) &&
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_GET)) {
					j++;
					index++;
				} else
					phba->Port[j++] = vpd[index++];
				if (j == 19)
					break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				/* Unknown keyword: skip its payload */
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			/* End tag terminates the parse */
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}
1754
e59058c4 1755/**
3621a710 1756 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
e59058c4
JS
1757 * @phba: pointer to lpfc hba data structure.
1758 * @mdp: pointer to the data structure to hold the derived model name.
1759 * @descp: pointer to the data structure to hold the derived description.
1760 *
1761 * This routine retrieves HBA's description based on its registered PCI device
1762 * ID. The @descp passed into this function points to an array of 256 chars. It
1763 * shall be returned with the model name, maximum speed, and the host bus type.
1764 * The @mdp passed into this function points to an array of 80 chars. When the
1765 * function returns, the @mdp will be filled with the model name.
1766 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;		/* set for FCoE models whose speed prints as "GE" */
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	/* Both output strings already populated by an earlier call: no work */
	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	/* Derive the maximum link speed from the link-media-type bits */
	if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	/* Map the PCI device ID to model name / host bus / function strings */
	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		/* biuRev distinguishes the LP7000 from the LP7000E variant */
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		/* JEDEC ID of the BIU tells 2G silicon from the original */
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;	/* speed suffix is "GE" for this FCoE model */
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;	/* speed suffix is "GE" for this FCoE model */
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;	/* OneConnect: description formatted differently below */
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	/* @mdp is an 80-char array (see kernel-doc); limit the copy to 79 */
	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator, Port %s",
				m.name, m.function,
				phba->Port);
		else
			/* @descp is a 256-char array; limit the copy to 255 */
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}
1986
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring. Buffers
 * that could not be posted (allocation or issue failure) are remembered in
 * pring->missbufcnt and retried on the next call.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	/* Include buffers missed on earlier attempts */
	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for  command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			/* kfree(NULL) is safe when only the mbuf alloc failed */
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				/* Unwind the first buffer as well */
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		/* Describe the buffer(s) as 64-bit BDEs in the command IOCB */
		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			/* Issue failed: buffers were never handed to the HBA,
			 * so free them and put the counts back.
			 */
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		/* Record posted buffers so completions can look them up */
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
2088
e59058c4 2089/**
3621a710 2090 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
e59058c4
JS
2091 * @phba: pointer to lpfc hba data structure.
2092 *
2093 * This routine posts initial receive IOCB buffers to the ELS ring. The
2094 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2095 * set to 64 IOCBs.
2096 *
2097 * Return codes
2098 * 0 - success (currently always success)
2099 **/
dea3101e 2100static int
2e0fef85 2101lpfc_post_rcv_buf(struct lpfc_hba *phba)
dea3101e 2102{
2103 struct lpfc_sli *psli = &phba->sli;
2104
2105 /* Ring 0, ELS / CT buffers */
495a714c 2106 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
dea3101e 2107 /* Ring 2 - FCP no buffers needed */
2108
2109 return 0;
2110}
2111
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * Load the five standard SHA-1 initialization constants into the hash
 * state array used for LC HBA authentication.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	static const uint32_t sha1_iv[5] = {
		0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
	};
	int i;

	for (i = 0; i < 5; i++)
		HashResultPointer[i] = sha1_iv[i];
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to an working hash table.
 *
 * Run one SHA-1 round over the 80-word schedule in @HashWorkingPointer:
 * first expand words 16..79 from the 16 input words, then perform the 80
 * compression steps, and finally add the round output back into the state
 * in @HashResultPointer.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	uint32_t a, b, c, d, e, f, tmp;
	int t;

	/* Message-schedule expansion: words 16..79 derived from 0..15 */
	for (t = 16; t <= 79; t++)
		HashWorkingPointer[t] =
			S(1, HashWorkingPointer[t - 3] ^
			     HashWorkingPointer[t - 8] ^
			     HashWorkingPointer[t - 14] ^
			     HashWorkingPointer[t - 16]);

	a = HashResultPointer[0];
	b = HashResultPointer[1];
	c = HashResultPointer[2];
	d = HashResultPointer[3];
	e = HashResultPointer[4];

	/* 80 compression steps, round function + constant per 20 steps */
	for (t = 0; t <= 79; t++) {
		if (t < 20)
			f = ((b & c) | ((~b) & d)) + 0x5A827999;
		else if (t < 40)
			f = (b ^ c ^ d) + 0x6ED9EBA1;
		else if (t < 60)
			f = ((b & c) | (b & d) | (c & d)) + 0x8F1BBCDC;
		else
			f = (b ^ c ^ d) + 0xCA62C1D6;
		tmp = f + S(5, a) + e + HashWorkingPointer[t];
		e = d;
		d = c;
		c = S(30, b);
		b = a;
		a = tmp;
	}

	/* Fold the round output back into the running state */
	HashResultPointer[0] += a;
	HashResultPointer[1] += b;
	HashResultPointer[2] += c;
	HashResultPointer[3] += d;
	HashResultPointer[4] += e;
}
2187
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * XOR a single host-challenge random word into the corresponding word of
 * the working hash array, returning the result through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking ^= *RandomChallenge;
}
2203
e59058c4 2204/**
3621a710 2205 * lpfc_hba_init - Perform special handling for LC HBA initialization
e59058c4
JS
2206 * @phba: pointer to lpfc hba data structure.
2207 * @hbainit: pointer to an array of unsigned 32-bit integers.
2208 *
2209 * This routine performs the special handling for LC HBA initialization.
2210 **/
dea3101e 2211void
2212lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2213{
2214 int t;
2215 uint32_t *HashWorking;
2e0fef85 2216 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
dea3101e 2217
bbfbbbc1 2218 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
dea3101e 2219 if (!HashWorking)
2220 return;
2221
dea3101e 2222 HashWorking[0] = HashWorking[78] = *pwwnn++;
2223 HashWorking[1] = HashWorking[79] = *pwwnn;
2224
2225 for (t = 0; t < 7; t++)
2226 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2227
2228 lpfc_sha_init(hbainit);
2229 lpfc_sha_iterate(hbainit, HashWorking);
2230 kfree(HashWorking);
2231}
2232
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0. After the state-machine pass
 * it polls (up to ~30 seconds) for the node list to drain before
 * releasing the vport's RRQs.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	/* Report link failure first if the link is still up */
	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		/* Inactive nodes: re-enable as UNUSED just long enough to
		 * mark them free-requested and drop the reference.
		 */
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not be in memory free mode already */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* Fabric nodes get a RECOVERY event before removal */
		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					     NLP_EVT_DEVICE_RM);

	}

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Lets wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		/* ~3000 iterations of 10ms: give up after about 30 seconds
		 * and dump the nodes that are still around.
		 */
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				"0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						LOG_NODE,
						"0282 did:x%x ndlp:x%p "
						"usgmap:x%x refcnt:%d\n",
						ndlp->nlp_DID, (void *)ndlp,
						ndlp->nlp_usg_map,
						atomic_read(
							&ndlp->kref.refcount));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
2318
e59058c4 2319/**
3621a710 2320 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
e59058c4
JS
2321 * @vport: pointer to a virtual N_Port data structure.
2322 *
2323 * This routine stops all the timers associated with a @vport. This function
2324 * is invoked before disabling or deleting a @vport. Note that the physical
2325 * port is treated as @vport 0.
2326 **/
92d7f7b0
JS
2327void
2328lpfc_stop_vport_timers(struct lpfc_vport *vport)
dea3101e 2329{
92d7f7b0
JS
2330 del_timer_sync(&vport->els_tmofunc);
2331 del_timer_sync(&vport->fc_fdmitmo);
92494144 2332 del_timer_sync(&vport->delayed_disc_tmo);
92d7f7b0
JS
2333 lpfc_can_disctmo(vport);
2334 return;
dea3101e 2335}
2336
ecfd03c6
JS
2337/**
2338 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2339 * @phba: pointer to lpfc hba data structure.
2340 *
2341 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2342 * caller of this routine should already hold the host lock.
2343 **/
2344void
2345__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2346{
5ac6b303
JS
2347 /* Clear pending FCF rediscovery wait flag */
2348 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2349
ecfd03c6
JS
2350 /* Now, try to stop the timer */
2351 del_timer(&phba->fcf.redisc_wait);
2352}
2353
/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* __lpfc_sli4_stop_fcf_redisc_wait_timer requires hbalock held */
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}
2377
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 * Vport timers are stopped first, then the HBA-wide timers, then any
 * device-group (LightPulse vs OneConnect) specific timers.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* SLI4 only: stop the RRQ timer and clear its active flag */
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
2416
/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all the user space access to
 * the HBA, whether they are from sysfs interface or libdfc interface will
 * all be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline. After setting
 * the flag, it waits (bounded by the mailbox timeout) for any in-flight
 * mailbox command to complete.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	/* Default wait bound; refined below if a mailbox cmd is active */
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timed out: log which command is still active and
			 * give up waiting; the block flag stays set.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2813 Mgmt IO is Blocked %x "
				"- mbox cmd %x still active\n",
				phba->sli.sli_flag, actcmd);
			break;
		}
	}
}
2460
/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	if (!phba)
		return 0;
	vport = phba->pport;

	/* Nothing to do if the port is not currently offline */
	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	/* Keep user-space management access out during bring-up */
	lpfc_block_mgmt_io(phba);

	if (!lpfc_sli_queue_setup(phba)) {
		lpfc_unblock_mgmt_io(phba);
		return 1;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	} else {
		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	/* Clear offline mode on every vport; flag VPI (and, on SLI4, VPI
	 * init) as needing to be redone.
	 */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
2527
e59058c4 2528/**
3621a710 2529 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
e59058c4
JS
2530 * @phba: pointer to lpfc hba data structure.
2531 *
2532 * This routine marks a HBA's management interface as not blocked. Once the
2533 * HBA's management interface is marked as not blocked, all the user space
2534 * access to the HBA, whether they are from sysfs interface or libdfc
2535 * interface will be allowed. The HBA is set to block the management interface
2536 * when the driver prepares the HBA interface for online or offline and then
2537 * set to unblock the management interface afterwards.
2538 **/
46fa311e
JS
2539void
2540lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2541{
2542 unsigned long iflag;
2543
2e0fef85
JS
2544 spin_lock_irqsave(&phba->hbalock, iflag);
2545 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2546 spin_unlock_irqrestore(&phba->hbalock, iflag);
46fa311e
JS
2547}
2548
/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It performs
 * unregistration login to all the nodes on all vports and flushes the mailbox
 * queue to make it ready to be brought offline.
 **/
void
lpfc_offline_prep(struct lpfc_hba * phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	/* Already offline: nothing to prepare */
	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba);

	lpfc_linkdown(phba);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			/* Mark VPI unregistered / needing re-registration and
			 * drop the VFI-registered flag.
			 */
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			shost =	lpfc_shost_from_vport(vports[i]);
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				if (!NLP_CHK_NODE_ACT(ndlp))
					continue;
				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
					continue;
				/* Fabric nodes are recovered then removed */
				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RM);
				}
				spin_lock_irq(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Flush and shut down the mailbox subsystem last */
	lpfc_sli_mbox_sys_shutdown(phba);
}
2611
/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	/* Already offline: nothing to do */
	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup.  The HBA is offline
	   now.  */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	/* Mark every vport offline and drop its pending work events */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
2656
/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
static int
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(&phba->hbalock);
	/* Release all the lpfc_scsi_bufs maintained by this host. */
	/* scsi_buf_list is additionally guarded by its own lock, taken
	 * nested inside hbalock here.
	 */
	spin_lock(&phba->scsi_buf_list_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_lock);

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(&phba->hbalock);
	return 0;
}
2696
/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates a FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer.
 *
 * The physical port (@dev == &phba->pcidev->dev) and NPIV virtual ports use
 * different host templates and transport templates.
 *
 * Return codes
 *      @vport - pointer to the virtual N_Port data structure.
 *      NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	int error = 0;

	/* A vport gets the vport template; the physical port gets the
	 * full template. Both carry an lpfc_vport as shost private data.
	 */
	if (dev != &phba->pcidev->dev)
		shost = scsi_host_alloc(&lpfc_vport_template,
					sizeof(struct lpfc_vport));
	else
		shost = scsi_host_alloc(&lpfc_template,
					sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	/* FC_LOADING is cleared later by lpfc_host_attrib_init() */
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;

	lpfc_get_vport_cfgparam(vport);
	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* SLI4: DMA boundary and S/G table size come from the
		 * SGE length the port reported at init time.
		 */
		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	/* Per-vport timers: discovery, FDMI, ELS and delayed discovery.
	 * All carry the vport pointer as their callback argument.
	 */
	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;

	init_timer(&vport->fc_fdmitmo);
	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
	vport->fc_fdmitmo.data = (unsigned long)vport;

	init_timer(&vport->els_tmofunc);
	vport->els_tmofunc.function = lpfc_els_timeout;
	vport->els_tmofunc.data = (unsigned long)vport;

	init_timer(&vport->delayed_disc_tmo);
	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
	vport->delayed_disc_tmo.data = (unsigned long)vport;

	/* Register with the SCSI midlayer; DMA is always done through the
	 * physical PCI device even for NPIV vports.
	 */
	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}
2796
e59058c4 2797/**
3621a710 2798 * destroy_port - destroy an FC port
e59058c4
JS
2799 * @vport: pointer to an lpfc virtual N_Port data structure.
2800 *
2801 * This routine destroys a FC port from the upper layer protocol. All the
2802 * resources associated with the port are released.
2803 **/
2e0fef85
JS
2804void
2805destroy_port(struct lpfc_vport *vport)
47a8617c 2806{
92d7f7b0
JS
2807 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2808 struct lpfc_hba *phba = vport->phba;
47a8617c 2809
858c9f6c 2810 lpfc_debugfs_terminate(vport);
92d7f7b0
JS
2811 fc_remove_host(shost);
2812 scsi_remove_host(shost);
47a8617c 2813
92d7f7b0
JS
2814 spin_lock_irq(&phba->hbalock);
2815 list_del_init(&vport->listentry);
2816 spin_unlock_irq(&phba->hbalock);
47a8617c 2817
92d7f7b0 2818 lpfc_cleanup(vport);
47a8617c 2819 return;
47a8617c
JS
2820}
2821
/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
 * uses the kernel idr facility to perform the task.
 *
 * Uses the legacy two-step idr API: idr_pre_get() preloads memory
 * (may sleep, GFP_KERNEL), then idr_get_new() hands out the next free ID.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		return -1;
	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
		return -1;
	return instance;
}
2844
/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * The decision ladder (evaluated under shost->host_lock):
 *  - unloading, >30s total, or >15s with link down: give up waiting;
 *  - otherwise wait until the vport is READY, discovery is idle, at least
 *    one mapped node exists (or 2s have passed), and no mailbox is active.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= 30 * HZ) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	/* Not ready yet: keep the midlayer polling (stat stays 0). */
	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	/* Give discovery at least 2 seconds to map a first target. */
	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
47a8617c 2902
/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 * It also clears FC_LOADING, marking the port as fully initialized.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	/* FC-4 types bitmask; bytes [2] and [7] are set here — presumably
	 * advertising the FCP FC-4 type per the FC-GS bit layout (TODO:
	 * confirm against the transport's fc4s encoding).
	 */
	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	/* Advertise every link speed the adapter's link-media type allows. */
	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	/* Max frame size: 12-bit value split across two service-parameter
	 * bytes (high nibble of Msb, full Lsb).
	 */
	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
dea3101e 2961
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts (write-1-to-clear on HA register) */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* The readl()s above force the posted MMIO writes to reach the
	 * hardware before we proceed to tear down software state.
	 */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}
db2378e0 2984
da0436e9
JS
/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device. Unlike SLI3, no register writes are needed here; only the
 * software interrupt-enable flag is cleared.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}
9399627f 3001
da0436e9
JS
/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	/* Dispatches to lpfc_stop_port_s3 or lpfc_stop_port_s4 via the
	 * per-SLI-revision jump table (installed elsewhere during setup).
	 */
	phba->lpfc_stop_port(phba);
}
db2378e0 3014
ecfd03c6
JS
/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 * While the timer is pending, FCF_REDISC_PEND is set so that the timeout
 * handler (lpfc_sli4_fcf_redisc_wait_tmo) can detect cancellation.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}
3035
/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine is invoked when waiting for FCF table rediscover has been
 * timed out. If new FCF record(s) has (have) been discovered during the
 * wait period, a new FCF event shall be added to the FCOE async event
 * list, and then worker thread shall be waked up for processing from the
 * worker thread context.
 **/
void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* Don't send FCF rediscovery event if timer cancelled: the PEND
	 * flag is cleared by whoever cancels, so test-and-act atomically
	 * under hbalock.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}
3067
e59058c4 3068/**
da0436e9 3069 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
e59058c4 3070 * @phba: pointer to lpfc hba data structure.
da0436e9 3071 * @acqe_link: pointer to the async link completion queue entry.
e59058c4 3072 *
da0436e9
JS
3073 * This routine is to parse the SLI4 link-attention link fault code and
3074 * translate it into the base driver's read link attention mailbox command
3075 * status.
3076 *
3077 * Return: Link-attention status in terms of base driver's coding.
e59058c4 3078 **/
da0436e9
JS
3079static uint16_t
3080lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3081 struct lpfc_acqe_link *acqe_link)
db2378e0 3082{
da0436e9 3083 uint16_t latt_fault;
9399627f 3084
da0436e9
JS
3085 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3086 case LPFC_ASYNC_LINK_FAULT_NONE:
3087 case LPFC_ASYNC_LINK_FAULT_LOCAL:
3088 case LPFC_ASYNC_LINK_FAULT_REMOTE:
3089 latt_fault = 0;
3090 break;
3091 default:
3092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3093 "0398 Invalid link fault code: x%x\n",
3094 bf_get(lpfc_acqe_link_fault, acqe_link));
3095 latt_fault = MBXERR_ERROR;
3096 break;
3097 }
3098 return latt_fault;
db2378e0
JS
3099}
3100
5b75da2f 3101/**
da0436e9 3102 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5b75da2f 3103 * @phba: pointer to lpfc hba data structure.
da0436e9 3104 * @acqe_link: pointer to the async link completion queue entry.
5b75da2f 3105 *
da0436e9
JS
3106 * This routine is to parse the SLI4 link attention type and translate it
3107 * into the base driver's link attention type coding.
5b75da2f 3108 *
da0436e9
JS
3109 * Return: Link attention type in terms of base driver's coding.
3110 **/
3111static uint8_t
3112lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3113 struct lpfc_acqe_link *acqe_link)
5b75da2f 3114{
da0436e9 3115 uint8_t att_type;
5b75da2f 3116
da0436e9
JS
3117 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3118 case LPFC_ASYNC_LINK_STATUS_DOWN:
3119 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
76a95d75 3120 att_type = LPFC_ATT_LINK_DOWN;
da0436e9
JS
3121 break;
3122 case LPFC_ASYNC_LINK_STATUS_UP:
3123 /* Ignore physical link up events - wait for logical link up */
76a95d75 3124 att_type = LPFC_ATT_RESERVED;
da0436e9
JS
3125 break;
3126 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
76a95d75 3127 att_type = LPFC_ATT_LINK_UP;
da0436e9
JS
3128 break;
3129 default:
3130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3131 "0399 Invalid link attention type: x%x\n",
3132 bf_get(lpfc_acqe_link_status, acqe_link));
76a95d75 3133 att_type = LPFC_ATT_RESERVED;
da0436e9 3134 break;
5b75da2f 3135 }
da0436e9 3136 return att_type;
5b75da2f
JS
3137}
3138
3139/**
da0436e9 3140 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
5b75da2f 3141 * @phba: pointer to lpfc hba data structure.
da0436e9 3142 * @acqe_link: pointer to the async link completion queue entry.
5b75da2f 3143 *
da0436e9
JS
3144 * This routine is to parse the SLI4 link-attention link speed and translate
3145 * it into the base driver's link-attention link speed coding.
3146 *
3147 * Return: Link-attention link speed in terms of base driver's coding.
3148 **/
3149static uint8_t
3150lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3151 struct lpfc_acqe_link *acqe_link)
5b75da2f 3152{
da0436e9
JS
3153 uint8_t link_speed;
3154
3155 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3156 case LPFC_ASYNC_LINK_SPEED_ZERO:
da0436e9 3157 case LPFC_ASYNC_LINK_SPEED_10MBPS:
da0436e9 3158 case LPFC_ASYNC_LINK_SPEED_100MBPS:
76a95d75 3159 link_speed = LPFC_LINK_SPEED_UNKNOWN;
da0436e9
JS
3160 break;
3161 case LPFC_ASYNC_LINK_SPEED_1GBPS:
76a95d75 3162 link_speed = LPFC_LINK_SPEED_1GHZ;
da0436e9
JS
3163 break;
3164 case LPFC_ASYNC_LINK_SPEED_10GBPS:
76a95d75 3165 link_speed = LPFC_LINK_SPEED_10GHZ;
da0436e9
JS
3166 break;
3167 default:
3168 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3169 "0483 Invalid link-attention link speed: x%x\n",
3170 bf_get(lpfc_acqe_link_speed, acqe_link));
76a95d75 3171 link_speed = LPFC_LINK_SPEED_UNKNOWN;
da0436e9
JS
3172 break;
3173 }
3174 return link_speed;
3175}
3176
/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 * For FC mode it issues a READ_TOPOLOGY mailbox command; for FCoE mode it
 * fakes a READ_TOPOLOGY completion from the ACQE contents and invokes the
 * completion handler directly.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	/* Only real link-up / link-down attentions are processed;
	 * LPFC_ATT_RESERVED (e.g. physical-up awaiting logical-up) is
	 * silently ignored.
	 */
	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
		bf_get(lpfc_acqe_link_speed, acqe_link);
	phba->sli4_hba.link_state.duplex =
		bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
		bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
		bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
		bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
		bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
		bf_get(lpfc_acqe_logical_link_speed, acqe_link);
	/* NOTE(review): the "duplex" slot below is fed link_state.topology,
	 * which this routine never assigns (duplex is assigned above) —
	 * looks like a mismatched printf argument; confirm intent.
	 */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed * 10,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		/* NOTE(review): on MBX_NOT_FINISHED this frees mp but not
		 * mp->virt (lpfc_mbuf) — possible leak; confirm whether the
		 * mbox framework reclaims it.
		 */
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
3302
70f3c073
JS
/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/* Only pure FC link events are handled here. */
	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
		bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
		bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
		bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
		bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
		bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
		bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
		bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed * 10,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	/* NOTE(review): on MBX_NOT_FINISHED this frees mp but not mp->virt
	 * (lpfc_mbuf) — same pattern as lpfc_sli4_async_link_evt; confirm.
	 */
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
3396
3397/**
3398 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
3399 * @phba: pointer to lpfc hba data structure.
3400 * @acqe_fc: pointer to the async SLI completion queue entry.
3401 *
3402 * This routine is to handle the SLI4 asynchronous SLI events.
3403 **/
3404static void
3405lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3406{
3407 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3408 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
3409 "x%08x SLI Event Type:%d",
3410 acqe_sli->event_data1, acqe_sli->event_data2,
3411 bf_get(lpfc_trailer_type, acqe_sli));
3412 return;
3413}
3414
fc2b989b
JS
3415/**
3416 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3417 * @vport: pointer to vport data structure.
3418 *
3419 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3420 * response to a CVL event.
3421 *
3422 * Return the pointer to the ndlp with the vport if successful, otherwise
3423 * return NULL.
3424 **/
3425static struct lpfc_nodelist *
3426lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3427{
3428 struct lpfc_nodelist *ndlp;
3429 struct Scsi_Host *shost;
3430 struct lpfc_hba *phba;
3431
3432 if (!vport)
3433 return NULL;
fc2b989b
JS
3434 phba = vport->phba;
3435 if (!phba)
3436 return NULL;
78730cfe
JS
3437 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3438 if (!ndlp) {
3439 /* Cannot find existing Fabric ndlp, so allocate a new one */
3440 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3441 if (!ndlp)
3442 return 0;
3443 lpfc_nlp_init(vport, ndlp, Fabric_DID);
3444 /* Set the node type */
3445 ndlp->nlp_type |= NLP_FABRIC;
3446 /* Put ndlp onto node list */
3447 lpfc_enqueue_node(vport, ndlp);
3448 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3449 /* re-setup ndlp without removing from node list */
3450 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3451 if (!ndlp)
3452 return 0;
3453 }
63e801ce
JS
3454 if ((phba->pport->port_state < LPFC_FLOGI) &&
3455 (phba->pport->port_state != LPFC_VPORT_FAILED))
fc2b989b
JS
3456 return NULL;
3457 /* If virtual link is not yet instantiated ignore CVL */
63e801ce
JS
3458 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3459 && (vport->port_state != LPFC_VPORT_FAILED))
fc2b989b
JS
3460 return NULL;
3461 shost = lpfc_shost_from_vport(vport);
3462 if (!shost)
3463 return NULL;
3464 lpfc_linkdown_port(vport);
3465 lpfc_cleanup_pending_mbox(vport);
3466 spin_lock_irq(shost->host_lock);
3467 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3468 spin_unlock_irq(shost->host_lock);
3469
3470 return ndlp;
3471}
3472
3473/**
3474 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3475 * @vport: pointer to lpfc hba data structure.
3476 *
3477 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3478 * response to a FCF dead event.
3479 **/
3480static void
3481lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3482{
3483 struct lpfc_vport **vports;
3484 int i;
3485
3486 vports = lpfc_create_vport_work_array(phba);
3487 if (vports)
3488 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3489 lpfc_sli4_perform_vport_cvl(vports[i]);
3490 lpfc_destroy_vport_work_array(phba, vports);
3491}
3492
da0436e9 3493/**
76a95d75 3494 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
da0436e9
JS
3495 * @phba: pointer to lpfc hba data structure.
3496 * @acqe_link: pointer to the async fcoe completion queue entry.
3497 *
3498 * This routine is to handle the SLI4 asynchronous fcoe event.
3499 **/
3500static void
76a95d75 3501lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
70f3c073 3502 struct lpfc_acqe_fip *acqe_fip)
da0436e9 3503{
70f3c073 3504 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
da0436e9 3505 int rc;
6669f9bb
JS
3506 struct lpfc_vport *vport;
3507 struct lpfc_nodelist *ndlp;
3508 struct Scsi_Host *shost;
695a814e
JS
3509 int active_vlink_present;
3510 struct lpfc_vport **vports;
3511 int i;
da0436e9 3512
70f3c073
JS
3513 phba->fc_eventTag = acqe_fip->event_tag;
3514 phba->fcoe_eventtag = acqe_fip->event_tag;
da0436e9 3515 switch (event_type) {
70f3c073
JS
3516 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
3517 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
3518 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
999d813f
JS
3519 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3520 LOG_DISCOVERY,
a93ff37a
JS
3521 "2546 New FCF event, evt_tag:x%x, "
3522 "index:x%x\n",
70f3c073
JS
3523 acqe_fip->event_tag,
3524 acqe_fip->index);
999d813f
JS
3525 else
3526 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3527 LOG_DISCOVERY,
a93ff37a
JS
3528 "2788 FCF param modified event, "
3529 "evt_tag:x%x, index:x%x\n",
70f3c073
JS
3530 acqe_fip->event_tag,
3531 acqe_fip->index);
38b92ef8 3532 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
0c9ab6f5
JS
3533 /*
3534 * During period of FCF discovery, read the FCF
3535 * table record indexed by the event to update
a93ff37a 3536 * FCF roundrobin failover eligible FCF bmask.
0c9ab6f5
JS
3537 */
3538 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3539 LOG_DISCOVERY,
a93ff37a
JS
3540 "2779 Read FCF (x%x) for updating "
3541 "roundrobin FCF failover bmask\n",
70f3c073
JS
3542 acqe_fip->index);
3543 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
0c9ab6f5 3544 }
38b92ef8
JS
3545
3546 /* If the FCF discovery is in progress, do nothing. */
3804dc84 3547 spin_lock_irq(&phba->hbalock);
a93ff37a 3548 if (phba->hba_flag & FCF_TS_INPROG) {
38b92ef8
JS
3549 spin_unlock_irq(&phba->hbalock);
3550 break;
3551 }
3552 /* If fast FCF failover rescan event is pending, do nothing */
3553 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3554 spin_unlock_irq(&phba->hbalock);
3555 break;
3556 }
3557
3558 /* If the FCF has been in discovered state, do nothing. */
3804dc84
JS
3559 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3560 spin_unlock_irq(&phba->hbalock);
3561 break;
3562 }
3563 spin_unlock_irq(&phba->hbalock);
38b92ef8 3564
0c9ab6f5
JS
3565 /* Otherwise, scan the entire FCF table and re-discover SAN */
3566 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
a93ff37a
JS
3567 "2770 Start FCF table scan per async FCF "
3568 "event, evt_tag:x%x, index:x%x\n",
70f3c073 3569 acqe_fip->event_tag, acqe_fip->index);
0c9ab6f5
JS
3570 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3571 LPFC_FCOE_FCF_GET_FIRST);
da0436e9 3572 if (rc)
0c9ab6f5
JS
3573 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3574 "2547 Issue FCF scan read FCF mailbox "
a93ff37a 3575 "command failed (x%x)\n", rc);
da0436e9
JS
3576 break;
3577
70f3c073 3578 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
da0436e9 3579 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e4e74273 3580 "2548 FCF Table full count 0x%x tag 0x%x\n",
70f3c073
JS
3581 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
3582 acqe_fip->event_tag);
da0436e9
JS
3583 break;
3584
70f3c073 3585 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
0c9ab6f5 3586 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
a93ff37a 3587 "2549 FCF (x%x) disconnected from network, "
70f3c073 3588 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
38b92ef8
JS
3589 /*
3590 * If we are in the middle of FCF failover process, clear
3591 * the corresponding FCF bit in the roundrobin bitmap.
da0436e9 3592 */
fc2b989b 3593 spin_lock_irq(&phba->hbalock);
0c9ab6f5 3594 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
fc2b989b 3595 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 3596 /* Update FLOGI FCF failover eligible FCF bmask */
70f3c073 3597 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
fc2b989b
JS
3598 break;
3599 }
38b92ef8
JS
3600 spin_unlock_irq(&phba->hbalock);
3601
3602 /* If the event is not for currently used fcf do nothing */
70f3c073 3603 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
38b92ef8
JS
3604 break;
3605
3606 /*
3607 * Otherwise, request the port to rediscover the entire FCF
3608 * table for a fast recovery from case that the current FCF
3609 * is no longer valid as we are not in the middle of FCF
3610 * failover process already.
3611 */
3612 spin_lock_irq(&phba->hbalock);
fc2b989b 3613 /* Mark the fast failover process in progress */
0c9ab6f5 3614 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
fc2b989b 3615 spin_unlock_irq(&phba->hbalock);
38b92ef8 3616
0c9ab6f5
JS
3617 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3618 "2771 Start FCF fast failover process due to "
3619 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
70f3c073 3620 "\n", acqe_fip->event_tag, acqe_fip->index);
fc2b989b
JS
3621 rc = lpfc_sli4_redisc_fcf_table(phba);
3622 if (rc) {
0c9ab6f5
JS
3623 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3624 LOG_DISCOVERY,
3625 "2772 Issue FCF rediscover mabilbox "
3626 "command failed, fail through to FCF "
3627 "dead event\n");
fc2b989b 3628 spin_lock_irq(&phba->hbalock);
0c9ab6f5 3629 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
fc2b989b
JS
3630 spin_unlock_irq(&phba->hbalock);
3631 /*
3632 * Last resort will fail over by treating this
3633 * as a link down to FCF registration.
3634 */
3635 lpfc_sli4_fcf_dead_failthrough(phba);
38b92ef8
JS
3636 } else {
3637 /* Reset FCF roundrobin bmask for new discovery */
7d791df7 3638 lpfc_sli4_clear_fcf_rr_bmask(phba);
38b92ef8
JS
3639 /*
3640 * Handling fast FCF failover to a DEAD FCF event is
3641 * considered equalivant to receiving CVL to all vports.
fc2b989b
JS
3642 */
3643 lpfc_sli4_perform_all_vport_cvl(phba);
38b92ef8 3644 }
da0436e9 3645 break;
70f3c073 3646 case LPFC_FIP_EVENT_TYPE_CVL:
0c9ab6f5 3647 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
6669f9bb 3648 "2718 Clear Virtual Link Received for VPI 0x%x"
70f3c073 3649 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6d368e53 3650
6669f9bb 3651 vport = lpfc_find_vport_by_vpid(phba,
5248a749 3652 acqe_fip->index);
fc2b989b 3653 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6669f9bb
JS
3654 if (!ndlp)
3655 break;
695a814e
JS
3656 active_vlink_present = 0;
3657
3658 vports = lpfc_create_vport_work_array(phba);
3659 if (vports) {
3660 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3661 i++) {
3662 if ((!(vports[i]->fc_flag &
3663 FC_VPORT_CVL_RCVD)) &&
3664 (vports[i]->port_state > LPFC_FDISC)) {
3665 active_vlink_present = 1;
3666 break;
3667 }
3668 }
3669 lpfc_destroy_vport_work_array(phba, vports);
3670 }
3671
3672 if (active_vlink_present) {
3673 /*
3674 * If there are other active VLinks present,
3675 * re-instantiate the Vlink using FDISC.
3676 */
6669f9bb 3677 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
fc2b989b 3678 shost = lpfc_shost_from_vport(vport);
6669f9bb
JS
3679 spin_lock_irq(shost->host_lock);
3680 ndlp->nlp_flag |= NLP_DELAY_TMO;
3681 spin_unlock_irq(shost->host_lock);
695a814e
JS
3682 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3683 vport->port_state = LPFC_FDISC;
3684 } else {
ecfd03c6
JS
3685 /*
3686 * Otherwise, we request port to rediscover
3687 * the entire FCF table for a fast recovery
3688 * from possible case that the current FCF
0c9ab6f5
JS
3689 * is no longer valid if we are not already
3690 * in the FCF failover process.
ecfd03c6 3691 */
fc2b989b 3692 spin_lock_irq(&phba->hbalock);
0c9ab6f5 3693 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
fc2b989b
JS
3694 spin_unlock_irq(&phba->hbalock);
3695 break;
3696 }
3697 /* Mark the fast failover process in progress */
0c9ab6f5 3698 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
fc2b989b 3699 spin_unlock_irq(&phba->hbalock);
0c9ab6f5
JS
3700 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3701 LOG_DISCOVERY,
a93ff37a 3702 "2773 Start FCF failover per CVL, "
70f3c073 3703 "evt_tag:x%x\n", acqe_fip->event_tag);
ecfd03c6 3704 rc = lpfc_sli4_redisc_fcf_table(phba);
fc2b989b 3705 if (rc) {
0c9ab6f5
JS
3706 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3707 LOG_DISCOVERY,
3708 "2774 Issue FCF rediscover "
3709 "mabilbox command failed, "
3710 "through to CVL event\n");
fc2b989b 3711 spin_lock_irq(&phba->hbalock);
0c9ab6f5 3712 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
fc2b989b 3713 spin_unlock_irq(&phba->hbalock);
ecfd03c6
JS
3714 /*
3715 * Last resort will be re-try on the
3716 * the current registered FCF entry.
3717 */
3718 lpfc_retry_pport_discovery(phba);
38b92ef8
JS
3719 } else
3720 /*
3721 * Reset FCF roundrobin bmask for new
3722 * discovery.
3723 */
7d791df7 3724 lpfc_sli4_clear_fcf_rr_bmask(phba);
6669f9bb
JS
3725 }
3726 break;
da0436e9
JS
3727 default:
3728 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3729 "0288 Unknown FCoE event type 0x%x event tag "
70f3c073 3730 "0x%x\n", event_type, acqe_fip->event_tag);
da0436e9
JS
3731 break;
3732 }
3733}
3734
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.  DCBX event
 * handling is not implemented yet; the event tag is recorded and a message
 * is logged so the event is not silently dropped.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	/* Track the latest async event tag even though the event is unhandled */
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}
3751
b19a061a
JS
/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notified of a logical link speed change.  The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	/* Save the previous speed so the change can be reported in the log */
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
	/* Port reports speed in 10Mbps units; scale for the message */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
			(phba->sli4_hba.link_state.logical_speed*10));
}
3777
da0436e9
JS
/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.  Events are drained from the slow-path async
 * work queue one at a time and dispatched by trailer code; each processed
 * CQ event is returned to the free pool.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue;
		 * dequeue under hbalock, but drop the lock before calling
		 * the per-type handlers since they take their own locks.
		 */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchrous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
3834
ecfd03c6
JS
/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.  It transitions the driver's FCF
 * state from "rediscovery timer expired" to "fast failover in progress"
 * and kicks off a full FCF table scan.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	/* All fcf_flag transitions are done atomically under hbalock */
	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}
3864
da0436e9
JS
3865/**
3866 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3867 * @phba: pointer to lpfc hba data structure.
3868 * @dev_grp: The HBA PCI-Device group number.
3869 *
3870 * This routine is invoked to set up the per HBA PCI-Device group function
3871 * API jump table entries.
3872 *
3873 * Return: 0 if success, otherwise -ENODEV
3874 **/
3875int
3876lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3877{
3878 int rc;
3879
3880 /* Set up lpfc PCI-device group */
3881 phba->pci_dev_grp = dev_grp;
3882
3883 /* The LPFC_PCI_DEV_OC uses SLI4 */
3884 if (dev_grp == LPFC_PCI_DEV_OC)
3885 phba->sli_rev = LPFC_SLI_REV4;
3886
3887 /* Set up device INIT API function jump table */
3888 rc = lpfc_init_api_table_setup(phba, dev_grp);
3889 if (rc)
3890 return -ENODEV;
3891 /* Set up SCSI API function jump table */
3892 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3893 if (rc)
3894 return -ENODEV;
3895 /* Set up SLI API function jump table */
3896 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3897 if (rc)
3898 return -ENODEV;
3899 /* Set up MBOX API function jump table */
3900 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3901 if (rc)
3902 return -ENODEV;
3903
3904 return 0;
5b75da2f
JS
3905}
3906
3907/**
3621a710 3908 * lpfc_log_intr_mode - Log the active interrupt mode
5b75da2f
JS
3909 * @phba: pointer to lpfc hba data structure.
3910 * @intr_mode: active interrupt mode adopted.
3911 *
3912 * This routine it invoked to log the currently used active interrupt mode
3913 * to the device.
3772a991
JS
3914 **/
3915static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
5b75da2f
JS
3916{
3917 switch (intr_mode) {
3918 case 0:
3919 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3920 "0470 Enable INTx interrupt mode.\n");
3921 break;
3922 case 1:
3923 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3924 "0481 Enabled MSI interrupt mode.\n");
3925 break;
3926 case 2:
3927 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3928 "0480 Enabled MSI-X interrupt mode.\n");
3929 break;
3930 default:
3931 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3932 "0482 Illegal interrupt mode.\n");
3933 break;
3934 }
3935 return;
3936}
3937
5b75da2f 3938/**
3772a991 3939 * lpfc_enable_pci_dev - Enable a generic PCI device.
5b75da2f
JS
3940 * @phba: pointer to lpfc hba data structure.
3941 *
3772a991
JS
3942 * This routine is invoked to enable the PCI device that is common to all
3943 * PCI devices.
5b75da2f
JS
3944 *
3945 * Return codes
af901ca1 3946 * 0 - successful
3772a991 3947 * other values - error
5b75da2f 3948 **/
3772a991
JS
3949static int
3950lpfc_enable_pci_dev(struct lpfc_hba *phba)
5b75da2f 3951{
3772a991 3952 struct pci_dev *pdev;
079b5c91 3953 int bars = 0;
5b75da2f 3954
3772a991
JS
3955 /* Obtain PCI device reference */
3956 if (!phba->pcidev)
3957 goto out_error;
3958 else
3959 pdev = phba->pcidev;
3960 /* Select PCI BARs */
3961 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3962 /* Enable PCI device */
3963 if (pci_enable_device_mem(pdev))
3964 goto out_error;
3965 /* Request PCI resource for the device */
3966 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3967 goto out_disable_device;
3968 /* Set up device as PCI master and save state for EEH */
3969 pci_set_master(pdev);
3970 pci_try_set_mwi(pdev);
3971 pci_save_state(pdev);
5b75da2f 3972
0558056c
JS
3973 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
3974 if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
3975 pdev->needs_freset = 1;
3976
3772a991 3977 return 0;
5b75da2f 3978
3772a991
JS
3979out_disable_device:
3980 pci_disable_device(pdev);
3981out_error:
079b5c91
JS
3982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3983 "1401 Failed to enable pci device, bars:x%x\n", bars);
3772a991 3984 return -ENODEV;
5b75da2f
JS
3985}
3986
3987/**
3772a991 3988 * lpfc_disable_pci_dev - Disable a generic PCI device.
5b75da2f
JS
3989 * @phba: pointer to lpfc hba data structure.
3990 *
3772a991
JS
3991 * This routine is invoked to disable the PCI device that is common to all
3992 * PCI devices.
5b75da2f
JS
3993 **/
3994static void
3772a991 3995lpfc_disable_pci_dev(struct lpfc_hba *phba)
5b75da2f 3996{
3772a991
JS
3997 struct pci_dev *pdev;
3998 int bars;
5b75da2f 3999
3772a991
JS
4000 /* Obtain PCI device reference */
4001 if (!phba->pcidev)
4002 return;
4003 else
4004 pdev = phba->pcidev;
4005 /* Select PCI BARs */
4006 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4007 /* Release PCI resource and disable PCI device */
4008 pci_release_selected_regions(pdev, bars);
4009 pci_disable_device(pdev);
4010 /* Null out PCI private reference to driver */
4011 pci_set_drvdata(pdev, NULL);
5b75da2f
JS
4012
4013 return;
4014}
4015
/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * on outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	/* Quiesce the port, restart the board, then bring it back online */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	/* Re-enable management I/O blocked during the offline sequence */
	lpfc_unblock_mgmt_io(phba);
}
dea3101e 4039
0a96e975
JS
/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV extended capability of the attached
 * PCI function and returns the maximum number of virtual functions
 * (TotalVFs) that the physical function supports.
 *
 * Return: the supported number of virtual functions, or 0 if the device
 * has no SR-IOV extended capability.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	/* Locate the SR-IOV extended capability; 0 means not present */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}
4064
912e3acd
JS
/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the
 * pci_enable_sriov() API call is not considered an error condition for
 * most of the devices.
 *
 * Return: 0 on success, -EINVAL when @nr_vfn exceeds the supported VF
 * count, otherwise the pci_enable_sriov() return code.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	/* Reject requests beyond what the hardware advertises as TotalVFs */
	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}
4103
3772a991
JS
/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to: driver timers, the host
 * attention mask, scatter-gather sizing, SLI layer queues, driver memory
 * pools, and (optionally) SR-IOV virtual functions.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	int rc;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	/*
	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 * 2 segments are added since the IOCB needs a command and response bde.
	 */
	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
		sizeof(struct fcp_rsp) +
		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

	if (phba->cfg_enable_bg) {
		/* BlockGuard needs extra room for protection-data BDEs */
		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
		phba->cfg_sg_dma_buf_size +=
			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
	}

	/* Also reinitialize the host templates with new values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.  Failure here is not fatal; the
	 * request is simply dropped and the count reset to zero.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}
ed957684 4213
3772a991
JS
/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Release all driver memory pools allocated at setup time */
	lpfc_mem_free_all(phba);
}
dea3101e 4229
3772a991 4230/**
da0436e9 4231 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3772a991
JS
4232 * @phba: pointer to lpfc hba data structure.
4233 *
da0436e9
JS
4234 * This routine is invoked to set up the driver internal resources specific to
4235 * support the SLI-4 HBA device it attached to.
3772a991
JS
4236 *
4237 * Return codes
af901ca1 4238 * 0 - successful
da0436e9 4239 * other values - error
3772a991
JS
4240 **/
4241static int
da0436e9 4242lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3772a991 4243{
da0436e9 4244 struct lpfc_sli *psli;
28baac74
JS
4245 LPFC_MBOXQ_t *mboxq;
4246 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4247 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4248 struct lpfc_mqe *mqe;
085c647c 4249 int longs, sli_family;
da0436e9
JS
4250
4251 /* Before proceed, wait for POST done and device ready */
4252 rc = lpfc_sli4_post_status_check(phba);
4253 if (rc)
4254 return -ENODEV;
4255
3772a991 4256 /*
da0436e9 4257 * Initialize timers used by driver
3772a991 4258 */
3772a991 4259
da0436e9
JS
4260 /* Heartbeat timer */
4261 init_timer(&phba->hb_tmofunc);
4262 phba->hb_tmofunc.function = lpfc_hb_timeout;
4263 phba->hb_tmofunc.data = (unsigned long)phba;
19ca7609
JS
4264 init_timer(&phba->rrq_tmr);
4265 phba->rrq_tmr.function = lpfc_rrq_timeout;
4266 phba->rrq_tmr.data = (unsigned long)phba;
3772a991 4267
da0436e9
JS
4268 psli = &phba->sli;
4269 /* MBOX heartbeat timer */
4270 init_timer(&psli->mbox_tmo);
4271 psli->mbox_tmo.function = lpfc_mbox_timeout;
4272 psli->mbox_tmo.data = (unsigned long) phba;
4273 /* Fabric block timer */
4274 init_timer(&phba->fabric_block_timer);
4275 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4276 phba->fabric_block_timer.data = (unsigned long) phba;
4277 /* EA polling mode timer */
4278 init_timer(&phba->eratt_poll);
4279 phba->eratt_poll.function = lpfc_poll_eratt;
4280 phba->eratt_poll.data = (unsigned long) phba;
ecfd03c6
JS
4281 /* FCF rediscover timer */
4282 init_timer(&phba->fcf.redisc_wait);
4283 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4284 phba->fcf.redisc_wait.data = (unsigned long)phba;
4285
7ad20aa9
JS
4286 /*
4287 * Control structure for handling external multi-buffer mailbox
4288 * command pass-through.
4289 */
4290 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4291 sizeof(struct lpfc_mbox_ext_buf_ctx));
4292 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4293
da0436e9
JS
4294 /*
4295 * We need to do a READ_CONFIG mailbox command here before
4296 * calling lpfc_get_cfgparam. For VFs this will report the
4297 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4298 * All of the resources allocated
4299 * for this Port are tied to these values.
4300 */
4301 /* Get all the module params for configuring this host */
4302 lpfc_get_cfgparam(phba);
4303 phba->max_vpi = LPFC_MAX_VPI;
4304 /* This will be set to correct value after the read_config mbox */
4305 phba->max_vports = 0;
3772a991 4306
da0436e9
JS
4307 /* Program the default value of vlan_id and fc_map */
4308 phba->valid_vlan = 0;
4309 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4310 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4311 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3772a991 4312
da0436e9
JS
4313 /*
4314 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4315 * used to create the sg_dma_buf_pool must be dynamically calculated.
4316 * 2 segments are added since the IOCB needs a command and response bde.
4317 * To insure that the scsi sgl does not cross a 4k page boundary only
28baac74 4318 * sgl sizes of must be a power of 2.
da0436e9 4319 */
28baac74
JS
4320 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4321 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
085c647c
JS
4322
4323 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4324 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4325 switch (sli_family) {
4326 case LPFC_SLI_INTF_FAMILY_BE2:
4327 case LPFC_SLI_INTF_FAMILY_BE3:
4328 /* There is a single hint for BE - 2 pages per BPL. */
4329 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4330 LPFC_SLI_INTF_SLI_HINT1_1)
4331 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4332 break;
4333 case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4334 case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4335 default:
4336 break;
4337 }
28baac74
JS
4338 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4339 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4340 dma_buf_size = dma_buf_size << 1)
4341 ;
4342 if (dma_buf_size == max_buf_size)
4343 phba->cfg_sg_seg_cnt = (dma_buf_size -
4344 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4345 (2 * sizeof(struct sli4_sge))) /
4346 sizeof(struct sli4_sge);
4347 phba->cfg_sg_dma_buf_size = dma_buf_size;
3772a991 4348
da0436e9
JS
4349 /* Initialize buffer queue management fields */
4350 hbq_count = lpfc_sli_hbq_count();
4351 for (i = 0; i < hbq_count; ++i)
4352 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4353 INIT_LIST_HEAD(&phba->rb_pend_list);
4354 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4355 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3772a991 4356
da0436e9
JS
4357 /*
4358 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4359 */
4360 /* Initialize the Abort scsi buffer list used by driver */
4361 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4362 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4363 /* This abort list used by worker thread */
4364 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3772a991 4365
da0436e9 4366 /*
6d368e53 4367 * Initialize driver internal slow-path work queues
da0436e9 4368 */
3772a991 4369
da0436e9
JS
4370 /* Driver internel slow-path CQ Event pool */
4371 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4372 /* Response IOCB work queue list */
45ed1190 4373 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
da0436e9
JS
4374 /* Asynchronous event CQ Event work queue list */
4375 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4376 /* Fast-path XRI aborted CQ Event work queue list */
4377 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4378 /* Slow-path XRI aborted CQ Event work queue list */
4379 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4380 /* Receive queue CQ Event work queue list */
4381 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4382
6d368e53
JS
4383 /* Initialize extent block lists. */
4384 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4385 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4386 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4387 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4388
da0436e9
JS
4389 /* Initialize the driver internal SLI layer lists. */
4390 lpfc_sli_setup(phba);
4391 lpfc_sli_queue_setup(phba);
3772a991 4392
da0436e9
JS
4393 /* Allocate device driver memory */
4394 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4395 if (rc)
4396 return -ENOMEM;
4397
2fcee4bf
JS
4398 /* IF Type 2 ports get initialized now. */
4399 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4400 LPFC_SLI_INTF_IF_TYPE_2) {
4401 rc = lpfc_pci_function_reset(phba);
4402 if (unlikely(rc))
4403 return -ENODEV;
4404 }
4405
da0436e9
JS
4406 /* Create the bootstrap mailbox command */
4407 rc = lpfc_create_bootstrap_mbox(phba);
4408 if (unlikely(rc))
4409 goto out_free_mem;
4410
4411 /* Set up the host's endian order with the device. */
4412 rc = lpfc_setup_endian_order(phba);
4413 if (unlikely(rc))
4414 goto out_free_bsmbx;
4415
4416 /* Set up the hba's configuration parameters. */
4417 rc = lpfc_sli4_read_config(phba);
4418 if (unlikely(rc))
4419 goto out_free_bsmbx;
4420
2fcee4bf
JS
4421 /* IF Type 0 ports get initialized now. */
4422 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4423 LPFC_SLI_INTF_IF_TYPE_0) {
4424 rc = lpfc_pci_function_reset(phba);
4425 if (unlikely(rc))
4426 goto out_free_bsmbx;
4427 }
da0436e9 4428
cb5172ea
JS
4429 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4430 GFP_KERNEL);
4431 if (!mboxq) {
4432 rc = -ENOMEM;
4433 goto out_free_bsmbx;
4434 }
4435
fedd3b7b 4436 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
cb5172ea
JS
4437 lpfc_supported_pages(mboxq);
4438 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
fedd3b7b
JS
4439 if (!rc) {
4440 mqe = &mboxq->u.mqe;
4441 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4442 LPFC_MAX_SUPPORTED_PAGES);
4443 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4444 switch (pn_page[i]) {
4445 case LPFC_SLI4_PARAMETERS:
4446 phba->sli4_hba.pc_sli4_params.supported = 1;
4447 break;
4448 default:
4449 break;
4450 }
4451 }
4452 /* Read the port's SLI4 Parameters capabilities if supported. */
4453 if (phba->sli4_hba.pc_sli4_params.supported)
4454 rc = lpfc_pc_sli4_params_get(phba, mboxq);
4455 if (rc) {
4456 mempool_free(mboxq, phba->mbox_mem_pool);
4457 rc = -EIO;
4458 goto out_free_bsmbx;
cb5172ea
JS
4459 }
4460 }
fedd3b7b
JS
4461 /*
4462 * Get sli4 parameters that override parameters from Port capabilities.
6d368e53
JS
4463 * If this call fails, it isn't critical unless the SLI4 parameters come
4464 * back in conflict.
fedd3b7b 4465 */
6d368e53
JS
4466 rc = lpfc_get_sli4_parameters(phba, mboxq);
4467 if (rc) {
4468 if (phba->sli4_hba.extents_in_use &&
4469 phba->sli4_hba.rpi_hdrs_in_use) {
4470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4471 "2999 Unsupported SLI4 Parameters "
4472 "Extents and RPI headers enabled.\n");
4473 goto out_free_bsmbx;
4474 }
4475 }
cb5172ea 4476 mempool_free(mboxq, phba->mbox_mem_pool);
5350d872
JS
4477 /* Verify all the SLI4 queues */
4478 rc = lpfc_sli4_queue_verify(phba);
da0436e9
JS
4479 if (rc)
4480 goto out_free_bsmbx;
4481
4482 /* Create driver internal CQE event pool */
4483 rc = lpfc_sli4_cq_event_pool_create(phba);
4484 if (rc)
5350d872 4485 goto out_free_bsmbx;
da0436e9
JS
4486
4487 /* Initialize and populate the iocb list per host */
4488 rc = lpfc_init_sgl_list(phba);
4489 if (rc) {
4490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4491 "1400 Failed to initialize sgl list.\n");
4492 goto out_destroy_cq_event_pool;
4493 }
4494 rc = lpfc_init_active_sgl_array(phba);
4495 if (rc) {
4496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4497 "1430 Failed to initialize sgl list.\n");
4498 goto out_free_sgl_list;
4499 }
da0436e9
JS
4500 rc = lpfc_sli4_init_rpi_hdrs(phba);
4501 if (rc) {
4502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4503 "1432 Failed to initialize rpi headers.\n");
4504 goto out_free_active_sgl;
4505 }
4506
a93ff37a 4507 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
0c9ab6f5
JS
4508 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4509 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4510 GFP_KERNEL);
4511 if (!phba->fcf.fcf_rr_bmask) {
4512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4513 "2759 Failed allocate memory for FCF round "
4514 "robin failover bmask\n");
0558056c 4515 rc = -ENOMEM;
0c9ab6f5
JS
4516 goto out_remove_rpi_hdrs;
4517 }
4518
5350d872
JS
4519 /*
4520 * The cfg_fcp_eq_count can be zero whenever there is exactly one
4521 * interrupt vector. This is not an error
4522 */
4523 if (phba->cfg_fcp_eq_count) {
4524 phba->sli4_hba.fcp_eq_hdl =
4525 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
da0436e9 4526 phba->cfg_fcp_eq_count), GFP_KERNEL);
5350d872
JS
4527 if (!phba->sli4_hba.fcp_eq_hdl) {
4528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4529 "2572 Failed allocate memory for "
4530 "fast-path per-EQ handle array\n");
4531 rc = -ENOMEM;
4532 goto out_free_fcf_rr_bmask;
4533 }
da0436e9
JS
4534 }
4535
4536 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4537 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4538 if (!phba->sli4_hba.msix_entries) {
4539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4540 "2573 Failed allocate memory for msi-x "
4541 "interrupt vector entries\n");
0558056c 4542 rc = -ENOMEM;
da0436e9
JS
4543 goto out_free_fcp_eq_hdl;
4544 }
4545
912e3acd
JS
4546 /*
4547 * Enable sr-iov virtual functions if supported and configured
4548 * through the module parameter.
4549 */
4550 if (phba->cfg_sriov_nr_virtfn > 0) {
4551 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4552 phba->cfg_sriov_nr_virtfn);
4553 if (rc) {
4554 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4555 "3020 Requested number of SR-IOV "
4556 "virtual functions (%d) is not "
4557 "supported\n",
4558 phba->cfg_sriov_nr_virtfn);
4559 phba->cfg_sriov_nr_virtfn = 0;
4560 }
4561 }
4562
5248a749 4563 return 0;
da0436e9
JS
4564
4565out_free_fcp_eq_hdl:
4566 kfree(phba->sli4_hba.fcp_eq_hdl);
0c9ab6f5
JS
4567out_free_fcf_rr_bmask:
4568 kfree(phba->fcf.fcf_rr_bmask);
da0436e9
JS
4569out_remove_rpi_hdrs:
4570 lpfc_sli4_remove_rpi_hdrs(phba);
4571out_free_active_sgl:
4572 lpfc_free_active_sgl(phba);
4573out_free_sgl_list:
4574 lpfc_free_sgl_list(phba);
4575out_destroy_cq_event_pool:
4576 lpfc_sli4_cq_event_pool_destroy(phba);
da0436e9
JS
4577out_free_bsmbx:
4578 lpfc_destroy_bootstrap_mbox(phba);
4579out_free_mem:
4580 lpfc_mem_free(phba);
4581 return rc;
4582}
4583
/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 *
 * Frees, in order: the MSI-X vector table, the fast-path per-EQ handle
 * array, rpi headers and rpis, the FCF round-robin bmask, the ELS sgl
 * bookkeeping, the SCSI psb array, the CQE event pool, resource
 * identifiers, the bootstrap mailbox, SLI-layer memory pools, and the
 * FCF connection record list.  The order roughly mirrors the reverse of
 * the resource setup path — do not reorder without checking dependencies.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* Free memory allocated for msi-x interrupt vector entries */
	kfree(phba->sli4_hba.msix_entries);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.fcp_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list (active-tracking array first, then the list) */
	lpfc_free_active_sgl(phba);
	lpfc_free_sgl_list(phba);

	/* Free the SCSI sgl management array */
	kfree(phba->sli4_hba.lpfc_scsi_psb_array);

	/* Free the completion queue EQ event pool; release outstanding
	 * events before destroying the pool itself.
	 */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers (rpi/xri/vfi/vpi extents). */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx (bootstrap mailbox) region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}
4638
4639/**
25985edc 4640 * lpfc_init_api_table_setup - Set up init api function jump table
da0436e9
JS
4641 * @phba: The hba struct for which this call is being executed.
4642 * @dev_grp: The HBA PCI-Device group number.
4643 *
4644 * This routine sets up the device INIT interface API function jump table
4645 * in @phba struct.
4646 *
4647 * Returns: 0 - success, -ENODEV - failure.
4648 **/
4649int
4650lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4651{
84d1b006
JS
4652 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4653 phba->lpfc_hba_down_link = lpfc_hba_down_link;
7f86059a 4654 phba->lpfc_selective_reset = lpfc_selective_reset;
da0436e9
JS
4655 switch (dev_grp) {
4656 case LPFC_PCI_DEV_LP:
4657 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4658 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4659 phba->lpfc_stop_port = lpfc_stop_port_s3;
4660 break;
4661 case LPFC_PCI_DEV_OC:
4662 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4663 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4664 phba->lpfc_stop_port = lpfc_stop_port_s4;
4665 break;
4666 default:
4667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4668 "1431 Invalid HBA PCI-device group: 0x%x\n",
4669 dev_grp);
4670 return -ENODEV;
4671 break;
4672 }
4673 return 0;
4674}
4675
4676/**
4677 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
4678 * @phba: pointer to lpfc hba data structure.
4679 *
4680 * This routine is invoked to set up the driver internal resources before the
4681 * device specific resource setup to support the HBA device it attached to.
4682 *
4683 * Return codes
af901ca1 4684 * 0 - successful
da0436e9
JS
4685 * other values - error
4686 **/
4687static int
4688lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4689{
4690 /*
4691 * Driver resources common to all SLI revisions
4692 */
4693 atomic_set(&phba->fast_event_count, 0);
4694 spin_lock_init(&phba->hbalock);
4695
4696 /* Initialize ndlp management spinlock */
4697 spin_lock_init(&phba->ndlp_lock);
4698
4699 INIT_LIST_HEAD(&phba->port_list);
4700 INIT_LIST_HEAD(&phba->work_list);
4701 init_waitqueue_head(&phba->wait_4_mlo_m_q);
4702
4703 /* Initialize the wait queue head for the kernel thread */
4704 init_waitqueue_head(&phba->work_waitq);
4705
4706 /* Initialize the scsi buffer list used by driver for scsi IO */
4707 spin_lock_init(&phba->scsi_buf_list_lock);
4708 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4709
4710 /* Initialize the fabric iocb list */
4711 INIT_LIST_HEAD(&phba->fabric_iocb_list);
4712
4713 /* Initialize list to save ELS buffers */
4714 INIT_LIST_HEAD(&phba->elsbuf);
4715
4716 /* Initialize FCF connection rec list */
4717 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4718
4719 return 0;
4720}
4721
4722/**
4723 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4724 * @phba: pointer to lpfc hba data structure.
4725 *
4726 * This routine is invoked to set up the driver internal resources after the
4727 * device specific resource setup to support the HBA device it attached to.
4728 *
4729 * Return codes
af901ca1 4730 * 0 - successful
da0436e9
JS
4731 * other values - error
4732 **/
4733static int
4734lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4735{
4736 int error;
4737
4738 /* Startup the kernel thread for this host adapter. */
4739 phba->worker_thread = kthread_run(lpfc_do_work, phba,
4740 "lpfc_worker_%d", phba->brd_no);
4741 if (IS_ERR(phba->worker_thread)) {
4742 error = PTR_ERR(phba->worker_thread);
4743 return error;
3772a991
JS
4744 }
4745
4746 return 0;
4747}
4748
4749/**
4750 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4751 * @phba: pointer to lpfc hba data structure.
4752 *
4753 * This routine is invoked to unset the driver internal resources set up after
4754 * the device specific resource setup for supporting the HBA device it
4755 * attached to.
4756 **/
4757static void
4758lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4759{
4760 /* Stop kernel worker thread */
4761 kthread_stop(phba->worker_thread);
4762}
4763
4764/**
4765 * lpfc_free_iocb_list - Free iocb list.
4766 * @phba: pointer to lpfc hba data structure.
4767 *
4768 * This routine is invoked to free the driver's IOCB list and memory.
4769 **/
4770static void
4771lpfc_free_iocb_list(struct lpfc_hba *phba)
4772{
4773 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4774
4775 spin_lock_irq(&phba->hbalock);
4776 list_for_each_entry_safe(iocbq_entry, iocbq_next,
4777 &phba->lpfc_iocb_list, list) {
4778 list_del(&iocbq_entry->list);
4779 kfree(iocbq_entry);
4780 phba->total_iocbq_bufs--;
98c9ea5c 4781 }
3772a991
JS
4782 spin_unlock_irq(&phba->hbalock);
4783
4784 return;
4785}
4786
4787/**
4788 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4789 * @phba: pointer to lpfc hba data structure.
4790 *
4791 * This routine is invoked to allocate and initizlize the driver's IOCB
4792 * list and set up the IOCB tag array accordingly.
4793 *
4794 * Return codes
af901ca1 4795 * 0 - successful
3772a991
JS
4796 * other values - error
4797 **/
4798static int
4799lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4800{
4801 struct lpfc_iocbq *iocbq_entry = NULL;
4802 uint16_t iotag;
4803 int i;
dea3101e 4804
4805 /* Initialize and populate the iocb list per host. */
4806 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3772a991 4807 for (i = 0; i < iocb_count; i++) {
dd00cc48 4808 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
dea3101e 4809 if (iocbq_entry == NULL) {
4810 printk(KERN_ERR "%s: only allocated %d iocbs of "
4811 "expected %d count. Unloading driver.\n",
cadbd4a5 4812 __func__, i, LPFC_IOCB_LIST_CNT);
dea3101e 4813 goto out_free_iocbq;
4814 }
4815
604a3e30
JB
4816 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4817 if (iotag == 0) {
3772a991 4818 kfree(iocbq_entry);
604a3e30 4819 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3772a991 4820 "Unloading driver.\n", __func__);
604a3e30
JB
4821 goto out_free_iocbq;
4822 }
6d368e53 4823 iocbq_entry->sli4_lxritag = NO_XRI;
3772a991 4824 iocbq_entry->sli4_xritag = NO_XRI;
2e0fef85
JS
4825
4826 spin_lock_irq(&phba->hbalock);
dea3101e 4827 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4828 phba->total_iocbq_bufs++;
2e0fef85 4829 spin_unlock_irq(&phba->hbalock);
dea3101e 4830 }
4831
3772a991 4832 return 0;
dea3101e 4833
3772a991
JS
4834out_free_iocbq:
4835 lpfc_free_iocb_list(phba);
dea3101e 4836
3772a991
JS
4837 return -ENOMEM;
4838}
5e9d9b82 4839
3772a991 4840/**
da0436e9
JS
4841 * lpfc_free_sgl_list - Free sgl list.
4842 * @phba: pointer to lpfc hba data structure.
3772a991 4843 *
da0436e9 4844 * This routine is invoked to free the driver's sgl list and memory.
3772a991 4845 **/
da0436e9
JS
4846static void
4847lpfc_free_sgl_list(struct lpfc_hba *phba)
3772a991 4848{
da0436e9
JS
4849 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4850 LIST_HEAD(sglq_list);
dea3101e 4851
da0436e9
JS
4852 spin_lock_irq(&phba->hbalock);
4853 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4854 spin_unlock_irq(&phba->hbalock);
dea3101e 4855
da0436e9
JS
4856 list_for_each_entry_safe(sglq_entry, sglq_next,
4857 &sglq_list, list) {
4858 list_del(&sglq_entry->list);
4859 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4860 kfree(sglq_entry);
4861 phba->sli4_hba.total_sglq_bufs--;
4862 }
da0436e9
JS
4863 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4864}
92d7f7b0 4865
da0436e9
JS
4866/**
4867 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4868 * @phba: pointer to lpfc hba data structure.
4869 *
4870 * This routine is invoked to allocate the driver's active sgl memory.
4871 * This array will hold the sglq_entry's for active IOs.
4872 **/
4873static int
4874lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4875{
4876 int size;
4877 size = sizeof(struct lpfc_sglq *);
4878 size *= phba->sli4_hba.max_cfg_param.max_xri;
4879
4880 phba->sli4_hba.lpfc_sglq_active_list =
4881 kzalloc(size, GFP_KERNEL);
4882 if (!phba->sli4_hba.lpfc_sglq_active_list)
4883 return -ENOMEM;
4884 return 0;
3772a991
JS
4885}
4886
4887/**
da0436e9 4888 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3772a991
JS
4889 * @phba: pointer to lpfc hba data structure.
4890 *
da0436e9
JS
4891 * This routine is invoked to walk through the array of active sglq entries
4892 * and free all of the resources.
4893 * This is just a place holder for now.
3772a991
JS
4894 **/
4895static void
da0436e9 4896lpfc_free_active_sgl(struct lpfc_hba *phba)
3772a991 4897{
da0436e9 4898 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3772a991
JS
4899}
4900
/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.  XRIs are split
 * between ELS use (els_xri_cnt entries, each backed by an mbuf) and the
 * remainder for SCSI (tracked via lpfc_scsi_psb_array).
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - allocation failure at any stage (partial allocations
 *	          are released before returning)
 **/
static int
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL;
	int i;
	int els_xri_cnt;

	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2400 ELS XRI count %d.\n",
				els_xri_cnt);
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Sanity check: ELS must not consume every available XRI,
	 * otherwise there is nothing left for SCSI I/O.
	 */
	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2562 No room left for SCSI XRI allocation: "
				"max_xri=%d, els_xri=%d\n",
				phba->sli4_hba.max_cfg_param.max_xri,
				els_xri_cnt);
		return -ENOMEM;
	}

	/* Allocate memory for the ELS XRI management array */
	phba->sli4_hba.lpfc_els_sgl_array =
			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
			GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_els_sgl_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2401 Failed to allocate memory for ELS "
				"XRI management array of size %d.\n",
				els_xri_cnt);
		return -ENOMEM;
	}

	/* The XRIs not reserved for ELS are available to SCSI. */
	phba->sli4_hba.scsi_xri_max =
			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.scsi_xri_cnt = 0;
	phba->sli4_hba.lpfc_scsi_psb_array =
			kzalloc((sizeof(struct lpfc_scsi_buf *) *
			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2563 Failed to allocate memory for SCSI "
				"XRI management array of size %d.\n",
				phba->sli4_hba.scsi_xri_max);
		kfree(phba->sli4_hba.lpfc_els_sgl_array);
		return -ENOMEM;
	}

	/* Allocate one sglq entry (descriptor + zeroed mbuf) per ELS XRI. */
	for (i = 0; i < els_xri_cnt; i++) {
		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
		if (sglq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d sgls of "
				"expected %d count. Unloading driver.\n",
				__func__, i, els_xri_cnt);
			goto out_free_mem;
		}

		sglq_entry->buff_type = GEN_BUFF_TYPE;
		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
		if (sglq_entry->virt == NULL) {
			kfree(sglq_entry);
			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
				"Unloading driver.\n", __func__);
			goto out_free_mem;
		}
		sglq_entry->sgl = sglq_entry->virt;
		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);

		/* The list order is used by later block SGL registraton */
		spin_lock_irq(&phba->hbalock);
		sglq_entry->state = SGL_FREED;
		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
		phba->sli4_hba.total_sglq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}
	return 0;

out_free_mem:
	/* lpfc_free_sgl_list() drains whatever made it onto the sgl list
	 * and frees lpfc_els_sgl_array, so only the psb array needs an
	 * explicit kfree here.
	 */
	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
	lpfc_free_sgl_list(phba);
	return -ENOMEM;
}
5002
5003/**
5004 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5005 * @phba: pointer to lpfc hba data structure.
5006 *
5007 * This routine is invoked to post rpi header templates to the
88a2cfbb 5008 * port for those SLI4 ports that do not support extents. This routine
da0436e9 5009 * posts a PAGE_SIZE memory region to the port to hold up to
88a2cfbb
JS
5010 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
5011 * and should be called only when interrupts are disabled.
da0436e9
JS
5012 *
5013 * Return codes
af901ca1 5014 * 0 - successful
88a2cfbb 5015 * -ERROR - otherwise.
da0436e9
JS
5016 **/
5017int
5018lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5019{
5020 int rc = 0;
da0436e9
JS
5021 struct lpfc_rpi_hdr *rpi_hdr;
5022
5023 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
da0436e9 5024 /*
6d368e53
JS
5025 * If the SLI4 port supports extents, posting the rpi header isn't
5026 * required. Set the expected maximum count and let the actual value
5027 * get set when extents are fully allocated.
da0436e9 5028 */
6d368e53
JS
5029 if (!phba->sli4_hba.rpi_hdrs_in_use) {
5030 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5031 return rc;
5032 }
5033 if (phba->sli4_hba.extents_in_use)
5034 return -EIO;
da0436e9
JS
5035
5036 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5037 if (!rpi_hdr) {
5038 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5039 "0391 Error during rpi post operation\n");
5040 lpfc_sli4_remove_rpis(phba);
5041 rc = -ENODEV;
5042 }
5043
5044 return rc;
5045}
5046
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba.  This single region
 * provides support for up to 64 rpis.  The region is used globally
 * by the device.
 *
 * Returns:
 *   A valid rpi hdr on success.
 *   A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;
	uint32_t rpi_count;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
	phba->sli4_hba.max_cfg_param.max_rpi - 1;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block.  The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi -
		phba->sli4_hba.max_cfg_param.rpi_base;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The port has a limited number of rpis.  The increment here
	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
	 * and to allow the full max_rpi range per port.
	 */
	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
		rpi_count = rpi_limit - curr_rpi_range;
	else
		rpi_count = LPFC_RPI_HDR_COUNT;

	/* Entire rpi range already consumed: nothing to post. */
	if (!rpi_count)
		return NULL;
	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
	/* The port requires the physical address to be 4K aligned. */
	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	/* NOTE(review): the lock is dropped between reading next_rpi above
	 * and updating it here; a concurrent caller could compute the same
	 * curr_rpi_range.  Presumably callers serialize externally — verify.
	 */
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * The next_rpi stores the next logical module-64 rpi value used
	 * to post physical rpis in subsequent rpi postings.
	 */
	phba->sli4_hba.next_rpi += rpi_count;
	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
5156
5157/**
5158 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5159 * @phba: pointer to lpfc hba data structure.
5160 *
5161 * This routine is invoked to remove all memory resources allocated
6d368e53
JS
5162 * to support rpis for SLI4 ports not supporting extents. This routine
5163 * presumes the caller has released all rpis consumed by fabric or port
5164 * logins and is prepared to have the header pages removed.
da0436e9
JS
5165 **/
5166void
5167lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5168{
5169 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5170
6d368e53
JS
5171 if (!phba->sli4_hba.rpi_hdrs_in_use)
5172 goto exit;
5173
da0436e9
JS
5174 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5175 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5176 list_del(&rpi_hdr->list);
5177 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5178 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5179 kfree(rpi_hdr->dmabuf);
5180 kfree(rpi_hdr);
5181 }
6d368e53
JS
5182 exit:
5183 /* There are no rpis available to the port now. */
5184 phba->sli4_hba.next_rpi = 0;
da0436e9
JS
5185}
5186
5187/**
5188 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5189 * @pdev: pointer to pci device data structure.
5190 *
5191 * This routine is invoked to allocate the driver hba data structure for an
5192 * HBA device. If the allocation is successful, the phba reference to the
5193 * PCI device data structure is set.
5194 *
5195 * Return codes
af901ca1 5196 * pointer to @phba - successful
da0436e9
JS
5197 * NULL - error
5198 **/
5199static struct lpfc_hba *
5200lpfc_hba_alloc(struct pci_dev *pdev)
5201{
5202 struct lpfc_hba *phba;
5203
5204 /* Allocate memory for HBA structure */
5205 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5206 if (!phba) {
e34ccdfe 5207 dev_err(&pdev->dev, "failed to allocate hba struct\n");
da0436e9
JS
5208 return NULL;
5209 }
5210
5211 /* Set reference to PCI device in HBA structure */
5212 phba->pcidev = pdev;
5213
5214 /* Assign an unused board number */
5215 phba->brd_no = lpfc_get_instance();
5216 if (phba->brd_no < 0) {
5217 kfree(phba);
5218 return NULL;
5219 }
5220
4fede78f 5221 spin_lock_init(&phba->ct_ev_lock);
f1c3b0fc
JS
5222 INIT_LIST_HEAD(&phba->ct_ev_waiters);
5223
da0436e9
JS
5224 return phba;
5225}
5226
5227/**
5228 * lpfc_hba_free - Free driver hba data structure with a device.
5229 * @phba: pointer to lpfc hba data structure.
5230 *
5231 * This routine is invoked to free the driver hba data structure with an
5232 * HBA device.
5233 **/
5234static void
5235lpfc_hba_free(struct lpfc_hba *phba)
5236{
5237 /* Release the driver assigned board number */
5238 idr_remove(&lpfc_hba_index, phba->brd_no);
5239
5240 kfree(phba);
5241 return;
5242}
5243
5244/**
5245 * lpfc_create_shost - Create hba physical port with associated scsi host.
5246 * @phba: pointer to lpfc hba data structure.
5247 *
5248 * This routine is invoked to create HBA physical port and associate a SCSI
5249 * host with it.
5250 *
5251 * Return codes
af901ca1 5252 * 0 - successful
da0436e9
JS
5253 * other values - error
5254 **/
5255static int
5256lpfc_create_shost(struct lpfc_hba *phba)
5257{
5258 struct lpfc_vport *vport;
5259 struct Scsi_Host *shost;
5260
5261 /* Initialize HBA FC structure */
5262 phba->fc_edtov = FF_DEF_EDTOV;
5263 phba->fc_ratov = FF_DEF_RATOV;
5264 phba->fc_altov = FF_DEF_ALTOV;
5265 phba->fc_arbtov = FF_DEF_ARBTOV;
5266
d7c47992 5267 atomic_set(&phba->sdev_cnt, 0);
da0436e9
JS
5268 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5269 if (!vport)
5270 return -ENODEV;
5271
5272 shost = lpfc_shost_from_vport(vport);
5273 phba->pport = vport;
5274 lpfc_debugfs_initialize(vport);
5275 /* Put reference to SCSI host to driver's device private data */
5276 pci_set_drvdata(phba->pcidev, shost);
2e0fef85 5277
3772a991
JS
5278 return 0;
5279}
db2378e0 5280
3772a991
JS
5281/**
5282 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5283 * @phba: pointer to lpfc hba data structure.
5284 *
5285 * This routine is invoked to destroy HBA physical port and the associated
5286 * SCSI host.
5287 **/
5288static void
5289lpfc_destroy_shost(struct lpfc_hba *phba)
5290{
5291 struct lpfc_vport *vport = phba->pport;
5292
5293 /* Destroy physical port that associated with the SCSI host */
5294 destroy_port(vport);
5295
5296 return;
5297}
5298
5299/**
5300 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5301 * @phba: pointer to lpfc hba data structure.
5302 * @shost: the shost to be used to detect Block guard settings.
5303 *
5304 * This routine sets up the local Block guard protocol settings for @shost.
5305 * This routine also allocates memory for debugging bg buffers.
5306 **/
5307static void
5308lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5309{
5310 int pagecnt = 10;
5311 if (lpfc_prot_mask && lpfc_prot_guard) {
5312 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5313 "1478 Registering BlockGuard with the "
5314 "SCSI layer\n");
5315 scsi_host_set_prot(shost, lpfc_prot_mask);
5316 scsi_host_set_guard(shost, lpfc_prot_guard);
5317 }
5318 if (!_dump_buf_data) {
5319 while (pagecnt) {
5320 spin_lock_init(&_dump_buf_lock);
5321 _dump_buf_data =
5322 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5323 if (_dump_buf_data) {
6a9c52cf
JS
5324 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5325 "9043 BLKGRD: allocated %d pages for "
3772a991
JS
5326 "_dump_buf_data at 0x%p\n",
5327 (1 << pagecnt), _dump_buf_data);
5328 _dump_buf_data_order = pagecnt;
5329 memset(_dump_buf_data, 0,
5330 ((1 << PAGE_SHIFT) << pagecnt));
5331 break;
5332 } else
5333 --pagecnt;
5334 }
5335 if (!_dump_buf_data_order)
6a9c52cf
JS
5336 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5337 "9044 BLKGRD: ERROR unable to allocate "
3772a991
JS
5338 "memory for hexdump\n");
5339 } else
6a9c52cf
JS
5340 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5341 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
3772a991
JS
5342 "\n", _dump_buf_data);
5343 if (!_dump_buf_dif) {
5344 while (pagecnt) {
5345 _dump_buf_dif =
5346 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5347 if (_dump_buf_dif) {
6a9c52cf
JS
5348 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5349 "9046 BLKGRD: allocated %d pages for "
3772a991
JS
5350 "_dump_buf_dif at 0x%p\n",
5351 (1 << pagecnt), _dump_buf_dif);
5352 _dump_buf_dif_order = pagecnt;
5353 memset(_dump_buf_dif, 0,
5354 ((1 << PAGE_SHIFT) << pagecnt));
5355 break;
5356 } else
5357 --pagecnt;
5358 }
5359 if (!_dump_buf_dif_order)
6a9c52cf
JS
5360 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5361 "9047 BLKGRD: ERROR unable to allocate "
3772a991
JS
5362 "memory for hexdump\n");
5363 } else
6a9c52cf
JS
5364 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5365 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
3772a991
JS
5366 _dump_buf_dif);
5367}
5368
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device: refresh model info, re-derive the SCSI host queue
 * depth, optionally enable BlockGuard and polling mode, and announce board
 * arrival to the FC transport layer.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.  The -10 reserves headroom;
	 * presumably for internally-issued commands — TODO confirm.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	/* Polling mode: start the poll timer under the host lock. */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}
5413
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec: maps BAR0 (SLIM) and BAR2 (control registers),
 * allocates the SLI-2 slim and HBQ coherent DMA regions, and records the
 * host-visible register addresses.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size: prefer 64-bit, fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
					       SLI2_SLIM_SIZE,
					       &phba->slim2p.phys,
					       GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
	/* Carve the SLI-2 slim into mailbox, extended mailbox, PCB and
	 * IOCB regions at their structure-defined offsets.
	 */
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	/* Allocate the HBQ slim region sized for all host buffer queues */
	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	/* Hand each HBQ its slice of the slim region */
	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	INIT_LIST_HEAD(&phba->rb_pend_list);

	/* Record the host-visible register addresses within the mapped BARs */
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

	/* Error unwinding: release resources in reverse acquisition order */
out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
5530
5531/**
5532 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5533 * @phba: pointer to lpfc hba data structure.
5534 *
5535 * This routine is invoked to unset the PCI device memory space for device
5536 * with SLI-3 interface spec.
5537 **/
5538static void
5539lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5540{
5541 struct pci_dev *pdev;
5542
5543 /* Obtain PCI device reference */
5544 if (!phba->pcidev)
5545 return;
5546 else
5547 pdev = phba->pcidev;
5548
5549 /* Free coherent DMA memory allocated */
5550 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5551 phba->hbqslimp.virt, phba->hbqslimp.phys);
5552 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5553 phba->slim2p.virt, phba->slim2p.phys);
5554
5555 /* I/O memory unmap */
5556 iounmap(phba->ctrl_regs_memmap_p);
5557 iounmap(phba->slim_memmap_p);
5558
5559 return;
5560}
5561
/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status. It polls the port semaphore register until the
 * port reports ready (or a fatal POST error), then performs if_type
 * specific error-register checks.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	/* The semaphore register must have been mapped before polling */
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds (3000 x 10ms) for POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid. Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process. Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			/* if_type 0 reports errors through the UE
			 * status registers, filtered by the UE masks.
			 */
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			/* An unmasked error bit means the port is unusable */
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
			/* Final checks. The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Port Error Detected "
					"during POST: "
					"port status reg 0x%x, "
					"port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}
3772a991 5699
da0436e9
JS
/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map. The set of registers reachable through BAR0 depends on the
 * port's interface type.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		/* if_type 0 exposes only the UE status/mask and SLI_INTF
		 * registers in BAR0.
		 */
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		/* if_type 2 keeps port control/status/error, the semaphore,
		 * and all doorbell registers in BAR0.
		 */
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
		phba->sli4_hba.EQCQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
3772a991 5761
da0436e9
JS
/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map: the port semaphore and host interrupt status/mask/clear
 * registers (the IF0-prefixed offsets suggest these apply to if_type 0
 * ports — confirm against the SLI4 register layout).
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_SLIPORT_IF0_SMPHR;
	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISR0;
	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_IMR0;
	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISCR0;
}
5781
/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	/* Each virtual function owns its own LPFC_VFR_PAGE_SIZE window of
	 * doorbell registers within the BAR2 mapping.
	 */
	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
5810
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec. This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs. No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - could not allocated memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  bmbx_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, bmbx_size);

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later. The mailbox dma address is required
	 * to be 16-byte aligned. Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now. The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses. The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	/* High word: bits 63:34 of the aligned physical address */
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	/* Low word: bits 33:4 (bits 3:0 are zero due to 16-byte alignment) */
	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
5888
5889/**
da0436e9 5890 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
3772a991
JS
5891 * @phba: pointer to lpfc hba data structure.
5892 *
da0436e9
JS
5893 * This routine is invoked to teardown the bootstrap mailbox
5894 * region and release all host resources. This routine requires
5895 * the caller to ensure all mailbox commands recovered, no
5896 * additional mailbox comands are sent, and interrupts are disabled
5897 * before calling this routine.
5898 *
5899 **/
3772a991 5900static void
da0436e9 5901lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
3772a991 5902{
da0436e9
JS
5903 dma_free_coherent(&phba->pcidev->dev,
5904 phba->sli4_hba.bmbx.bmbx_size,
5905 phba->sli4_hba.bmbx.dmabuf->virt,
5906 phba->sli4_hba.bmbx.dmabuf->phys);
5907
5908 kfree(phba->sli4_hba.bmbx.dmabuf);
5909 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
3772a991
JS
5910}
5911
/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 * 	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	uint32_t desc_count;
	int length, i, rc = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2012 Mailbox failed , mbxCmd x%x "
			"READ_CONFIG, mbxStatus x%x\n",
			bf_get(lpfc_mqe_command, &pmb->u.mqe),
			bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		/* Capture the per-resource bases and counts reported by the
		 * port; these bound all later resource allocation.
		 */
		rd_config = &pmb->u.mqe.un.rd_config;
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI(Count:%d)\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi);
	}

	if (rc)
		goto read_cfg_out;

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth >
	    (phba->sli4_hba.max_cfg_param.max_xri -
	     lpfc_sli4_get_els_iocb_cnt(phba)))
		phba->cfg_hba_queue_depth =
			phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed , mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;

	/* NOTE(review): loop bound is the fixed descriptor-array maximum,
	 * not the returned desc_count, which is currently unused.
	 */
	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)
			&get_func_cfg->func_cfg.desc[i];
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resrouce Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);
		rc = -EIO;
	}

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
6084
/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0. This routine has no function for other
 * if_types.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 * 	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	/* Well-known data pattern the port uses to detect host endianness */
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		/* No endian setup is required for these interface types */
		break;
	}
	return rc;
}
6142
/**
 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs and
 * CQs. after this routine is called the counts will be set to valid values that
 * adhere to the constraints of the system's interrupt vectors and the port's
 * queue resources.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	int cfg_fcp_wq_count;
	int cfg_fcp_eq_count;

	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on FCP fast-path WQ parameters: reserve the
	 * slow-path WQs, then clamp the fast-path request to what remains.
	 */
	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
	if (cfg_fcp_wq_count >
	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
				   LPFC_SP_WQN_DEF;
		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2581 Not enough WQs (%d) from "
					"the pci function for supporting "
					"FCP WQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->cfg_fcp_wq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2582 Not enough WQs (%d) from the pci "
				"function for supporting the requested "
				"FCP WQs (%d), the actual FCP WQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
	}
	/* The actual number of FCP work queues adopted */
	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;

	/* Sanity check on FCP fast-path EQ parameters: same scheme as the
	 * WQ check above, with the slow-path EQs reserved first.
	 */
	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
	if (cfg_fcp_eq_count >
	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
				   LPFC_SP_EQN_DEF;
		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2574 Not enough EQs (%d) from the "
					"pci function for supporting FCP "
					"EQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_eq,
					phba->cfg_fcp_eq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2575 Not enough EQs (%d) from the pci "
				"function for supporting the requested "
				"FCP EQs (%d), the actual FCP EQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
	}
	/* It does not make sense to have more EQs than WQs */
	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2593 The FCP EQ count(%d) cannot be greater "
				"than the FCP WQ count(%d), limiting the "
				"FCP EQ count to %d\n", cfg_fcp_eq_count,
				phba->cfg_fcp_wq_count,
				phba->cfg_fcp_wq_count);
		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
	}
	/* The actual number of FCP event queues adopted */
	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
	/* The overall number of event queues used */
	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	return 0;
out_error:
	return -ENOMEM;
}
6243
/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;

	/*
	 * Create Event Queues (EQs)
	 */

	/* Create slow path event queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0496 Failed allocate slow-path EQ\n");
		goto out_error;
	}
	phba->sli4_hba.sp_eq = qdesc;

	/*
	 * Create fast-path FCP Event Queue(s).  The cfg_fcp_eq_count can be
	 * zero whenever there is exactly one interrupt vector.  This is not
	 * an error.
	 */
	if (phba->cfg_fcp_eq_count) {
		phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
				       phba->cfg_fcp_eq_count), GFP_KERNEL);
		if (!phba->sli4_hba.fp_eq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2576 Failed allocate memory for "
					"fast-path EQ record array\n");
			goto out_free_sp_eq;
		}
	}
	/* Loop body never runs when cfg_fcp_eq_count is zero */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate fast-path EQ\n");
			goto out_free_fp_eq;
		}
		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
	}

	/*
	 * Create Complete Queues (CQs)
	 */

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_free_fp_eq;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_free_mbx_cq;
	}
	phba->sli4_hba.els_cq = qdesc;

	/*
	 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
	 * If there are no FCP EQs then create exactly one FCP CQ.
	 */
	if (phba->cfg_fcp_eq_count)
		phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
						 phba->cfg_fcp_eq_count),
						GFP_KERNEL);
	else
		phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
						GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2577 Failed allocate memory for fast-path "
				"CQ record array\n");
		goto out_free_els_cq;
	}
	/* do/while so at least one FCP CQ is created even with zero EQs */
	fcp_cqidx = 0;
	do {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", fcp_cqidx);
			goto out_free_fcp_cq;
		}
		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);

	/* Create Mailbox Command Queue */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_free_fcp_cq;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create all the Work Queues (WQs)
	 */
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_free_mbx_wq;
	}
	phba->sli4_hba.els_wq = qdesc;

	/* Create fast-path FCP Work Queue(s) */
	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_wq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2578 Failed allocate memory for fast-path "
				"WQ record array\n");
		goto out_free_els_wq;
	}
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", fcp_wqidx);
			goto out_free_fcp_wq;
		}
		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
	}

	/*
	 * Create Receive Queue (RQ)
	 */
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_free_fcp_wq;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_free_hdr_rq;
	}
	phba->sli4_hba.dat_rq = qdesc;

	return 0;

	/* Unwind in reverse order of allocation; pointers are NULLed so a
	 * later teardown pass cannot double-free them.
	 */
out_free_hdr_rq:
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
out_free_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_wq);
out_free_els_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;
out_free_mbx_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;
out_free_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_cq);
out_free_els_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;
out_free_mbx_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;
out_free_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
	}
	kfree(phba->sli4_hba.fp_eq);
out_free_sp_eq:
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;
out_error:
	return -ENOMEM;
}
6476
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.  It frees every queue allocated by lpfc_sli4_queue_create()
 * and NULLs the corresponding pointers so the routine is safe to run once
 * after a successful create.  Returns nothing (void).
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Release mailbox command work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;

	/* Release ELS work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;

	/* Release FCP work queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
	kfree(phba->sli4_hba.fcp_wq);
	phba->sli4_hba.fcp_wq = NULL;

	/* Release unsolicited receive queue */
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
	phba->sli4_hba.dat_rq = NULL;

	/* Release ELS complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;

	/* Release mailbox command complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;

	/* Release FCP response complete queue.  do/while mirrors the create
	 * path: one FCP CQ exists even when cfg_fcp_eq_count is zero.
	 */
	fcp_qidx = 0;
	do
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
	while (++fcp_qidx < phba->cfg_fcp_eq_count);
	kfree(phba->sli4_hba.fcp_cq);
	phba->sli4_hba.fcp_cq = NULL;

	/* Release fast-path event queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
	kfree(phba->sli4_hba.fp_eq);
	phba->sli4_hba.fp_eq = NULL;

	/* Release slow-path event queue */
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;

	return;
}
6542
/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.  Queues must already have been allocated by
 * lpfc_sli4_queue_create(); this routine posts them to the hardware in
 * dependency order: EQs first, then CQs bound to their parent EQs, then
 * MQ/WQs/RQ bound to their parent CQs.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	int rc = -ENOMEM;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int fcp_cq_index = 0;

	/*
	 * Set up Event Queues (EQs)
	 */

	/* Set up slow-path event queue */
	if (!phba->sli4_hba.sp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0520 Slow-path EQ not allocated\n");
		goto out_error;
	}
	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
			    LPFC_SP_DEF_IMAX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0521 Failed setup of slow-path EQ: "
				"rc = 0x%x\n", rc);
		goto out_error;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2583 Slow-path EQ setup: queue-id=%d\n",
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path event queue */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", fcp_eqidx);
			goto out_destroy_fp_eq;
		}
		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
				    phba->cfg_fcp_imax);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
			goto out_destroy_fp_eq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2584 Fast-path EQ setup: "
				"queue[%d]-id=%d\n", fcp_eqidx,
				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
	}

	/*
	 * Set up Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX Complete Queue as the first CQ */
	if (!phba->sli4_hba.mbx_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 Mailbox CQ not allocated\n");
		goto out_destroy_fp_eq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
			    LPFC_MCQ, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0529 Failed setup of slow-path mailbox CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fp_eq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.mbx_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up slow-path ELS Complete Queue */
	if (!phba->sli4_hba.els_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS CQ not allocated\n");
		goto out_destroy_mbx_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
			    LPFC_WCQ, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0531 Failed setup of slow-path ELS CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.els_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path FCP Response Complete Queue.  do/while mirrors
	 * the create path: one FCP CQ exists even with zero FCP EQs, in
	 * which case it is parented to the slow-path EQ.
	 */
	fcp_cqidx = 0;
	do {
		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0526 Fast-path FCP CQ (%d) not "
					"allocated\n", fcp_cqidx);
			goto out_destroy_fcp_cq;
		}
		if (phba->cfg_fcp_eq_count)
			rc = lpfc_cq_create(phba,
					    phba->sli4_hba.fcp_cq[fcp_cqidx],
					    phba->sli4_hba.fp_eq[fcp_cqidx],
					    LPFC_WCQ, LPFC_FCP);
		else
			rc = lpfc_cq_create(phba,
					    phba->sli4_hba.fcp_cq[fcp_cqidx],
					    phba->sli4_hba.sp_eq,
					    LPFC_WCQ, LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0527 Failed setup of fast-path FCP "
					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
			goto out_destroy_fcp_cq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2588 FCP CQ setup: cq[%d]-id=%d, "
				"parent %seq[%d]-id=%d\n",
				fcp_cqidx,
				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
				(phba->cfg_fcp_eq_count) ? "" : "sp_",
				fcp_cqidx,
				(phba->cfg_fcp_eq_count) ?
				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
				phba->sli4_hba.sp_eq->queue_id);
	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);

	/*
	 * Set up all the Work Queues (WQs)
	 */

	/* Set up Mailbox Command Queue */
	if (!phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0538 Slow-path MQ not allocated\n");
		goto out_destroy_fcp_cq;
	}
	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0539 Failed setup of slow-path MQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);

	/* Set up slow-path ELS Work Queue */
	if (!phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0536 Slow-path ELS WQ not allocated\n");
		goto out_destroy_mbx_wq;
	}
	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
			    phba->sli4_hba.els_cq, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0537 Failed setup of slow-path ELS WQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_wq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	/* Set up fast-path FCP Work Queue */
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0534 Fast-path FCP WQ (%d) not "
					"allocated\n", fcp_wqidx);
			goto out_destroy_fcp_wq;
		}
		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
				    phba->sli4_hba.fcp_cq[fcp_cq_index],
				    LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0535 Failed setup of fast-path FCP "
					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
			goto out_destroy_fcp_wq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2591 FCP WQ setup: wq[%d]-id=%d, "
				"parent cq[%d]-id=%d\n",
				fcp_wqidx,
				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
				fcp_cq_index,
				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
		/* Round robin FCP Work Queue's Completion Queue assignment */
		if (phba->cfg_fcp_eq_count)
			fcp_cq_index = ((fcp_cq_index + 1) %
					phba->cfg_fcp_eq_count);
	}

	/*
	 * Create Receive Queue (RQ)
	 */
	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0540 Receive Queue not allocated\n");
		goto out_destroy_fcp_wq;
	}

	lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
	lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);

	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_wq;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);
	return 0;

	/* Unwind in reverse order of setup */
out_destroy_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
out_destroy_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
out_destroy_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
out_error:
	return rc;
}
6805
6806/**
6807 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6808 * @phba: pointer to lpfc hba data structure.
6809 *
6810 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
6811 * operation.
6812 *
6813 * Return codes
af901ca1 6814 * 0 - successful
25985edc 6815 * -ENOMEM - No available memory
d439d286 6816 * -EIO - The mailbox failed to complete successfully.
da0436e9
JS
6817 **/
6818void
6819lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6820{
6821 int fcp_qidx;
6822
6823 /* Unset mailbox command work queue */
6824 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6825 /* Unset ELS work queue */
6826 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6827 /* Unset unsolicited receive queue */
6828 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6829 /* Unset FCP work queue */
6830 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6831 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6832 /* Unset mailbox command complete queue */
6833 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6834 /* Unset ELS complete queue */
6835 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
da0436e9 6836 /* Unset FCP response complete queue */
5350d872
JS
6837 fcp_qidx = 0;
6838 do {
da0436e9 6839 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5350d872 6840 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
da0436e9
JS
6841 /* Unset fast-path event queue */
6842 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6843 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6844 /* Unset slow-path event queue */
6845 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6846}
6847
6848/**
6849 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6850 * @phba: pointer to lpfc hba data structure.
6851 *
6852 * This routine is invoked to allocate and set up a pool of completion queue
6853 * events. The body of the completion queue event is a completion queue entry
6854 * CQE. For now, this pool is used for the interrupt service routine to queue
6855 * the following HBA completion queue events for the worker thread to process:
6856 * - Mailbox asynchronous events
6857 * - Receive queue completion unsolicited events
6858 * Later, this can be used for all the slow-path events.
6859 *
6860 * Return codes
af901ca1 6861 * 0 - successful
25985edc 6862 * -ENOMEM - No available memory
da0436e9
JS
6863 **/
6864static int
6865lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6866{
6867 struct lpfc_cq_event *cq_event;
6868 int i;
6869
6870 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6871 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6872 if (!cq_event)
6873 goto out_pool_create_fail;
6874 list_add_tail(&cq_event->list,
6875 &phba->sli4_hba.sp_cqe_event_pool);
6876 }
6877 return 0;
6878
6879out_pool_create_fail:
6880 lpfc_sli4_cq_event_pool_destroy(phba);
6881 return -ENOMEM;
6882}
6883
6884/**
6885 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6886 * @phba: pointer to lpfc hba data structure.
6887 *
6888 * This routine is invoked to free the pool of completion queue events at
6889 * driver unload time. Note that, it is the responsibility of the driver
6890 * cleanup routine to free all the outstanding completion-queue events
6891 * allocated from this pool back into the pool before invoking this routine
6892 * to destroy the pool.
6893 **/
6894static void
6895lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6896{
6897 struct lpfc_cq_event *cq_event, *next_cq_event;
6898
6899 list_for_each_entry_safe(cq_event, next_cq_event,
6900 &phba->sli4_hba.sp_cqe_event_pool, list) {
6901 list_del(&cq_event->list);
6902 kfree(cq_event);
6903 }
6904}
6905
/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.  The locked wrapper
 * (lpfc_sli4_cq_event_alloc) takes phba->hbalock around this call; callers
 * of this variant are expected to provide equivalent protection.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	/* list_remove_head leaves cq_event NULL when the pool is empty */
	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}
6925
6926/**
6927 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6928 * @phba: pointer to lpfc hba data structure.
6929 *
6930 * This routine is the lock version of the API invoked to allocate a
6931 * completion-queue event from the free pool.
6932 *
6933 * Return: Pointer to the newly allocated completion-queue event if successful
6934 * NULL otherwise.
6935 **/
6936struct lpfc_cq_event *
6937lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6938{
6939 struct lpfc_cq_event *cq_event;
6940 unsigned long iflags;
6941
6942 spin_lock_irqsave(&phba->hbalock, iflags);
6943 cq_event = __lpfc_sli4_cq_event_alloc(phba);
6944 spin_unlock_irqrestore(&phba->hbalock, iflags);
6945 return cq_event;
6946}
6947
/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.  The locked wrapper
 * (lpfc_sli4_cq_event_release) takes phba->hbalock around this call;
 * callers of this variant are expected to provide equivalent protection.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}
6962
6963/**
6964 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6965 * @phba: pointer to lpfc hba data structure.
6966 * @cq_event: pointer to the completion queue event to be freed.
6967 *
6968 * This routine is the lock version of the API invoked to release a
6969 * completion-queue event back into the free pool.
6970 **/
6971void
6972lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6973 struct lpfc_cq_event *cq_event)
6974{
6975 unsigned long iflags;
6976 spin_lock_irqsave(&phba->hbalock, iflags);
6977 __lpfc_sli4_cq_event_release(phba, cq_event);
6978 spin_unlock_irqrestore(&phba->hbalock, iflags);
6979}
6980
/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves all pending completion-queue events back into the
 * free pool, for use during device reset.  The three slow-path work queues
 * are spliced onto a local list under hbalock, then each event is returned
 * to the pool (re-taking the lock per event).
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Return each collected event to the free pool */
	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}
7013
/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset.  It destroys
 * all resources assigned to the PCI function which originates this request.
 * For IF_TYPE_0 ports the reset is a FUNCTION_RESET mailbox command; for
 * IF_TYPE_2 ports it is an INIT_PORT write to the control register followed
 * by polling the port status register for RDY (up to ~10s per attempt,
 * MAX_IF_TYPE_2_RESETS attempts).  IF_TYPE_1 ports are a no-op.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		/* On timeout the mailbox still belongs to the firmware;
		 * do not return it to the pool.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		for (num_resets = 0;
		     num_resets < MAX_IF_TYPE_2_RESETS;
		     num_resets++) {
			/* Request port initialization via the control reg */
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);

			/*
			 * Poll the Port Status Register and wait for RDY for
			 * up to 10 seconds. If the port doesn't respond, treat
			 * it as an error. If the port responds with RN, start
			 * the loop again.
			 */
			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
				msleep(10);
				if (lpfc_readl(phba->sli4_hba.u.if_type2.
					       STATUSregaddr, &reg_data.word0)) {
					rc = -ENODEV;
					goto out;
				}
				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
					break;
				if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
					reset_again++;
					break;
				}
			}

			/*
			 * If the port responds to the init request with
			 * reset needed, delay for a bit and restart the loop.
			 */
			if (reset_again) {
				msleep(10);
				reset_again = 0;
				continue;
			}

			/* Detect any port errors. */
			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
			    (rdy_chk >= 1000)) {
				phba->work_status[0] = readl(
					phba->sli4_hba.u.if_type2.ERR1regaddr);
				phba->work_status[1] = readl(
					phba->sli4_hba.u.if_type2.ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2890 Port Error Detected "
						"during Port Reset: "
						"port status reg 0x%x, "
						"error 1=0x%x, error 2=0x%x\n",
						reg_data.word0,
						phba->work_status[0],
						phba->work_status[1]);
				rc = -ENODEV;
			}

			/*
			 * Terminate the outer loop provided the Port indicated
			 * ready within 10 seconds.
			 */
			if (rdy_chk < 1000)
				break;
		}
		/* delay driver action following IF_TYPE_2 function reset */
		msleep(100);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (num_resets >= MAX_IF_TYPE_2_RESETS)
		rc = -ENODEV;

	return rc;
}
7153
/**
 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
 * @phba: pointer to lpfc hba data structure.
 * @cnt: number of nop mailbox commands to send.
 *
 * This routine is invoked to send a number @cnt of NOP mailbox command and
 * wait for each command to complete.  A single mailbox object is reused for
 * all @cnt issues; issuing stops early on a timeout or a failed status.
 *
 * Return: the number of NOP mailbox command completed.
 **/
static int
lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
{
	LPFC_MBOXQ_t *mboxq;
	int length, cmdsent;
	uint32_t mbox_tmo;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (cnt == 0) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2518 Requested to send 0 NOP mailbox cmd\n");
		return cnt;
	}

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2519 Unable to allocate memory for issuing "
				"NOP mailbox command\n");
		return 0;
	}

	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
	length = (sizeof(struct lpfc_mbx_nop) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);

	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
		/* Poll when interrupts are not yet enabled, else wait */
		if (!phba->sli4_hba.intr_enable)
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		else {
			mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
		}
		if (rc == MBX_TIMEOUT)
			break;
		/* Check return status */
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2520 NOP mailbox command failed "
					"status x%x add_status x%x mbx "
					"status x%x\n", shdr_status,
					shdr_add_status, rc);
			break;
		}
	}

	/* On timeout the firmware still owns the mailbox; don't free it */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	return cmdsent;
}
7224
/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * The set of BARs mapped depends on the device's if_type read from the
 * LPFC_SLI_INTF config register: the control (BAR 2) and doorbell (BAR 4)
 * regions are only mapped for IF_TYPE_0 devices.  On failure, regions
 * already mapped are unwound in reverse order via the goto labels.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error = -ENODEV;
	uint32_t if_type;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size: try 64-bit first, fall back to 32 */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return error;
	}

	/* There is no SLI3 failback for SLI4 devices. */
	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2894 SLI_INTF reg contents invalid "
				"sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return error;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BARs regions is dependent on the type of
	 * SLI4 device.
	 */
	if (pci_resource_start(pdev, 0)) {
		phba->pci_bar0_map = pci_resource_start(pdev, 0);
		bar0map_len = pci_resource_len(pdev, 0);

		/*
		 * Map SLI4 PCI Config Space Register base to a kernel virtual
		 * addr
		 */
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		/* Set up BAR0 PCI config space register memory map */
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	} else {
		/* BAR 0 unpopulated: fall back to BAR 1 for config space.
		 * IF_TYPE_2 devices require BAR 0, so this is fatal there. */
		phba->pci_bar0_map = pci_resource_start(pdev, 1);
		bar0map_len = pci_resource_len(pdev, 1);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
			goto out;
		}
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, 2))) {
		/*
		 * Map SLI4 if type 0 HBA Control Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar1_map = pci_resource_start(pdev, 2);
		bar1map_len = pci_resource_len(pdev, 2);
		phba->sli4_hba.ctrl_regs_memmap_p =
			ioremap(phba->pci_bar1_map, bar1map_len);
		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA control registers.\n");
			goto out_iounmap_conf;
		}
		lpfc_sli4_bar1_register_memmap(phba);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, 4))) {
		/*
		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, 4);
		bar2map_len = pci_resource_len(pdev, 4);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA doorbell registers.\n");
			goto out_iounmap_ctrl;
		}
		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
		if (error)
			goto out_iounmap_all;
	}

	return 0;

/* Unwind the ioremap()s in the reverse order of mapping */
out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
out:
	return error;
}
7371
7372/**
7373 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
7374 * @phba: pointer to lpfc hba data structure.
7375 *
7376 * This routine is invoked to unset the PCI device memory space for device
7377 * with SLI-4 interface spec.
7378 **/
7379static void
7380lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7381{
7382 struct pci_dev *pdev;
7383
7384 /* Obtain PCI device reference */
7385 if (!phba->pcidev)
7386 return;
7387 else
7388 pdev = phba->pcidev;
7389
7390 /* Free coherent DMA memory allocated */
7391
7392 /* Unmap I/O memory space */
7393 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7394 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7395 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7396
7397 return;
7398}
7399
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 * invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with a interrupt handler, which is done in this function. Note that
 * later when device is unloading, the driver should always call free_irq()
 * on all MSI-X vectors it has done request_irq() on before calling
 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
 * will be left with MSI-X enabled and leaks its vectors.
 *
 * The error labels at the bottom deliberately fall through into one
 * another so each failure point releases exactly the resources acquired
 * before it.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc, i;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		phba->msix_entries[i].entry = i;

	/* Configure MSI-X capability structure */
	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
			     ARRAY_SIZE(phba->msix_entries));
	if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0477 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", i,
				phba->msix_entries[i].vector,
				phba->msix_entries[i].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(phba->msix_entries[0].vector,
			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(phba->msix_entries[1].vector,
			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);

	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		/* rc is the mailbox status here and is returned as-is */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

/* Cleanup labels below intentionally fall through (LIFO teardown) */
mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[1].vector, phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}
7516
7517/**
7518 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
7519 * @phba: pointer to lpfc hba data structure.
7520 *
7521 * This routine is invoked to release the MSI-X vectors and then disable the
7522 * MSI-X interrupt mode to device with SLI-3 interface spec.
7523 **/
7524static void
7525lpfc_sli_disable_msix(struct lpfc_hba *phba)
7526{
7527 int i;
7528
7529 /* Free up MSI-X multi-message vectors */
7530 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7531 free_irq(phba->msix_entries[i].vector, phba);
7532 /* Disable MSI-X */
7533 pci_disable_msix(phba->pcidev);
7534
7535 return;
7536}
7537
7538/**
7539 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
7540 * @phba: pointer to lpfc hba data structure.
7541 *
7542 * This routine is invoked to enable the MSI interrupt mode to device with
7543 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
7544 * enable the MSI vector. The device driver is responsible for calling the
7545 * request_irq() to register MSI vector with a interrupt the handler, which
7546 * is done in this function.
7547 *
7548 * Return codes
af901ca1 7549 * 0 - successful
da0436e9
JS
7550 * other values - error
7551 */
7552static int
7553lpfc_sli_enable_msi(struct lpfc_hba *phba)
7554{
7555 int rc;
7556
7557 rc = pci_enable_msi(phba->pcidev);
7558 if (!rc)
7559 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7560 "0462 PCI enable MSI mode success.\n");
7561 else {
7562 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7563 "0471 PCI enable MSI mode failed (%d)\n", rc);
7564 return rc;
7565 }
7566
7567 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7568 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7569 if (rc) {
7570 pci_disable_msi(phba->pcidev);
7571 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7572 "0478 MSI request_irq failed (%d)\n", rc);
7573 }
7574 return rc;
7575}
7576
7577/**
7578 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7579 * @phba: pointer to lpfc hba data structure.
7580 *
7581 * This routine is invoked to disable the MSI interrupt mode to device with
7582 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
7583 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7584 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
7585 * its vector.
7586 */
7587static void
7588lpfc_sli_disable_msi(struct lpfc_hba *phba)
7589{
7590 free_irq(phba->pcidev->irq, phba);
7591 pci_disable_msi(phba->pcidev);
7592 return;
7593}
7594
7595/**
7596 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
7597 * @phba: pointer to lpfc hba data structure.
7598 *
7599 * This routine is invoked to enable device interrupt and associate driver's
7600 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
7601 * spec. Depends on the interrupt mode configured to the driver, the driver
7602 * will try to fallback from the configured interrupt mode to an interrupt
7603 * mode which is supported by the platform, kernel, and device in the order
7604 * of:
7605 * MSI-X -> MSI -> IRQ.
7606 *
7607 * Return codes
af901ca1 7608 * 0 - successful
da0436e9
JS
7609 * other values - error
7610 **/
7611static uint32_t
7612lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7613{
7614 uint32_t intr_mode = LPFC_INTR_ERROR;
7615 int retval;
7616
7617 if (cfg_mode == 2) {
7618 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7619 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7620 if (!retval) {
7621 /* Now, try to enable MSI-X interrupt mode */
7622 retval = lpfc_sli_enable_msix(phba);
7623 if (!retval) {
7624 /* Indicate initialization to MSI-X mode */
7625 phba->intr_type = MSIX;
7626 intr_mode = 2;
7627 }
7628 }
7629 }
7630
7631 /* Fallback to MSI if MSI-X initialization failed */
7632 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7633 retval = lpfc_sli_enable_msi(phba);
7634 if (!retval) {
7635 /* Indicate initialization to MSI mode */
7636 phba->intr_type = MSI;
7637 intr_mode = 1;
7638 }
7639 }
7640
7641 /* Fallback to INTx if both MSI-X/MSI initalization failed */
7642 if (phba->intr_type == NONE) {
7643 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7644 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7645 if (!retval) {
7646 /* Indicate initialization to INTx mode */
7647 phba->intr_type = INTx;
7648 intr_mode = 0;
7649 }
7650 }
7651 return intr_mode;
7652}
7653
7654/**
7655 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7656 * @phba: pointer to lpfc hba data structure.
7657 *
7658 * This routine is invoked to disable device interrupt and disassociate the
7659 * driver's interrupt handler(s) from interrupt vector(s) to device with
7660 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7661 * release the interrupt vector(s) for the message signaled interrupt.
7662 **/
7663static void
7664lpfc_sli_disable_intr(struct lpfc_hba *phba)
7665{
7666 /* Disable the currently initialized interrupt mode */
7667 if (phba->intr_type == MSIX)
7668 lpfc_sli_disable_msix(phba);
7669 else if (phba->intr_type == MSI)
7670 lpfc_sli_disable_msi(phba);
7671 else if (phba->intr_type == INTx)
7672 free_irq(phba->pcidev->irq, phba);
7673
7674 /* Reset interrupt management states */
7675 phba->intr_type = NONE;
7676 phba->sli.slistat.sli_intr = 0;
7677
7678 return;
7679}
7680
7681/**
7682 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7683 * @phba: pointer to lpfc hba data structure.
7684 *
7685 * This routine is invoked to enable the MSI-X interrupt vectors to device
7686 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7687 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7688 * enables either all or nothing, depending on the current availability of
7689 * PCI vector resources. The device driver is responsible for calling the
7690 * individual request_irq() to register each MSI-X vector with a interrupt
7691 * handler, which is done in this function. Note that later when device is
7692 * unloading, the driver should always call free_irq() on all MSI-X vectors
7693 * it has done request_irq() on before calling pci_disable_msix(). Failure
7694 * to do so results in a BUG_ON() and a device will be left with MSI-X
7695 * enabled and leaks its vectors.
7696 *
7697 * Return codes
af901ca1 7698 * 0 - successful
da0436e9
JS
7699 * other values - error
7700 **/
7701static int
7702lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7703{
75baf696 7704 int vectors, rc, index;
da0436e9
JS
7705
7706 /* Set up MSI-X multi-message vectors */
7707 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7708 phba->sli4_hba.msix_entries[index].entry = index;
7709
7710 /* Configure MSI-X capability structure */
75baf696
JS
7711 vectors = phba->sli4_hba.cfg_eqn;
7712enable_msix_vectors:
da0436e9 7713 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
75baf696
JS
7714 vectors);
7715 if (rc > 1) {
7716 vectors = rc;
7717 goto enable_msix_vectors;
7718 } else if (rc) {
da0436e9
JS
7719 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7720 "0484 PCI enable MSI-X failed (%d)\n", rc);
7721 goto msi_fail_out;
7722 }
75baf696 7723
da0436e9 7724 /* Log MSI-X vector assignment */
75baf696 7725 for (index = 0; index < vectors; index++)
da0436e9
JS
7726 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7727 "0489 MSI-X entry[%d]: vector=x%x "
7728 "message=%d\n", index,
7729 phba->sli4_hba.msix_entries[index].vector,
7730 phba->sli4_hba.msix_entries[index].entry);
7731 /*
7732 * Assign MSI-X vectors to interrupt handlers
7733 */
0558056c
JS
7734 if (vectors > 1)
7735 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7736 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7737 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7738 else
7739 /* All Interrupts need to be handled by one EQ */
7740 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7741 &lpfc_sli4_intr_handler, IRQF_SHARED,
7742 LPFC_DRIVER_NAME, phba);
da0436e9
JS
7743 if (rc) {
7744 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7745 "0485 MSI-X slow-path request_irq failed "
7746 "(%d)\n", rc);
7747 goto msi_fail_out;
7748 }
7749
7750 /* The rest of the vector(s) are associated to fast-path handler(s) */
75baf696 7751 for (index = 1; index < vectors; index++) {
da0436e9
JS
7752 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7753 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7754 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7755 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7756 LPFC_FP_DRIVER_HANDLER_NAME,
7757 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7758 if (rc) {
7759 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7760 "0486 MSI-X fast-path (%d) "
7761 "request_irq failed (%d)\n", index, rc);
7762 goto cfg_fail_out;
7763 }
7764 }
75baf696 7765 phba->sli4_hba.msix_vec_nr = vectors;
da0436e9
JS
7766
7767 return rc;
7768
7769cfg_fail_out:
7770 /* free the irq already requested */
7771 for (--index; index >= 1; index--)
7772 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
7773 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7774
7775 /* free the irq already requested */
7776 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7777
7778msi_fail_out:
7779 /* Unconfigure MSI-X capability structure */
7780 pci_disable_msix(phba->pcidev);
7781 return rc;
7782}
7783
7784/**
7785 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7786 * @phba: pointer to lpfc hba data structure.
7787 *
7788 * This routine is invoked to release the MSI-X vectors and then disable the
7789 * MSI-X interrupt mode to device with SLI-4 interface spec.
7790 **/
7791static void
7792lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7793{
7794 int index;
7795
7796 /* Free up MSI-X multi-message vectors */
7797 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7798
75baf696 7799 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
da0436e9
JS
7800 free_irq(phba->sli4_hba.msix_entries[index].vector,
7801 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
75baf696 7802
da0436e9
JS
7803 /* Disable MSI-X */
7804 pci_disable_msix(phba->pcidev);
7805
7806 return;
7807}
7808
7809/**
7810 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7811 * @phba: pointer to lpfc hba data structure.
7812 *
7813 * This routine is invoked to enable the MSI interrupt mode to device with
7814 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7815 * to enable the MSI vector. The device driver is responsible for calling
7816 * the request_irq() to register MSI vector with a interrupt the handler,
7817 * which is done in this function.
7818 *
7819 * Return codes
af901ca1 7820 * 0 - successful
da0436e9
JS
7821 * other values - error
7822 **/
7823static int
7824lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7825{
7826 int rc, index;
7827
7828 rc = pci_enable_msi(phba->pcidev);
7829 if (!rc)
7830 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7831 "0487 PCI enable MSI mode success.\n");
7832 else {
7833 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7834 "0488 PCI enable MSI mode failed (%d)\n", rc);
7835 return rc;
7836 }
7837
7838 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7839 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7840 if (rc) {
7841 pci_disable_msi(phba->pcidev);
7842 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7843 "0490 MSI request_irq failed (%d)\n", rc);
75baf696 7844 return rc;
da0436e9
JS
7845 }
7846
7847 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7848 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7849 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7850 }
7851
75baf696 7852 return 0;
da0436e9
JS
7853}
7854
7855/**
7856 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7857 * @phba: pointer to lpfc hba data structure.
7858 *
7859 * This routine is invoked to disable the MSI interrupt mode to device with
7860 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
7861 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7862 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
7863 * its vector.
7864 **/
7865static void
7866lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7867{
7868 free_irq(phba->pcidev->irq, phba);
7869 pci_disable_msi(phba->pcidev);
7870 return;
7871}
7872
7873/**
7874 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7875 * @phba: pointer to lpfc hba data structure.
7876 *
7877 * This routine is invoked to enable device interrupt and associate driver's
7878 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
7879 * interface spec. Depends on the interrupt mode configured to the driver,
7880 * the driver will try to fallback from the configured interrupt mode to an
7881 * interrupt mode which is supported by the platform, kernel, and device in
7882 * the order of:
7883 * MSI-X -> MSI -> IRQ.
7884 *
7885 * Return codes
af901ca1 7886 * 0 - successful
da0436e9
JS
7887 * other values - error
7888 **/
7889static uint32_t
7890lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7891{
7892 uint32_t intr_mode = LPFC_INTR_ERROR;
7893 int retval, index;
7894
7895 if (cfg_mode == 2) {
7896 /* Preparation before conf_msi mbox cmd */
7897 retval = 0;
7898 if (!retval) {
7899 /* Now, try to enable MSI-X interrupt mode */
7900 retval = lpfc_sli4_enable_msix(phba);
7901 if (!retval) {
7902 /* Indicate initialization to MSI-X mode */
7903 phba->intr_type = MSIX;
7904 intr_mode = 2;
7905 }
7906 }
7907 }
7908
7909 /* Fallback to MSI if MSI-X initialization failed */
7910 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7911 retval = lpfc_sli4_enable_msi(phba);
7912 if (!retval) {
7913 /* Indicate initialization to MSI mode */
7914 phba->intr_type = MSI;
7915 intr_mode = 1;
7916 }
7917 }
7918
7919 /* Fallback to INTx if both MSI-X/MSI initalization failed */
7920 if (phba->intr_type == NONE) {
7921 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7922 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7923 if (!retval) {
7924 /* Indicate initialization to INTx mode */
7925 phba->intr_type = INTx;
7926 intr_mode = 0;
7927 for (index = 0; index < phba->cfg_fcp_eq_count;
7928 index++) {
7929 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7930 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7931 }
7932 }
7933 }
7934 return intr_mode;
7935}
7936
7937/**
7938 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7939 * @phba: pointer to lpfc hba data structure.
7940 *
7941 * This routine is invoked to disable device interrupt and disassociate
7942 * the driver's interrupt handler(s) from interrupt vector(s) to device
7943 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7944 * will release the interrupt vector(s) for the message signaled interrupt.
7945 **/
7946static void
7947lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7948{
7949 /* Disable the currently initialized interrupt mode */
7950 if (phba->intr_type == MSIX)
7951 lpfc_sli4_disable_msix(phba);
7952 else if (phba->intr_type == MSI)
7953 lpfc_sli4_disable_msi(phba);
7954 else if (phba->intr_type == INTx)
7955 free_irq(phba->pcidev->irq, phba);
7956
7957 /* Reset interrupt management states */
7958 phba->intr_type = NONE;
7959 phba->sli.slistat.sli_intr = 0;
7960
7961 return;
7962}
7963
/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.  The teardown ordering below is
 * deliberate: mark unloading, stop timers, bring the SLI layer down and
 * restart the board before interrupts are released.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Flag the physical port as unloading under the host lock */
	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	lpfc_stop_hba_timers(phba);

	/* Cancel any pending port work before tearing the SLI layer down */
	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	/* Interrupts go last, after the hardware is quiesced */
	lpfc_sli_disable_intr(phba);

	return;
}
7993
/**
 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-4 interface spec.  The teardown ordering below is
 * deliberate: mark unloading, stop the port, release interrupts, then
 * reset the PCI function and destroy the SLI-4 queues.
 **/
static void
lpfc_sli4_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Flag the physical port as unloading under the host lock */
	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	/* Cancel any pending port work */
	phba->pport->work_port_events = 0;

	/* Stop the SLI4 device port */
	lpfc_stop_port(phba);

	lpfc_sli4_disable_intr(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	return;
}
8024
5af5eee7
JS
8025/**
8026 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
8027 * @phba: Pointer to HBA context object.
8028 *
8029 * This function is called in the SLI4 code path to wait for completion
8030 * of device's XRIs exchange busy. It will check the XRI exchange busy
8031 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
8032 * that, it will check the XRI exchange busy on outstanding FCP and ELS
8033 * I/Os every 30 seconds, log error message, and wait forever. Only when
8034 * all XRI exchange busy complete, the driver unload shall proceed with
8035 * invoking the function reset ioctl mailbox command to the CNA and the
8036 * the rest of the driver unload resource release.
8037 **/
8038static void
8039lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
8040{
8041 int wait_time = 0;
8042 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8043 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8044
8045 while (!fcp_xri_cmpl || !els_xri_cmpl) {
8046 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
8047 if (!fcp_xri_cmpl)
8048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8049 "2877 FCP XRI exchange busy "
8050 "wait time: %d seconds.\n",
8051 wait_time/1000);
8052 if (!els_xri_cmpl)
8053 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8054 "2878 ELS XRI exchange busy "
8055 "wait time: %d seconds.\n",
8056 wait_time/1000);
8057 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
8058 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
8059 } else {
8060 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
8061 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
8062 }
8063 fcp_xri_cmpl =
8064 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8065 els_xri_cmpl =
8066 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8067 }
8068}
8069
/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, trying to wait it out if we can (10ms per poll, bounded) */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out:
	 * mark it MBX_NOT_FINISHED and push it to the completion queue,
	 * all under hbalock so the state change is atomic. */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}
8139
28baac74
JS
/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities and cache them in phba->sli4_hba.pc_sli4_params.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 *
 * Return: 0 on success, 1 if the mailbox command failed.
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	/* Poll for completion when device interrupts are not yet enabled */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	/* Copy the returned capability fields into the driver's cache */
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return rc;
}
8211
fedd3b7b
JS
/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities via the GET_SLI4_PARAMETERS config mailbox command
 * and cache them in phba->sli4_hba.pc_sli4_params.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 *
 * Return: 0 on success, nonzero mailbox return code on failure.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings. The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	/* Poll for completion when device interrupts are not yet enabled */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	/* Copy the returned capability fields into the driver's cache */
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	/* Track whether the port supports per-host work queues */
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	/* Port reports whether RPI header postings are actually required */
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return 0;
}
8287
da0436e9
JS
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it can
 * support this kind of device. If the match is successful, the driver core
 * invokes this routine. If this routine determines it can claim the HBA, it
 * does all the initialization that it needs to do to handle the HBA properly.
 *
 * On any failure, the goto-label ladder at the bottom unwinds exactly the
 * resources acquired so far, in reverse order of acquisition.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/*
			 * Try next (lower) interrupt mode; the loop always
			 * terminates because mode 0 passes the test above.
			 */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
8462
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to disattach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly. The teardown order
 * (vports, hosts, SLI layer, interrupts, memory, BARs) mirrors probe in
 * reverse and must not be reordered.
 **/
static void __devexit
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* Flag unloading so in-flight paths stop issuing new work */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		/* index 0 is the physical port itself; skip it */
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disable all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	pci_set_drvdata(pdev, NULL);
	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
}
8553
8554/**
8555 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
8556 * @pdev: pointer to PCI device
8557 * @msg: power management message
8558 *
8559 * This routine is to be called from the kernel's PCI subsystem to support
8560 * system Power Management (PM) to device with SLI-3 interface spec. When
8561 * PM invokes this method, it quiesces the device by stopping the driver's
8562 * worker thread for the device, turning off device's interrupt and DMA,
8563 * and bring the device offline. Note that as the driver implements the
8564 * minimum PM requirements to a power-aware driver's PM support for the
8565 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
8566 * to the suspend() method call will be treated as SUSPEND and the driver will
8567 * fully reinitialize its device during resume() method call, the driver will
8568 * set device to PCI_D3hot state in PCI config space instead of setting it
8569 * according to the @msg provided by the PM.
8570 *
8571 * Return code
8572 * 0 - driver suspended the device
8573 * Error otherwise
8574 **/
8575static int
8576lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
8577{
8578 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8579 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8580
8581 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8582 "0473 PCI device Power Management suspend.\n");
8583
8584 /* Bring down the device */
8585 lpfc_offline_prep(phba);
8586 lpfc_offline(phba);
8587 kthread_stop(phba->worker_thread);
8588
8589 /* Disable interrupt from device */
8590 lpfc_sli_disable_intr(phba);
8591
8592 /* Save device state to PCI config space */
8593 pci_save_state(pdev);
8594 pci_set_power_state(pdev, PCI_D3hot);
8595
8596 return 0;
8597}
8598
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt, reusing the pre-suspend mode */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
8671
891478a2
JS
8672/**
8673 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
8674 * @phba: pointer to lpfc hba data structure.
8675 *
8676 * This routine is called to prepare the SLI3 device for PCI slot recover. It
e2af0d2e 8677 * aborts all the outstanding SCSI I/Os to the pci device.
891478a2
JS
8678 **/
8679static void
8680lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
8681{
e2af0d2e
JS
8682 struct lpfc_sli *psli = &phba->sli;
8683 struct lpfc_sli_ring *pring;
8684
891478a2
JS
8685 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8686 "2723 PCI channel I/O abort preparing for recovery\n");
e2af0d2e
JS
8687
8688 /*
8689 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
8690 * and let the SCSI mid-layer to retry them to recover.
8691 */
8692 pring = &psli->ring[psli->fcp_ring];
8693 lpfc_sli_abort_iocb_ring(phba, pring);
891478a2
JS
8694}
8695
0d878419
JS
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * blocks management and SCSI I/O, stops the timers, disables the device
 * interrupt and pci device, and flushes the internal FCP pending I/Os.
 * The order matters: I/O is blocked before the device is disabled, and
 * outstanding I/Os are flushed only after no new ones can arrive.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);
}
8726
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic, stops the driver
 * timers, and flushes the FCP pending I/Os. Unlike the reset path, the PCI
 * device itself is not touched here; the device is going away for good.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
8749
da0436e9
JS
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Map each PCI channel state to the matching prep routine */
	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
8795
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* SLI is not active until the board is brought back up below */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt, reusing the last known mode */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
8864
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
3772a991 8888
da0436e9
JS
8889/**
8890 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
8891 * @phba: pointer to lpfc hba data structure.
8892 *
8893 * returns the number of ELS/CT IOCBs to reserve
8894 **/
8895int
8896lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8897{
8898 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
8899
f1126688
JS
8900 if (phba->sli_rev == LPFC_SLI_REV4) {
8901 if (max_xri <= 100)
6a9c52cf 8902 return 10;
f1126688 8903 else if (max_xri <= 256)
6a9c52cf 8904 return 25;
f1126688 8905 else if (max_xri <= 512)
6a9c52cf 8906 return 50;
f1126688 8907 else if (max_xri <= 1024)
6a9c52cf 8908 return 100;
f1126688 8909 else
6a9c52cf 8910 return 150;
f1126688
JS
8911 } else
8912 return 0;
3772a991
JS
8913}
8914
52d52440
JS
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @phba: pointer to lpfc hba data structure.
 * @fw: pointer to firmware image returned from request_firmware.
 *
 * Validates the group-object image header, and if the image revision
 * differs from the currently running firmware, streams the image to the
 * port in SLI4_PAGE_SIZE chunks through a temporary DMA buffer list via
 * lpfc_wr_object(). The DMA buffers are always freed before return.
 *
 * returns the number of bytes written if write is successful.
 * returns a negative error value if there were errors.
 * returns 0 if firmware matches currently active firmware on port.
 **/
int
lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
{
	char fwrev[32];
	struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;

	INIT_LIST_HEAD(&dma_buffer_list);
	/* Reject anything that is not a well-formed group-object image
	 * whose recorded size matches what request_firmware delivered.
	 * (LPFC_GROUP_OJECT_MAGIC_NUM is the project macro's spelling.)
	 */
	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
	     LPFC_FILE_TYPE_GROUP) ||
	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
	    (be32_to_cpu(image->size) != fw->size)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3022 Invalid FW image found. "
				"Magic:%x Type:%x ID:%x\n",
				be32_to_cpu(image->magic_number),
				bf_get_be32(lpfc_grp_hdr_file_type, image),
				bf_get_be32(lpfc_grp_hdr_id, image));
		return -EINVAL;
	}
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	/* Only flash when the image revision differs from the running one */
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware. Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		/* Build a pool of page-sized DMA buffers for the transfer */
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					SLI4_PAGE_SIZE,
					&dmabuf->phys,
					GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		/* Stream the image: fill the buffer list, write, repeat.
		 * lpfc_wr_object() advances 'offset' by the bytes consumed.
		 */
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					/* Final partial page of the image */
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
				    (fw->size - offset), &offset);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3024 Firmware update failed. "
						"%d\n", rc);
				goto out;
			}
		}
		rc = offset;
	}
out:
	/* Always release the DMA buffer pool, on success and failure alike */
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	return rc;
}
9006
3772a991 9007/**
da0436e9 9008 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
3772a991
JS
9009 * @pdev: pointer to PCI device
9010 * @pid: pointer to PCI device identifier
9011 *
da0436e9
JS
9012 * This routine is called from the kernel's PCI subsystem to device with
9013 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
3772a991 9014 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
da0436e9
JS
9015 * information of the device and driver to see if the driver state that it
9016 * can support this kind of device. If the match is successful, the driver
9017 * core invokes this routine. If this routine determines it can claim the HBA,
9018 * it does all the initialization that it needs to do to handle the HBA
9019 * properly.
3772a991
JS
9020 *
9021 * Return code
9022 * 0 - driver can claim the device
9023 * negative value - driver can not claim the device
9024 **/
9025static int __devinit
da0436e9 9026lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
3772a991
JS
9027{
9028 struct lpfc_hba *phba;
9029 struct lpfc_vport *vport = NULL;
6669f9bb 9030 struct Scsi_Host *shost = NULL;
3772a991
JS
9031 int error;
9032 uint32_t cfg_mode, intr_mode;
da0436e9 9033 int mcnt;
0558056c 9034 int adjusted_fcp_eq_count;
52d52440
JS
9035 const struct firmware *fw;
9036 uint8_t file_name[16];
3772a991
JS
9037
9038 /* Allocate memory for HBA structure */
9039 phba = lpfc_hba_alloc(pdev);
9040 if (!phba)
9041 return -ENOMEM;
9042
9043 /* Perform generic PCI device enabling operation */
9044 error = lpfc_enable_pci_dev(phba);
079b5c91 9045 if (error)
3772a991 9046 goto out_free_phba;
3772a991 9047
da0436e9
JS
9048 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
9049 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
3772a991
JS
9050 if (error)
9051 goto out_disable_pci_dev;
9052
da0436e9
JS
9053 /* Set up SLI-4 specific device PCI memory space */
9054 error = lpfc_sli4_pci_mem_setup(phba);
3772a991
JS
9055 if (error) {
9056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 9057 "1410 Failed to set up pci memory space.\n");
3772a991
JS
9058 goto out_disable_pci_dev;
9059 }
9060
9061 /* Set up phase-1 common device driver resources */
9062 error = lpfc_setup_driver_resource_phase1(phba);
9063 if (error) {
9064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9
JS
9065 "1411 Failed to set up driver resource.\n");
9066 goto out_unset_pci_mem_s4;
3772a991
JS
9067 }
9068
da0436e9
JS
9069 /* Set up SLI-4 Specific device driver resources */
9070 error = lpfc_sli4_driver_resource_setup(phba);
3772a991
JS
9071 if (error) {
9072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9
JS
9073 "1412 Failed to set up driver resource.\n");
9074 goto out_unset_pci_mem_s4;
3772a991
JS
9075 }
9076
9077 /* Initialize and populate the iocb list per host */
2a9bf3d0
JS
9078
9079 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9080 "2821 initialize iocb list %d.\n",
9081 phba->cfg_iocb_cnt*1024);
9082 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
9083
3772a991
JS
9084 if (error) {
9085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9
JS
9086 "1413 Failed to initialize iocb list.\n");
9087 goto out_unset_driver_resource_s4;
3772a991
JS
9088 }
9089
19ca7609 9090 INIT_LIST_HEAD(&phba->active_rrq_list);
7d791df7 9091 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
19ca7609 9092
3772a991
JS
9093 /* Set up common device driver resources */
9094 error = lpfc_setup_driver_resource_phase2(phba);
9095 if (error) {
9096 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 9097 "1414 Failed to set up driver resource.\n");
3772a991
JS
9098 goto out_free_iocb_list;
9099 }
9100
079b5c91
JS
9101 /* Get the default values for Model Name and Description */
9102 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9103
3772a991
JS
9104 /* Create SCSI host to the physical port */
9105 error = lpfc_create_shost(phba);
9106 if (error) {
9107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 9108 "1415 Failed to create scsi host.\n");
3772a991
JS
9109 goto out_unset_driver_resource;
9110 }
9399627f 9111
5b75da2f 9112 /* Configure sysfs attributes */
3772a991
JS
9113 vport = phba->pport;
9114 error = lpfc_alloc_sysfs_attr(vport);
9115 if (error) {
9399627f 9116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 9117 "1416 Failed to allocate sysfs attr\n");
3772a991 9118 goto out_destroy_shost;
98c9ea5c 9119 }
875fbdfe 9120
6669f9bb 9121 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
3772a991 9122 /* Now, trying to enable interrupt and bring up the device */
5b75da2f
JS
9123 cfg_mode = phba->cfg_use_msi;
9124 while (true) {
3772a991
JS
9125 /* Put device to a known state before enabling interrupt */
9126 lpfc_stop_port(phba);
5b75da2f 9127 /* Configure and enable interrupt */
da0436e9 9128 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
5b75da2f
JS
9129 if (intr_mode == LPFC_INTR_ERROR) {
9130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 9131 "0426 Failed to enable interrupt.\n");
3772a991 9132 error = -ENODEV;
5b75da2f
JS
9133 goto out_free_sysfs_attr;
9134 }
0558056c 9135 /* Default to single EQ for non-MSI-X */
def9c7a9 9136 if (phba->intr_type != MSIX)
0558056c
JS
9137 adjusted_fcp_eq_count = 0;
9138 else if (phba->sli4_hba.msix_vec_nr <
9139 phba->cfg_fcp_eq_count + 1)
9140 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9141 else
9142 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
0558056c 9143 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
da0436e9
JS
9144 /* Set up SLI-4 HBA */
9145 if (lpfc_sli4_hba_setup(phba)) {
5b75da2f 9146 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 9147 "1421 Failed to set up hba\n");
5b75da2f 9148 error = -ENODEV;
da0436e9 9149 goto out_disable_intr;
5b75da2f
JS
9150 }
9151
da0436e9
JS
9152 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
9153 if (intr_mode != 0)
9154 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
9155 LPFC_ACT_INTR_CNT);
9156
9157 /* Check active interrupts received only for MSI/MSI-X */
3772a991 9158 if (intr_mode == 0 ||
da0436e9 9159 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
5b75da2f
JS
9160 /* Log the current active interrupt mode */
9161 phba->intr_mode = intr_mode;
9162 lpfc_log_intr_mode(phba, intr_mode);
9163 break;
5b75da2f 9164 }
da0436e9
JS
9165 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9166 "0451 Configure interrupt mode (%d) "
9167 "failed active interrupt test.\n",
9168 intr_mode);
2fcee4bf
JS
9169 /* Unset the previous SLI-4 HBA setup. */
9170 /*
9171 * TODO: Is this operation compatible with IF TYPE 2
9172 * devices? All port state is deleted and cleared.
9173 */
da0436e9
JS
9174 lpfc_sli4_unset_hba(phba);
9175 /* Try next level of interrupt mode */
9176 cfg_mode = --intr_mode;
98c9ea5c 9177 }
858c9f6c 9178
3772a991
JS
9179 /* Perform post initialization setup */
9180 lpfc_post_init_setup(phba);
dea3101e 9181
026abb87
JS
9182 /* check for firmware upgrade or downgrade (if_type 2 only) */
9183 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9184 LPFC_SLI_INTF_IF_TYPE_2) {
9185 snprintf(file_name, 16, "%s.grp", phba->ModelName);
9186 error = request_firmware(&fw, file_name, &phba->pcidev->dev);
9187 if (!error) {
9188 lpfc_write_firmware(phba, fw);
9189 release_firmware(fw);
9190 }
52d52440
JS
9191 }
9192
1c6834a7
JS
9193 /* Check if there are static vports to be created. */
9194 lpfc_create_static_vport(phba);
dea3101e 9195 return 0;
9196
da0436e9
JS
9197out_disable_intr:
9198 lpfc_sli4_disable_intr(phba);
5b75da2f
JS
9199out_free_sysfs_attr:
9200 lpfc_free_sysfs_attr(vport);
3772a991
JS
9201out_destroy_shost:
9202 lpfc_destroy_shost(phba);
9203out_unset_driver_resource:
9204 lpfc_unset_driver_resource_phase2(phba);
9205out_free_iocb_list:
9206 lpfc_free_iocb_list(phba);
da0436e9
JS
9207out_unset_driver_resource_s4:
9208 lpfc_sli4_driver_resource_unset(phba);
9209out_unset_pci_mem_s4:
9210 lpfc_sli4_pci_mem_unset(phba);
3772a991
JS
9211out_disable_pci_dev:
9212 lpfc_disable_pci_dev(phba);
6669f9bb
JS
9213 if (shost)
9214 scsi_host_put(shost);
2e0fef85 9215out_free_phba:
3772a991 9216 lpfc_hba_free(phba);
dea3101e 9217 return error;
9218}
9219
e59058c4 9220/**
da0436e9 9221 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
e59058c4
JS
9222 * @pdev: pointer to PCI device
9223 *
da0436e9
JS
9224 * This routine is called from the kernel's PCI subsystem to device with
9225 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
3772a991
JS
9226 * removed from PCI bus, it performs all the necessary cleanup for the HBA
9227 * device to be removed from the PCI subsystem properly.
e59058c4 9228 **/
static void __devexit
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag (under hbalock, like other
	 * load_flag updates) so in-flight paths see the unload early.
	 */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port.
	 * Index 0 is the physical port itself, hence the loop starts at 1.
	 */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	/* Detach this vport from the HBA's vport list. */
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}
9290
3a55b532 9291/**
da0436e9 9292 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
3a55b532
JS
9293 * @pdev: pointer to PCI device
9294 * @msg: power management message
9295 *
da0436e9
JS
9296 * This routine is called from the kernel's PCI subsystem to support system
9297 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
9298 * this method, it quiesces the device by stopping the driver's worker
9299 * thread for the device, turning off device's interrupt and DMA, and bring
9300 * the device offline. Note that as the driver implements the minimum PM
9301 * requirements to a power-aware driver's PM support for suspend/resume -- all
9302 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
9303 * method call will be treated as SUSPEND and the driver will fully
9304 * reinitialize its device during resume() method call, the driver will set
9305 * device to PCI_D3hot state in PCI config space instead of setting it
3772a991 9306 * according to the @msg provided by the PM.
3a55b532
JS
9307 *
9308 * Return code
3772a991
JS
9309 * 0 - driver suspended the device
9310 * Error otherwise
3a55b532
JS
9311 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device before stopping the worker thread. */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device and tear down the SLI-4 queues;
	 * resume rebuilds everything from scratch.
	 */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space, then power down.
	 * All PM messages are treated as SUSPEND, so D3hot is used
	 * regardless of @msg (see function header above).
	 */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
9336
9337/**
da0436e9 9338 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
3a55b532
JS
9339 * @pdev: pointer to PCI device
9340 *
da0436e9
JS
9341 * This routine is called from the kernel's PCI subsystem to support system
9342 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes
9343 * this method, it restores the device's PCI config space state and fully
9344 * reinitializes the device and brings it online. Note that as the driver
9345 * implements the minimum PM requirements to a power-aware driver's PM for
9346 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
9347 * to the suspend() method call will be treated as SUSPEND and the driver
9348 * will fully reinitialize its device during resume() method call, the device
9349 * will be set to PCI_D0 directly in PCI config space before restoring the
9350 * state.
3a55b532
JS
9351 *
9352 * Return code
3772a991
JS
9353 * 0 - driver suspended the device
9354 * Error otherwise
3a55b532
JS
9355 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	 /* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
			"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt, re-using the mode that was
	 * active before suspend.
	 */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
9409
75baf696
JS
9410/**
9411 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
9412 * @phba: pointer to lpfc hba data structure.
9413 *
9414 * This routine is called to prepare the SLI4 device for PCI slot recover. It
9415 * aborts all the outstanding SCSI I/Os to the pci device.
9416 **/
9417static void
9418lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
9419{
9420 struct lpfc_sli *psli = &phba->sli;
9421 struct lpfc_sli_ring *pring;
9422
9423 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9424 "2828 PCI channel I/O abort preparing for recovery\n");
9425 /*
9426 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
9427 * and let the SCSI mid-layer to retry them to recover.
9428 */
9429 pring = &psli->ring[psli->fcp_ring];
9430 lpfc_sli_abort_iocb_ring(phba, pring);
9431}
9432
9433/**
9434 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
9435 * @phba: pointer to lpfc hba data structure.
9436 *
9437 * This routine is called to prepare the SLI4 device for PCI slot reset. It
9438 * disables the device interrupt and pci device, and aborts the internal FCP
9439 * pending I/Os.
9440 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device; queues are destroyed because
	 * slot reset will reinitialize the HBA from scratch.
	 */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);
}
9464
9465/**
9466 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
9467 * @phba: pointer to lpfc hba data structure.
9468 *
9469 * This routine is called to prepare the SLI4 device for PCI slot permanently
9470 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
9471 * pending I/Os.
9472 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host first so nothing new
	 * is queued while we flush.
	 */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os; the device is
	 * going away permanently, so nothing will be retried to it.
	 */
	lpfc_sli_flush_fcp_rings(phba);
}
9488
8d63f375 9489/**
da0436e9 9490 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
e59058c4
JS
9491 * @pdev: pointer to PCI device.
9492 * @state: the current PCI connection state.
8d63f375 9493 *
da0436e9
JS
9494 * This routine is called from the PCI subsystem for error handling to device
9495 * with SLI-4 interface spec. This function is called by the PCI subsystem
9496 * after a PCI bus error affecting this device has been detected. When this
9497 * function is invoked, it will need to stop all the I/Os and interrupt(s)
9498 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
9499 * for the PCI subsystem to perform proper recovery as desired.
e59058c4
JS
9500 *
9501 * Return codes
3772a991
JS
9502 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9503 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
e59058c4 9504 **/
3772a991 9505static pci_ers_result_t
da0436e9 9506lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8d63f375 9507{
75baf696
JS
9508 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9509 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9510
9511 switch (state) {
9512 case pci_channel_io_normal:
9513 /* Non-fatal error, prepare for recovery */
9514 lpfc_sli4_prep_dev_for_recover(phba);
9515 return PCI_ERS_RESULT_CAN_RECOVER;
9516 case pci_channel_io_frozen:
9517 /* Fatal error, prepare for slot reset */
9518 lpfc_sli4_prep_dev_for_reset(phba);
9519 return PCI_ERS_RESULT_NEED_RESET;
9520 case pci_channel_io_perm_failure:
9521 /* Permanent failure, prepare for device down */
9522 lpfc_sli4_prep_dev_for_perm_failure(phba);
9523 return PCI_ERS_RESULT_DISCONNECT;
9524 default:
9525 /* Unknown state, prepare and request slot reset */
9526 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9527 "2825 Unknown PCI error state: x%x\n", state);
9528 lpfc_sli4_prep_dev_for_reset(phba);
9529 return PCI_ERS_RESULT_NEED_RESET;
9530 }
8d63f375
LV
9531}
9532
9533/**
da0436e9 9534 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
e59058c4
JS
9535 * @pdev: pointer to PCI device.
9536 *
da0436e9
JS
9537 * This routine is called from the PCI subsystem for error handling to device
9538 * with SLI-4 interface spec. It is called after PCI bus has been reset to
9539 * restart the PCI card from scratch, as if from a cold-boot. During the
9540 * PCI subsystem error recovery, after the driver returns
3772a991 9541 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
da0436e9
JS
9542 * recovery and then call this routine before calling the .resume method to
9543 * recover the device. This function will initialize the HBA device, enable
9544 * the interrupt, but it will just put the HBA to offline state without
9545 * passing any I/O traffic.
8d63f375 9546 *
e59058c4 9547 * Return codes
3772a991
JS
9548 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
9549 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8d63f375 9550 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Mark the SLI layer inactive so the .resume handler
	 * (lpfc_io_resume_s4) performs the full offline/restart/online
	 * sequence.
	 */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt, re-using the previous mode */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
9596
9597/**
da0436e9 9598 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
e59058c4 9599 * @pdev: pointer to PCI device
8d63f375 9600 *
3772a991 9601 * This routine is called from the PCI subsystem for error handling to device
da0436e9 9602 * with SLI-4 interface spec. It is called when kernel error recovery tells
3772a991
JS
9603 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9604 * error recovery. After this call, traffic can start to flow from this device
9605 * again.
da0436e9 9606 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 * (LPFC_SLI_ACTIVE is cleared by lpfc_io_slot_reset_s4 to request
	 * this full restart.)
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
9632
3772a991
JS
9633/**
9634 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
9635 * @pdev: pointer to PCI device
9636 * @pid: pointer to PCI device identifier
9637 *
9638 * This routine is to be registered to the kernel's PCI subsystem. When an
9639 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
9640 * at PCI device-specific information of the device and driver to see if the
9641 * driver state that it can support this kind of device. If the match is
9642 * successful, the driver core invokes this routine. This routine dispatches
9643 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
9644 * do all the initialization that it needs to do to handle the HBA device
9645 * properly.
9646 *
9647 * Return code
9648 * 0 - driver can claim the device
9649 * negative value - driver can not claim the device
9650 **/
9651static int __devinit
9652lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
9653{
9654 int rc;
8fa38513 9655 struct lpfc_sli_intf intf;
3772a991 9656
28baac74 9657 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
3772a991
JS
9658 return -ENODEV;
9659
8fa38513 9660 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
28baac74 9661 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
da0436e9 9662 rc = lpfc_pci_probe_one_s4(pdev, pid);
8fa38513 9663 else
3772a991 9664 rc = lpfc_pci_probe_one_s3(pdev, pid);
8fa38513 9665
3772a991
JS
9666 return rc;
9667}
9668
9669/**
9670 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
9671 * @pdev: pointer to PCI device
9672 *
9673 * This routine is to be registered to the kernel's PCI subsystem. When an
9674 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
9675 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
9676 * remove routine, which will perform all the necessary cleanup for the
9677 * device to be removed from the PCI subsystem properly.
9678 **/
9679static void __devexit
9680lpfc_pci_remove_one(struct pci_dev *pdev)
9681{
9682 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9683 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9684
9685 switch (phba->pci_dev_grp) {
9686 case LPFC_PCI_DEV_LP:
9687 lpfc_pci_remove_one_s3(pdev);
9688 break;
da0436e9
JS
9689 case LPFC_PCI_DEV_OC:
9690 lpfc_pci_remove_one_s4(pdev);
9691 break;
3772a991
JS
9692 default:
9693 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9694 "1424 Invalid PCI device group: 0x%x\n",
9695 phba->pci_dev_grp);
9696 break;
9697 }
9698 return;
9699}
9700
9701/**
9702 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
9703 * @pdev: pointer to PCI device
9704 * @msg: power management message
9705 *
9706 * This routine is to be registered to the kernel's PCI subsystem to support
9707 * system Power Management (PM). When PM invokes this method, it dispatches
9708 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
9709 * suspend the device.
9710 *
9711 * Return code
9712 * 0 - driver suspended the device
9713 * Error otherwise
9714 **/
9715static int
9716lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
9717{
9718 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9719 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9720 int rc = -ENODEV;
9721
9722 switch (phba->pci_dev_grp) {
9723 case LPFC_PCI_DEV_LP:
9724 rc = lpfc_pci_suspend_one_s3(pdev, msg);
9725 break;
da0436e9
JS
9726 case LPFC_PCI_DEV_OC:
9727 rc = lpfc_pci_suspend_one_s4(pdev, msg);
9728 break;
3772a991
JS
9729 default:
9730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9731 "1425 Invalid PCI device group: 0x%x\n",
9732 phba->pci_dev_grp);
9733 break;
9734 }
9735 return rc;
9736}
9737
9738/**
9739 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
9740 * @pdev: pointer to PCI device
9741 *
9742 * This routine is to be registered to the kernel's PCI subsystem to support
9743 * system Power Management (PM). When PM invokes this method, it dispatches
9744 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
9745 * resume the device.
9746 *
9747 * Return code
9748 * 0 - driver suspended the device
9749 * Error otherwise
9750 **/
9751static int
9752lpfc_pci_resume_one(struct pci_dev *pdev)
9753{
9754 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9755 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9756 int rc = -ENODEV;
9757
9758 switch (phba->pci_dev_grp) {
9759 case LPFC_PCI_DEV_LP:
9760 rc = lpfc_pci_resume_one_s3(pdev);
9761 break;
da0436e9
JS
9762 case LPFC_PCI_DEV_OC:
9763 rc = lpfc_pci_resume_one_s4(pdev);
9764 break;
3772a991
JS
9765 default:
9766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9767 "1426 Invalid PCI device group: 0x%x\n",
9768 phba->pci_dev_grp);
9769 break;
9770 }
9771 return rc;
9772}
9773
9774/**
9775 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
9776 * @pdev: pointer to PCI device.
9777 * @state: the current PCI connection state.
9778 *
9779 * This routine is registered to the PCI subsystem for error handling. This
9780 * function is called by the PCI subsystem after a PCI bus error affecting
9781 * this device has been detected. When this routine is invoked, it dispatches
9782 * the action to the proper SLI-3 or SLI-4 device error detected handling
9783 * routine, which will perform the proper error detected operation.
9784 *
9785 * Return codes
9786 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9787 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9788 **/
9789static pci_ers_result_t
9790lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
9791{
9792 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9793 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9794 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9795
9796 switch (phba->pci_dev_grp) {
9797 case LPFC_PCI_DEV_LP:
9798 rc = lpfc_io_error_detected_s3(pdev, state);
9799 break;
da0436e9
JS
9800 case LPFC_PCI_DEV_OC:
9801 rc = lpfc_io_error_detected_s4(pdev, state);
9802 break;
3772a991
JS
9803 default:
9804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9805 "1427 Invalid PCI device group: 0x%x\n",
9806 phba->pci_dev_grp);
9807 break;
9808 }
9809 return rc;
9810}
9811
9812/**
9813 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
9814 * @pdev: pointer to PCI device.
9815 *
9816 * This routine is registered to the PCI subsystem for error handling. This
9817 * function is called after PCI bus has been reset to restart the PCI card
9818 * from scratch, as if from a cold-boot. When this routine is invoked, it
9819 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
9820 * routine, which will perform the proper device reset.
9821 *
9822 * Return codes
9823 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
9824 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9825 **/
9826static pci_ers_result_t
9827lpfc_io_slot_reset(struct pci_dev *pdev)
9828{
9829 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9830 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9831 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9832
9833 switch (phba->pci_dev_grp) {
9834 case LPFC_PCI_DEV_LP:
9835 rc = lpfc_io_slot_reset_s3(pdev);
9836 break;
da0436e9
JS
9837 case LPFC_PCI_DEV_OC:
9838 rc = lpfc_io_slot_reset_s4(pdev);
9839 break;
3772a991
JS
9840 default:
9841 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9842 "1428 Invalid PCI device group: 0x%x\n",
9843 phba->pci_dev_grp);
9844 break;
9845 }
9846 return rc;
9847}
9848
9849/**
9850 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
9851 * @pdev: pointer to PCI device
9852 *
9853 * This routine is registered to the PCI subsystem for error handling. It
9854 * is called when kernel error recovery tells the lpfc driver that it is
9855 * OK to resume normal PCI operation after PCI bus error recovery. When
9856 * this routine is invoked, it dispatches the action to the proper SLI-3
9857 * or SLI-4 device io_resume routine, which will resume the device operation.
9858 **/
9859static void
9860lpfc_io_resume(struct pci_dev *pdev)
9861{
9862 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9863 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9864
9865 switch (phba->pci_dev_grp) {
9866 case LPFC_PCI_DEV_LP:
9867 lpfc_io_resume_s3(pdev);
9868 break;
da0436e9
JS
9869 case LPFC_PCI_DEV_OC:
9870 lpfc_io_resume_s4(pdev);
9871 break;
3772a991
JS
9872 default:
9873 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9874 "1429 Invalid PCI device group: 0x%x\n",
9875 phba->pci_dev_grp);
9876 break;
9877 }
9878 return;
9879}
9880
dea3101e 9881static struct pci_device_id lpfc_id_table[] = {
9882 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
9883 PCI_ANY_ID, PCI_ANY_ID, },
06325e74
JSEC
9884 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
9885 PCI_ANY_ID, PCI_ANY_ID, },
dea3101e 9886 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
9887 PCI_ANY_ID, PCI_ANY_ID, },
9888 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
9889 PCI_ANY_ID, PCI_ANY_ID, },
9890 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
9891 PCI_ANY_ID, PCI_ANY_ID, },
9892 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
9893 PCI_ANY_ID, PCI_ANY_ID, },
9894 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
9895 PCI_ANY_ID, PCI_ANY_ID, },
9896 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
9897 PCI_ANY_ID, PCI_ANY_ID, },
9898 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
9899 PCI_ANY_ID, PCI_ANY_ID, },
e4adb204
JSEC
9900 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
9901 PCI_ANY_ID, PCI_ANY_ID, },
9902 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
9903 PCI_ANY_ID, PCI_ANY_ID, },
9904 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
9905 PCI_ANY_ID, PCI_ANY_ID, },
dea3101e 9906 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
9907 PCI_ANY_ID, PCI_ANY_ID, },
e4adb204
JSEC
9908 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
9909 PCI_ANY_ID, PCI_ANY_ID, },
9910 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
9911 PCI_ANY_ID, PCI_ANY_ID, },
dea3101e 9912 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
9913 PCI_ANY_ID, PCI_ANY_ID, },
9914 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
9915 PCI_ANY_ID, PCI_ANY_ID, },
9916 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
9917 PCI_ANY_ID, PCI_ANY_ID, },
84774a4d
JS
9918 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
9919 PCI_ANY_ID, PCI_ANY_ID, },
e4adb204
JSEC
9920 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
9921 PCI_ANY_ID, PCI_ANY_ID, },
9922 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
9923 PCI_ANY_ID, PCI_ANY_ID, },
dea3101e 9924 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
9925 PCI_ANY_ID, PCI_ANY_ID, },
9926 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
9927 PCI_ANY_ID, PCI_ANY_ID, },
9928 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
9929 PCI_ANY_ID, PCI_ANY_ID, },
9930 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
9931 PCI_ANY_ID, PCI_ANY_ID, },
9932 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
9933 PCI_ANY_ID, PCI_ANY_ID, },
e4adb204
JSEC
9934 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
9935 PCI_ANY_ID, PCI_ANY_ID, },
9936 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
9937 PCI_ANY_ID, PCI_ANY_ID, },
b87eab38
JS
9938 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
9939 PCI_ANY_ID, PCI_ANY_ID, },
9940 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
9941 PCI_ANY_ID, PCI_ANY_ID, },
9942 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
9943 PCI_ANY_ID, PCI_ANY_ID, },
9944 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
9945 PCI_ANY_ID, PCI_ANY_ID, },
9946 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
9947 PCI_ANY_ID, PCI_ANY_ID, },
9948 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
9949 PCI_ANY_ID, PCI_ANY_ID, },
84774a4d
JS
9950 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
9951 PCI_ANY_ID, PCI_ANY_ID, },
9952 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
9953 PCI_ANY_ID, PCI_ANY_ID, },
9954 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
9955 PCI_ANY_ID, PCI_ANY_ID, },
3772a991
JS
9956 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
9957 PCI_ANY_ID, PCI_ANY_ID, },
a747c9ce
JS
9958 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
9959 PCI_ANY_ID, PCI_ANY_ID, },
9960 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
6669f9bb 9961 PCI_ANY_ID, PCI_ANY_ID, },
98fc5dd9
JS
9962 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
9963 PCI_ANY_ID, PCI_ANY_ID, },
085c647c
JS
9964 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
9965 PCI_ANY_ID, PCI_ANY_ID, },
9966 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
9967 PCI_ANY_ID, PCI_ANY_ID, },
c0c11512
JS
9968 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
9969 PCI_ANY_ID, PCI_ANY_ID, },
9970 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
9971 PCI_ANY_ID, PCI_ANY_ID, },
dea3101e 9972 { 0 }
9973};
9974
9975MODULE_DEVICE_TABLE(pci, lpfc_id_table);
9976
8d63f375
LV
9977static struct pci_error_handlers lpfc_err_handler = {
9978 .error_detected = lpfc_io_error_detected,
9979 .slot_reset = lpfc_io_slot_reset,
9980 .resume = lpfc_io_resume,
9981};
9982
dea3101e 9983static struct pci_driver lpfc_driver = {
9984 .name = LPFC_DRIVER_NAME,
9985 .id_table = lpfc_id_table,
9986 .probe = lpfc_pci_probe_one,
9987 .remove = __devexit_p(lpfc_pci_remove_one),
3a55b532 9988 .suspend = lpfc_pci_suspend_one,
3772a991 9989 .resume = lpfc_pci_resume_one,
2e0fef85 9990 .err_handler = &lpfc_err_handler,
dea3101e 9991};
9992
e59058c4 9993/**
3621a710 9994 * lpfc_init - lpfc module initialization routine
e59058c4
JS
9995 *
9996 * This routine is to be invoked when the lpfc module is loaded into the
9997 * kernel. The special kernel macro module_init() is used to indicate the
9998 * role of this routine to the kernel as lpfc module entry point.
9999 *
10000 * Return codes
10001 * 0 - successful
10002 * -ENOMEM - FC attach transport failed
10003 * all others - failed
10004 */
dea3101e 10005static int __init
10006lpfc_init(void)
10007{
10008 int error = 0;
10009
10010 printk(LPFC_MODULE_DESC "\n");
c44ce173 10011 printk(LPFC_COPYRIGHT "\n");
dea3101e 10012
7ee5d43e
JS
10013 if (lpfc_enable_npiv) {
10014 lpfc_transport_functions.vport_create = lpfc_vport_create;
10015 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
10016 }
dea3101e 10017 lpfc_transport_template =
10018 fc_attach_transport(&lpfc_transport_functions);
7ee5d43e 10019 if (lpfc_transport_template == NULL)
dea3101e 10020 return -ENOMEM;
7ee5d43e 10021 if (lpfc_enable_npiv) {
7ee5d43e 10022 lpfc_vport_transport_template =
98c9ea5c
JS
10023 fc_attach_transport(&lpfc_vport_transport_functions);
10024 if (lpfc_vport_transport_template == NULL) {
10025 fc_release_transport(lpfc_transport_template);
7ee5d43e 10026 return -ENOMEM;
98c9ea5c 10027 }
7ee5d43e 10028 }
dea3101e 10029 error = pci_register_driver(&lpfc_driver);
92d7f7b0 10030 if (error) {
dea3101e 10031 fc_release_transport(lpfc_transport_template);
d7c255b2
JS
10032 if (lpfc_enable_npiv)
10033 fc_release_transport(lpfc_vport_transport_template);
92d7f7b0 10034 }
dea3101e 10035
10036 return error;
10037}
10038
e59058c4 10039/**
3621a710 10040 * lpfc_exit - lpfc module removal routine
e59058c4
JS
10041 *
10042 * This routine is invoked when the lpfc module is removed from the kernel.
10043 * The special kernel macro module_exit() is used to indicate the role of
10044 * this routine to the kernel as lpfc module exit point.
10045 */
dea3101e 10046static void __exit
10047lpfc_exit(void)
10048{
10049 pci_unregister_driver(&lpfc_driver);
10050 fc_release_transport(lpfc_transport_template);
7ee5d43e
JS
10051 if (lpfc_enable_npiv)
10052 fc_release_transport(lpfc_vport_transport_template);
81301a9b 10053 if (_dump_buf_data) {
6a9c52cf
JS
10054 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
10055 "_dump_buf_data at 0x%p\n",
81301a9b
JS
10056 (1L << _dump_buf_data_order), _dump_buf_data);
10057 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
10058 }
10059
10060 if (_dump_buf_dif) {
6a9c52cf
JS
10061 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
10062 "_dump_buf_dif at 0x%p\n",
81301a9b
JS
10063 (1L << _dump_buf_dif_order), _dump_buf_dif);
10064 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
10065 }
dea3101e 10066}
10067
/* Module entry/exit points and modinfo metadata. */
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
This page took 1.139055 seconds and 5 git commands to generate.