[SCSI] lpfc 8.3.45: Fixed driver error messages after firmware download
drivers/scsi/lpfc/lpfc_init.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

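		/*
		 * Issue READ_NVPARAM with the (now big-endian) license text
		 * placed in the reserved words; on success the response
		 * carries the adapter's WWNN and WWPN.
		 */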
		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's asynchronous event
 * configuration mailbox command to the device. If the mailbox command
 * returns successfully, it will set the internal async event support flag
 * to 1; otherwise, it will set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

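	/*
	 * Example with hypothetical field values: ver=5, rev=1, lev=0,
	 * dist=0 ('n') and num=3 decode to "5.10n3"; dist=3 with num=0
	 * drops the distribution suffix and decodes to just "5.10".
	 */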
	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
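		/*
		 * Walk the six IEEE bytes of the WWNN, emitting two serial
		 * number characters per byte: nibble values 0-9 map to
		 * '0'-'9' (0x30 base), 10-15 map to 'a'-'f' (0x61 base).
		 */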
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
					KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

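/*
 * Usage sketch (hypothetical caller): with lpfc_suppress_link_up set,
 * lpfc_config_port_post skips INIT_LINK and the link can be brought up
 * later through the jump-table entry:
 *
 *	rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
 */
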
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

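	/*
	 * Validate any user-forced link speed against the adapter's
	 * link-speed capability mask (lmt, reported by READ_CONFIG);
	 * an unsupported speed falls back to auto-negotiation.
	 */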
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			 &aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
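	/* Dispatch through the SLI-rev specific handler (lpfc_hba_down_post_s3
	 * or lpfc_hba_down_post_s4) installed in the phba API jump table.
	 */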
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
		       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
		       jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

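	/*
	 * Age out deferred ELS buffers: if nothing was added to the list
	 * since the last heartbeat pass (count unchanged), free them all.
	 */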
	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  msecs_to_jiffies(1000 *
							LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
							     MBX_NOWAIT);

				if (retval != MBX_BUSY &&
				    retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
						     phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  msecs_to_jiffies(1000 *
							LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs,
			phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause I/Os to
	 * be dropped by the firmware. Error out the iocbs (I/Os) on txcmplq
	 * and let the SCSI layer retry them after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

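/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to a mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This helper posts an FC_REG_BOARD_EVENT / LPFC_EVENT_PORTINTERR vendor
 * event through the FC transport so that a management application listening
 * for LPFC_NL_VENDOR_ID events is notified of the board error.
 **/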
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error out the iocbs (I/Os) on txcmplq and let the SCSI
		 * layer retry them after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: flag to enable the "Reset Needed" recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	/*
	 * On error status condition, the driver needs to wait for the port
	 * to become ready before performing the reset.
	 */
	rc = lpfc_sli4_pdev_status_reg_wait(phba);
	if (!rc) {
		/* need reset: attempt for port recovery */
		if (en_rn_msg)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Reset Needed: Attempting Port "
					"Recovery...\n");
		lpfc_offline_prep(phba, mbx_action);
		lpfc_offline(phba);
		/* release interrupt for possible resource change */
		lpfc_sli4_disable_intr(phba);
		lpfc_sli_brdrestart(phba);
		/* request and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3175 Failed to enable interrupt\n");
			return -EIO;
		} else {
			phba->intr_mode = intr_mode;
		}
		rc = lpfc_online(phba);
		if (rc == 0)
			lpfc_unblock_mgmt_io(phba);
	}
	return rc;
}

da0436e9
JS
1510/**
1511 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1512 * @phba: pointer to lpfc hba data structure.
1513 *
1514 * This routine is invoked to handle the SLI4 HBA hardware error attention
1515 * conditions.
1516 **/
1517static void
1518lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1519{
1520 struct lpfc_vport *vport = phba->pport;
1521 uint32_t event_data;
1522 struct Scsi_Host *shost;
2fcee4bf 1523 uint32_t if_type;
2e90f4b5
JS
1524 struct lpfc_register portstat_reg = {0};
1525 uint32_t reg_err1, reg_err2;
1526 uint32_t uerrlo_reg, uemasklo_reg;
1527 uint32_t pci_rd_rc1, pci_rd_rc2;
e10b2022 1528 bool en_rn_msg = true;
73d91e50 1529 int rc;
da0436e9
JS
1530
1531 /* If the pci channel is offline, ignore possible errors, since
1532 * we cannot communicate with the pci card anyway.
1533 */
1534 if (pci_channel_offline(phba->pcidev))
1535 return;
1536 /* If resets are disabled then leave the HBA alone and return */
1537 if (!phba->cfg_enable_hba_reset)
1538 return;
1539
2fcee4bf
JS
1540 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1541 switch (if_type) {
1542 case LPFC_SLI_INTF_IF_TYPE_0:
2e90f4b5
JS
1543 pci_rd_rc1 = lpfc_readl(
1544 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1545 &uerrlo_reg);
1546 pci_rd_rc2 = lpfc_readl(
1547 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1548 &uemasklo_reg);
1549 /* consider PCI bus read error as pci_channel_offline */
1550 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1551 return;
2fcee4bf
JS
1552 lpfc_sli4_offline_eratt(phba);
1553 break;
1554 case LPFC_SLI_INTF_IF_TYPE_2:
2e90f4b5
JS
1555 pci_rd_rc1 = lpfc_readl(
1556 phba->sli4_hba.u.if_type2.STATUSregaddr,
1557 &portstat_reg.word0);
1558 /* consider PCI bus read error as pci_channel_offline */
6b5151fd
JS
1559 if (pci_rd_rc1 == -EIO) {
1560 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1561 "3151 PCI bus read access failure: x%x\n",
1562 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2e90f4b5 1563 return;
6b5151fd 1564 }
2e90f4b5
JS
1565 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1566 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2fcee4bf
JS
1567 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1568 /* TODO: Register for Overtemp async events. */
1569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1570 "2889 Port Overtemperature event, "
026abb87 1571 "taking port offline\n");
2fcee4bf
JS
1572 spin_lock_irq(&phba->hbalock);
1573 phba->over_temp_state = HBA_OVER_TEMP;
1574 spin_unlock_irq(&phba->hbalock);
1575 lpfc_sli4_offline_eratt(phba);
2e90f4b5 1576 break;
2fcee4bf 1577 }
2e90f4b5 1578 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
e10b2022 1579 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2e90f4b5 1580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e10b2022
JS
1581 "3143 Port Down: Firmware Update "
1582 "Detected\n");
1583 en_rn_msg = false;
1584 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2e90f4b5
JS
1585 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1586 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1587 "3144 Port Down: Debug Dump\n");
1588 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1589 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1590 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1591 "3145 Port Down: Provisioning\n");
618a5230
JS
1592
1593 /* Check port status register for function reset */
e10b2022
JS
1594 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1595 en_rn_msg);
618a5230
JS
1596 if (rc == 0) {
1597 /* don't report event on forced debug dump */
1598 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1599 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1600 return;
1601 else
1602 break;
2fcee4bf 1603 }
618a5230 1604 /* fall through if unable to recover */
6b5151fd
JS
1605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1606 "3152 Unrecoverable error, bring the port "
1607 "offline\n");
2fcee4bf
JS
1608 lpfc_sli4_offline_eratt(phba);
1609 break;
1610 case LPFC_SLI_INTF_IF_TYPE_1:
1611 default:
1612 break;
1613 }
2e90f4b5
JS
1614 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1615 "3123 Report dump event to upper layer\n");
1616 /* Send an internal error event to mgmt application */
1617 lpfc_board_errevt_to_mgmt(phba);
1618
1619 event_data = FC_REG_DUMP_EVENT;
1620 shost = lpfc_shost_from_vport(vport);
1621 fc_host_post_vendor_event(shost, fc_get_event_number(),
1622 sizeof(event_data), (char *) &event_data,
1623 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
da0436e9
JS
1624}
1625
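/*
 * Illustrative sketch (not part of the build): any driver condition
 * can be surfaced to management applications through the same FC
 * transport vendor-unique event mechanism used above; the payload
 * layout here mirrors the dump event and is an assumption for the
 * example only.
 */
#if 0
	uint32_t ev_code = FC_REG_DUMP_EVENT;

	fc_host_post_vendor_event(lpfc_shost_from_vport(phba->pport),
				  fc_get_event_number(),
				  sizeof(ev_code), (char *)&ev_code,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
#endif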
1626/**
1627 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1628 * @phba: pointer to lpfc HBA data structure.
1629 *
1630 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 1631 * routine, dispatching through the API jump table function pointer held
 1632 * in the lpfc_hba struct. It has no return value; all error handling is
 1633 * done by the dispatched SLI specific routine.
1636 **/
1637void
1638lpfc_handle_eratt(struct lpfc_hba *phba)
1639{
1640 (*phba->lpfc_handle_eratt)(phba);
1641}
1642
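/*
 * Sketch of the jump-table setup referred to above (assumed to be done
 * during API table initialization elsewhere in this file; shown only
 * to illustrate the dispatch):
 */
#if 0
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:		/* SLI3 LightPulse HBA */
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		break;
	case LPFC_PCI_DEV_OC:		/* SLI4 OneConnect HBA */
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		break;
	}
#endif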
e59058c4 1643/**
3621a710 1644 * lpfc_handle_latt - The HBA link event handler
e59058c4
JS
1645 * @phba: pointer to lpfc hba data structure.
1646 *
1647 * This routine is invoked from the worker thread to handle a HBA host
1648 * attention link event.
1649 **/
dea3101e 1650void
2e0fef85 1651lpfc_handle_latt(struct lpfc_hba *phba)
dea3101e 1652{
2e0fef85
JS
1653 struct lpfc_vport *vport = phba->pport;
1654 struct lpfc_sli *psli = &phba->sli;
dea3101e 1655 LPFC_MBOXQ_t *pmb;
1656 volatile uint32_t control;
1657 struct lpfc_dmabuf *mp;
09372820 1658 int rc = 0;
dea3101e 1659
1660 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
09372820
JS
1661 if (!pmb) {
1662 rc = 1;
dea3101e 1663 goto lpfc_handle_latt_err_exit;
09372820 1664 }
dea3101e 1665
1666 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
09372820
JS
1667 if (!mp) {
1668 rc = 2;
dea3101e 1669 goto lpfc_handle_latt_free_pmb;
09372820 1670 }
dea3101e 1671
1672 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
09372820
JS
1673 if (!mp->virt) {
1674 rc = 3;
dea3101e 1675 goto lpfc_handle_latt_free_mp;
09372820 1676 }
dea3101e 1677
6281bfe0 1678 /* Cleanup any outstanding ELS commands */
549e55cd 1679 lpfc_els_flush_all_cmd(phba);
dea3101e 1680
1681 psli->slistat.link_event++;
76a95d75
JS
1682 lpfc_read_topology(phba, pmb, mp);
1683 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2e0fef85 1684 pmb->vport = vport;
0d2b6b83
JS
1685 /* Block ELS IOCBs until we have processed this mbox command */
1686 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
0b727fea 1687 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
09372820
JS
1688 if (rc == MBX_NOT_FINISHED) {
1689 rc = 4;
14691150 1690 goto lpfc_handle_latt_free_mbuf;
09372820 1691 }
dea3101e 1692
1693 /* Clear Link Attention in HA REG */
2e0fef85 1694 spin_lock_irq(&phba->hbalock);
dea3101e 1695 writel(HA_LATT, phba->HAregaddr);
1696 readl(phba->HAregaddr); /* flush */
2e0fef85 1697 spin_unlock_irq(&phba->hbalock);
dea3101e 1698
1699 return;
1700
14691150 1701lpfc_handle_latt_free_mbuf:
0d2b6b83 1702 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
14691150 1703 lpfc_mbuf_free(phba, mp->virt, mp->phys);
dea3101e 1704lpfc_handle_latt_free_mp:
1705 kfree(mp);
1706lpfc_handle_latt_free_pmb:
1dcb58e5 1707 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 1708lpfc_handle_latt_err_exit:
1709 /* Enable Link attention interrupts */
2e0fef85 1710 spin_lock_irq(&phba->hbalock);
dea3101e 1711 psli->sli_flag |= LPFC_PROCESS_LA;
1712 control = readl(phba->HCregaddr);
1713 control |= HC_LAINT_ENA;
1714 writel(control, phba->HCregaddr);
1715 readl(phba->HCregaddr); /* flush */
1716
1717 /* Clear Link Attention in HA REG */
1718 writel(HA_LATT, phba->HAregaddr);
1719 readl(phba->HAregaddr); /* flush */
2e0fef85 1720 spin_unlock_irq(&phba->hbalock);
dea3101e 1721 lpfc_linkdown(phba);
2e0fef85 1722 phba->link_state = LPFC_HBA_ERROR;
dea3101e 1723
09372820
JS
1724 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1725 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
dea3101e 1726
1727 return;
1728}
1729
e59058c4 1730/**
3621a710 1731 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
e59058c4
JS
1732 * @phba: pointer to lpfc hba data structure.
1733 * @vpd: pointer to the vital product data.
1734 * @len: length of the vital product data in bytes.
1735 *
1736 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 1737 * an array of characters. In this routine, the ModelName, ProgramType,
 1738 * ModelDesc, etc. fields of the phba data structure will be populated.
1739 *
1740 * Return codes
1741 * 0 - pointer to the VPD passed in is NULL
1742 * 1 - success
1743 **/
3772a991 1744int
2e0fef85 1745lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
dea3101e 1746{
1747 uint8_t lenlo, lenhi;
07da60c1 1748 int Length;
dea3101e 1749 int i, j;
1750 int finished = 0;
1751 int index = 0;
1752
1753 if (!vpd)
1754 return 0;
1755
1756 /* Vital Product */
ed957684 1757 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011 1758 "0455 Vital Product Data: x%x x%x x%x x%x\n",
dea3101e 1759 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1760 (uint32_t) vpd[3]);
74b72a59 1761 while (!finished && (index < (len - 4))) {
dea3101e 1762 switch (vpd[index]) {
1763 case 0x82:
74b72a59 1764 case 0x91:
dea3101e 1765 index += 1;
1766 lenlo = vpd[index];
1767 index += 1;
1768 lenhi = vpd[index];
1769 index += 1;
1770 i = ((((unsigned short)lenhi) << 8) + lenlo);
1771 index += i;
1772 break;
1773 case 0x90:
1774 index += 1;
1775 lenlo = vpd[index];
1776 index += 1;
1777 lenhi = vpd[index];
1778 index += 1;
1779 Length = ((((unsigned short)lenhi) << 8) + lenlo);
74b72a59
JW
1780 if (Length > len - index)
1781 Length = len - index;
dea3101e 1782 while (Length > 0) {
1783 /* Look for Serial Number */
1784 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1785 index += 2;
1786 i = vpd[index];
1787 index += 1;
1788 j = 0;
1789 Length -= (3+i);
1790 while(i--) {
1791 phba->SerialNumber[j++] = vpd[index++];
1792 if (j == 31)
1793 break;
1794 }
1795 phba->SerialNumber[j] = 0;
1796 continue;
1797 }
1798 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1799 phba->vpd_flag |= VPD_MODEL_DESC;
1800 index += 2;
1801 i = vpd[index];
1802 index += 1;
1803 j = 0;
1804 Length -= (3+i);
1805 while(i--) {
1806 phba->ModelDesc[j++] = vpd[index++];
1807 if (j == 255)
1808 break;
1809 }
1810 phba->ModelDesc[j] = 0;
1811 continue;
1812 }
1813 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1814 phba->vpd_flag |= VPD_MODEL_NAME;
1815 index += 2;
1816 i = vpd[index];
1817 index += 1;
1818 j = 0;
1819 Length -= (3+i);
1820 while(i--) {
1821 phba->ModelName[j++] = vpd[index++];
1822 if (j == 79)
1823 break;
1824 }
1825 phba->ModelName[j] = 0;
1826 continue;
1827 }
1828 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1829 phba->vpd_flag |= VPD_PROGRAM_TYPE;
1830 index += 2;
1831 i = vpd[index];
1832 index += 1;
1833 j = 0;
1834 Length -= (3+i);
1835 while(i--) {
1836 phba->ProgramType[j++] = vpd[index++];
1837 if (j == 255)
1838 break;
1839 }
1840 phba->ProgramType[j] = 0;
1841 continue;
1842 }
1843 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1844 phba->vpd_flag |= VPD_PORT;
1845 index += 2;
1846 i = vpd[index];
1847 index += 1;
1848 j = 0;
1849 Length -= (3+i);
1850 while(i--) {
cd1c8301
JS
1851 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1852 (phba->sli4_hba.pport_name_sta ==
1853 LPFC_SLI4_PPNAME_GET)) {
1854 j++;
1855 index++;
1856 } else
1857 phba->Port[j++] = vpd[index++];
1858 if (j == 19)
1859 break;
dea3101e 1860 }
cd1c8301
JS
1861 if ((phba->sli_rev != LPFC_SLI_REV4) ||
1862 (phba->sli4_hba.pport_name_sta ==
1863 LPFC_SLI4_PPNAME_NON))
1864 phba->Port[j] = 0;
dea3101e 1865 continue;
1866 }
1867 else {
1868 index += 2;
1869 i = vpd[index];
1870 index += 1;
1871 index += i;
1872 Length -= (3 + i);
1873 }
1874 }
1875 finished = 0;
1876 break;
1877 case 0x78:
1878 finished = 1;
1879 break;
1880 default:
1881 index ++;
1882 break;
1883 }
74b72a59 1884 }
dea3101e 1885
1886 return(1);
1887}
1888
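/*
 * VPD layout sketch (illustrative): the loop above walks PCI VPD
 * large resource tags - 0x82 (identifier string), 0x90 (read-only
 * area) and 0x78 (end tag) - where each large tag is followed by a
 * little-endian 16-bit length.  Inside the 0x90 area, keyword records
 * such as "SN" (serial number) and "V1".."V4" (vendor specific) carry
 * a 2-byte keyword plus a 1-byte payload length:
 */
#if 0
	uint8_t tag = vpd[index];			/* resource tag */
	uint16_t length = vpd[index + 1] |
			  (vpd[index + 2] << 8);	/* lenlo, lenhi */
	uint8_t *payload = &vpd[index + 3];
#endif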
e59058c4 1889/**
3621a710 1890 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
e59058c4
JS
1891 * @phba: pointer to lpfc hba data structure.
1892 * @mdp: pointer to the data structure to hold the derived model name.
1893 * @descp: pointer to the data structure to hold the derived description.
1894 *
 1895 * This routine retrieves the HBA's description based on its registered PCI device
1896 * ID. The @descp passed into this function points to an array of 256 chars. It
1897 * shall be returned with the model name, maximum speed, and the host bus type.
1898 * The @mdp passed into this function points to an array of 80 chars. When the
1899 * function returns, the @mdp will be filled with the model name.
1900 **/
dea3101e 1901static void
2e0fef85 1902lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
dea3101e 1903{
1904 lpfc_vpd_t *vp;
fefcb2b6 1905 uint16_t dev_id = phba->pcidev->device;
74b72a59 1906 int max_speed;
84774a4d 1907 int GE = 0;
da0436e9 1908 int oneConnect = 0; /* default is not a oneConnect */
74b72a59 1909 struct {
a747c9ce
JS
1910 char *name;
1911 char *bus;
1912 char *function;
1913 } m = {"<Unknown>", "", ""};
74b72a59
JW
1914
1915 if (mdp && mdp[0] != '\0'
1916 && descp && descp[0] != '\0')
1917 return;
1918
c0c11512
JS
1919 if (phba->lmt & LMT_16Gb)
1920 max_speed = 16;
1921 else if (phba->lmt & LMT_10Gb)
74b72a59
JW
1922 max_speed = 10;
1923 else if (phba->lmt & LMT_8Gb)
1924 max_speed = 8;
1925 else if (phba->lmt & LMT_4Gb)
1926 max_speed = 4;
1927 else if (phba->lmt & LMT_2Gb)
1928 max_speed = 2;
4169d868 1929 else if (phba->lmt & LMT_1Gb)
74b72a59 1930 max_speed = 1;
4169d868
JS
1931 else
1932 max_speed = 0;
dea3101e 1933
1934 vp = &phba->vpd;
dea3101e 1935
e4adb204 1936 switch (dev_id) {
06325e74 1937 case PCI_DEVICE_ID_FIREFLY:
a747c9ce 1938 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
06325e74 1939 break;
dea3101e 1940 case PCI_DEVICE_ID_SUPERFLY:
1941 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
a747c9ce
JS
1942 m = (typeof(m)){"LP7000", "PCI",
1943 "Fibre Channel Adapter"};
dea3101e 1944 else
a747c9ce
JS
1945 m = (typeof(m)){"LP7000E", "PCI",
1946 "Fibre Channel Adapter"};
dea3101e 1947 break;
1948 case PCI_DEVICE_ID_DRAGONFLY:
a747c9ce
JS
1949 m = (typeof(m)){"LP8000", "PCI",
1950 "Fibre Channel Adapter"};
dea3101e 1951 break;
1952 case PCI_DEVICE_ID_CENTAUR:
1953 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
a747c9ce
JS
1954 m = (typeof(m)){"LP9002", "PCI",
1955 "Fibre Channel Adapter"};
dea3101e 1956 else
a747c9ce
JS
1957 m = (typeof(m)){"LP9000", "PCI",
1958 "Fibre Channel Adapter"};
dea3101e 1959 break;
1960 case PCI_DEVICE_ID_RFLY:
a747c9ce
JS
1961 m = (typeof(m)){"LP952", "PCI",
1962 "Fibre Channel Adapter"};
dea3101e 1963 break;
1964 case PCI_DEVICE_ID_PEGASUS:
a747c9ce
JS
1965 m = (typeof(m)){"LP9802", "PCI-X",
1966 "Fibre Channel Adapter"};
dea3101e 1967 break;
1968 case PCI_DEVICE_ID_THOR:
a747c9ce
JS
1969 m = (typeof(m)){"LP10000", "PCI-X",
1970 "Fibre Channel Adapter"};
dea3101e 1971 break;
1972 case PCI_DEVICE_ID_VIPER:
a747c9ce
JS
1973 m = (typeof(m)){"LPX1000", "PCI-X",
1974 "Fibre Channel Adapter"};
dea3101e 1975 break;
1976 case PCI_DEVICE_ID_PFLY:
a747c9ce
JS
1977 m = (typeof(m)){"LP982", "PCI-X",
1978 "Fibre Channel Adapter"};
dea3101e 1979 break;
1980 case PCI_DEVICE_ID_TFLY:
a747c9ce
JS
1981 m = (typeof(m)){"LP1050", "PCI-X",
1982 "Fibre Channel Adapter"};
dea3101e 1983 break;
1984 case PCI_DEVICE_ID_HELIOS:
a747c9ce
JS
1985 m = (typeof(m)){"LP11000", "PCI-X2",
1986 "Fibre Channel Adapter"};
dea3101e 1987 break;
e4adb204 1988 case PCI_DEVICE_ID_HELIOS_SCSP:
a747c9ce
JS
1989 m = (typeof(m)){"LP11000-SP", "PCI-X2",
1990 "Fibre Channel Adapter"};
e4adb204
JSEC
1991 break;
1992 case PCI_DEVICE_ID_HELIOS_DCSP:
a747c9ce
JS
1993 m = (typeof(m)){"LP11002-SP", "PCI-X2",
1994 "Fibre Channel Adapter"};
e4adb204
JSEC
1995 break;
1996 case PCI_DEVICE_ID_NEPTUNE:
a747c9ce 1997 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
e4adb204
JSEC
1998 break;
1999 case PCI_DEVICE_ID_NEPTUNE_SCSP:
a747c9ce 2000 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
e4adb204
JSEC
2001 break;
2002 case PCI_DEVICE_ID_NEPTUNE_DCSP:
a747c9ce 2003 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
e4adb204 2004 break;
dea3101e 2005 case PCI_DEVICE_ID_BMID:
a747c9ce 2006 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
dea3101e 2007 break;
2008 case PCI_DEVICE_ID_BSMB:
a747c9ce 2009 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
dea3101e 2010 break;
2011 case PCI_DEVICE_ID_ZEPHYR:
a747c9ce 2012 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
dea3101e 2013 break;
e4adb204 2014 case PCI_DEVICE_ID_ZEPHYR_SCSP:
a747c9ce 2015 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
e4adb204
JSEC
2016 break;
2017 case PCI_DEVICE_ID_ZEPHYR_DCSP:
a747c9ce 2018 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
a257bf90 2019 GE = 1;
e4adb204 2020 break;
dea3101e 2021 case PCI_DEVICE_ID_ZMID:
a747c9ce 2022 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
dea3101e 2023 break;
2024 case PCI_DEVICE_ID_ZSMB:
a747c9ce 2025 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
dea3101e 2026 break;
2027 case PCI_DEVICE_ID_LP101:
a747c9ce 2028 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
dea3101e 2029 break;
2030 case PCI_DEVICE_ID_LP10000S:
a747c9ce 2031 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
06325e74 2032 break;
e4adb204 2033 case PCI_DEVICE_ID_LP11000S:
a747c9ce 2034 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
18a3b596 2035 break;
e4adb204 2036 case PCI_DEVICE_ID_LPE11000S:
a747c9ce 2037 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
5cc36b3c 2038 break;
b87eab38 2039 case PCI_DEVICE_ID_SAT:
a747c9ce 2040 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2041 break;
2042 case PCI_DEVICE_ID_SAT_MID:
a747c9ce 2043 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2044 break;
2045 case PCI_DEVICE_ID_SAT_SMB:
a747c9ce 2046 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2047 break;
2048 case PCI_DEVICE_ID_SAT_DCSP:
a747c9ce 2049 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2050 break;
2051 case PCI_DEVICE_ID_SAT_SCSP:
a747c9ce 2052 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2053 break;
2054 case PCI_DEVICE_ID_SAT_S:
a747c9ce 2055 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
b87eab38 2056 break;
84774a4d 2057 case PCI_DEVICE_ID_HORNET:
a747c9ce 2058 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
84774a4d
JS
2059 GE = 1;
2060 break;
2061 case PCI_DEVICE_ID_PROTEUS_VF:
a747c9ce
JS
2062 m = (typeof(m)){"LPev12000", "PCIe IOV",
2063 "Fibre Channel Adapter"};
84774a4d
JS
2064 break;
2065 case PCI_DEVICE_ID_PROTEUS_PF:
a747c9ce
JS
2066 m = (typeof(m)){"LPev12000", "PCIe IOV",
2067 "Fibre Channel Adapter"};
84774a4d
JS
2068 break;
2069 case PCI_DEVICE_ID_PROTEUS_S:
a747c9ce
JS
2070 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2071 "Fibre Channel Adapter"};
84774a4d 2072 break;
da0436e9
JS
2073 case PCI_DEVICE_ID_TIGERSHARK:
2074 oneConnect = 1;
a747c9ce 2075 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
da0436e9 2076 break;
a747c9ce 2077 case PCI_DEVICE_ID_TOMCAT:
6669f9bb 2078 oneConnect = 1;
a747c9ce
JS
2079 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2080 break;
2081 case PCI_DEVICE_ID_FALCON:
2082 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2083 "EmulexSecure Fibre"};
6669f9bb 2084 break;
98fc5dd9
JS
2085 case PCI_DEVICE_ID_BALIUS:
2086 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2087 "Fibre Channel Adapter"};
2088 break;
085c647c 2089 case PCI_DEVICE_ID_LANCER_FC:
c0c11512
JS
2090 case PCI_DEVICE_ID_LANCER_FC_VF:
2091 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
085c647c
JS
2092 break;
2093 case PCI_DEVICE_ID_LANCER_FCOE:
c0c11512 2094 case PCI_DEVICE_ID_LANCER_FCOE_VF:
085c647c 2095 oneConnect = 1;
079b5c91 2096 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
085c647c 2097 break;
f8cafd38
JS
2098 case PCI_DEVICE_ID_SKYHAWK:
2099 case PCI_DEVICE_ID_SKYHAWK_VF:
2100 oneConnect = 1;
2101 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2102 break;
5cc36b3c 2103 default:
a747c9ce 2104 m = (typeof(m)){"Unknown", "", ""};
e4adb204 2105 break;
dea3101e 2106 }
74b72a59
JW
2107
2108 if (mdp && mdp[0] == '\0')
2109 snprintf(mdp, 79,"%s", m.name);
c0c11512
JS
2110 /*
 2111 * oneConnect HBAs require special processing; they are all initiators
da0436e9
JS
 2112 * and we put the port number on the end.
2113 */
2114 if (descp && descp[0] == '\0') {
2115 if (oneConnect)
2116 snprintf(descp, 255,
4169d868 2117 "Emulex OneConnect %s, %s Initiator %s",
a747c9ce 2118 m.name, m.function,
da0436e9 2119 phba->Port);
4169d868
JS
2120 else if (max_speed == 0)
2121 snprintf(descp, 255,
2122 "Emulex %s %s %s ",
2123 m.name, m.bus, m.function);
da0436e9
JS
2124 else
2125 snprintf(descp, 255,
2126 "Emulex %s %d%s %s %s",
a747c9ce
JS
2127 m.name, max_speed, (GE) ? "GE" : "Gb",
2128 m.bus, m.function);
da0436e9 2129 }
dea3101e 2130}
2131
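/*
 * Note on the assignment style above: m = (typeof(m)){ ... } is a GNU C
 * compound literal keyed off the anonymous struct type of 'm'; each
 * case is equivalent to assigning the three members individually:
 */
#if 0
	m.name = "LPe12000";
	m.bus = "PCIe";
	m.function = "Fibre Channel Adapter";
#endif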
e59058c4 2132/**
3621a710 2133 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
e59058c4
JS
2134 * @phba: pointer to lpfc hba data structure.
 2135 * @pring: pointer to an IOCB ring.
2136 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2137 *
2138 * This routine posts a given number of IOCBs with the associated DMA buffer
2139 * descriptors specified by the cnt argument to the given IOCB ring.
2140 *
2141 * Return codes
 2142 * The number of IOCBs that could NOT be posted to the IOCB ring.
2143 **/
dea3101e 2144int
495a714c 2145lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
dea3101e 2146{
2147 IOCB_t *icmd;
0bd4ca25 2148 struct lpfc_iocbq *iocb;
dea3101e 2149 struct lpfc_dmabuf *mp1, *mp2;
2150
2151 cnt += pring->missbufcnt;
2152
2153 /* While there are buffers to post */
2154 while (cnt > 0) {
2155 /* Allocate buffer for command iocb */
0bd4ca25 2156 iocb = lpfc_sli_get_iocbq(phba);
dea3101e 2157 if (iocb == NULL) {
2158 pring->missbufcnt = cnt;
2159 return cnt;
2160 }
dea3101e 2161 icmd = &iocb->iocb;
2162
2163 /* 2 buffers can be posted per command */
2164 /* Allocate buffer to post */
2165 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2166 if (mp1)
98c9ea5c
JS
2167 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2168 if (!mp1 || !mp1->virt) {
c9475cb0 2169 kfree(mp1);
604a3e30 2170 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2171 pring->missbufcnt = cnt;
2172 return cnt;
2173 }
2174
2175 INIT_LIST_HEAD(&mp1->list);
2176 /* Allocate buffer to post */
2177 if (cnt > 1) {
2178 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2179 if (mp2)
2180 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2181 &mp2->phys);
98c9ea5c 2182 if (!mp2 || !mp2->virt) {
c9475cb0 2183 kfree(mp2);
dea3101e 2184 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2185 kfree(mp1);
604a3e30 2186 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2187 pring->missbufcnt = cnt;
2188 return cnt;
2189 }
2190
2191 INIT_LIST_HEAD(&mp2->list);
2192 } else {
2193 mp2 = NULL;
2194 }
2195
2196 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2197 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2198 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2199 icmd->ulpBdeCount = 1;
2200 cnt--;
2201 if (mp2) {
2202 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2203 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2204 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2205 cnt--;
2206 icmd->ulpBdeCount = 2;
2207 }
2208
2209 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2210 icmd->ulpLe = 1;
2211
3772a991
JS
2212 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2213 IOCB_ERROR) {
dea3101e 2214 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2215 kfree(mp1);
2216 cnt++;
2217 if (mp2) {
2218 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2219 kfree(mp2);
2220 cnt++;
2221 }
604a3e30 2222 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2223 pring->missbufcnt = cnt;
dea3101e 2224 return cnt;
2225 }
dea3101e 2226 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
92d7f7b0 2227 if (mp2)
dea3101e 2228 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
dea3101e 2229 }
2230 pring->missbufcnt = 0;
2231 return 0;
2232}
2233
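/*
 * Caller sketch (illustrative, not part of the build): the return
 * value is the number of buffers that could NOT be posted, and the
 * same count is cached in pring->missbufcnt so a later call can pick
 * up where this one left off.
 */
#if 0
	int missed = lpfc_post_buffer(phba, pring, LPFC_BUF_RING0);

	/* a non-zero 'missed' simply defers those buffers to the next
	 * post attempt; no error handling is required here
	 */
#endif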
e59058c4 2234/**
3621a710 2235 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
e59058c4
JS
2236 * @phba: pointer to lpfc hba data structure.
2237 *
2238 * This routine posts initial receive IOCB buffers to the ELS ring. The
2239 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2240 * set to 64 IOCBs.
2241 *
2242 * Return codes
2243 * 0 - success (currently always success)
2244 **/
dea3101e 2245static int
2e0fef85 2246lpfc_post_rcv_buf(struct lpfc_hba *phba)
dea3101e 2247{
2248 struct lpfc_sli *psli = &phba->sli;
2249
2250 /* Ring 0, ELS / CT buffers */
495a714c 2251 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
dea3101e 2252 /* Ring 2 - FCP no buffers needed */
2253
2254 return 0;
2255}
2256
2257#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
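/* S(N, V) is the 32-bit circular left shift (rotate) used by SHA-1;
 * rol32() from <linux/bitops.h> performs the same operation.
 */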
2258
e59058c4 2259/**
3621a710 2260 * lpfc_sha_init - Set up initial array of hash table entries
e59058c4
JS
2261 * @HashResultPointer: pointer to an array as hash table.
2262 *
2263 * This routine sets up the initial values to the array of hash table entries
2264 * for the LC HBAs.
2265 **/
dea3101e 2266static void
2267lpfc_sha_init(uint32_t * HashResultPointer)
2268{
2269 HashResultPointer[0] = 0x67452301;
2270 HashResultPointer[1] = 0xEFCDAB89;
2271 HashResultPointer[2] = 0x98BADCFE;
2272 HashResultPointer[3] = 0x10325476;
2273 HashResultPointer[4] = 0xC3D2E1F0;
2274}
2275
e59058c4 2276/**
3621a710 2277 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
e59058c4
JS
2278 * @HashResultPointer: pointer to an initial/result hash table.
 2279 * @HashWorkingPointer: pointer to a working hash table.
 2280 *
 2281 * This routine iterates an initial hash table pointed to by @HashResultPointer
 2282 * with the values from the working hash table pointed to by @HashWorkingPointer.
 2283 * The results are put back into the initial hash table, returned through
 2284 * the @HashResultPointer as the result hash table.
2285 **/
dea3101e 2286static void
2287lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2288{
2289 int t;
2290 uint32_t TEMP;
2291 uint32_t A, B, C, D, E;
2292 t = 16;
2293 do {
2294 HashWorkingPointer[t] =
2295 S(1,
2296 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2297 8] ^
2298 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2299 } while (++t <= 79);
2300 t = 0;
2301 A = HashResultPointer[0];
2302 B = HashResultPointer[1];
2303 C = HashResultPointer[2];
2304 D = HashResultPointer[3];
2305 E = HashResultPointer[4];
2306
2307 do {
2308 if (t < 20) {
2309 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2310 } else if (t < 40) {
2311 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2312 } else if (t < 60) {
2313 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2314 } else {
2315 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2316 }
2317 TEMP += S(5, A) + E + HashWorkingPointer[t];
2318 E = D;
2319 D = C;
2320 C = S(30, B);
2321 B = A;
2322 A = TEMP;
2323 } while (++t <= 79);
2324
2325 HashResultPointer[0] += A;
2326 HashResultPointer[1] += B;
2327 HashResultPointer[2] += C;
2328 HashResultPointer[3] += D;
2329 HashResultPointer[4] += E;
2330
2331}
2332
e59058c4 2333/**
3621a710 2334 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
e59058c4
JS
2335 * @RandomChallenge: pointer to the entry of host challenge random number array.
2336 * @HashWorking: pointer to the entry of the working hash array.
2337 *
 2338 * This routine calculates the working hash array referred to by @HashWorking
 2339 * from the challenge random numbers associated with the host, referred to by
2340 * @RandomChallenge. The result is put into the entry of the working hash
2341 * array and returned by reference through @HashWorking.
2342 **/
dea3101e 2343static void
2344lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2345{
2346 *HashWorking = (*RandomChallenge ^ *HashWorking);
2347}
2348
e59058c4 2349/**
3621a710 2350 * lpfc_hba_init - Perform special handling for LC HBA initialization
e59058c4
JS
2351 * @phba: pointer to lpfc hba data structure.
2352 * @hbainit: pointer to an array of unsigned 32-bit integers.
2353 *
2354 * This routine performs the special handling for LC HBA initialization.
2355 **/
dea3101e 2356void
2357lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2358{
2359 int t;
2360 uint32_t *HashWorking;
2e0fef85 2361 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
dea3101e 2362
bbfbbbc1 2363 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
dea3101e 2364 if (!HashWorking)
2365 return;
2366
dea3101e 2367 HashWorking[0] = HashWorking[78] = *pwwnn++;
2368 HashWorking[1] = HashWorking[79] = *pwwnn;
2369
2370 for (t = 0; t < 7; t++)
2371 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2372
2373 lpfc_sha_init(hbainit);
2374 lpfc_sha_iterate(hbainit, HashWorking);
2375 kfree(HashWorking);
2376}
2377
e59058c4 2378/**
3621a710 2379 * lpfc_cleanup - Performs vport cleanups before deleting a vport
e59058c4
JS
2380 * @vport: pointer to a virtual N_Port data structure.
2381 *
2382 * This routine performs the necessary cleanups before deleting the @vport.
2383 * It invokes the discovery state machine to perform necessary state
2384 * transitions and to release the ndlps associated with the @vport. Note,
2385 * the physical port is treated as @vport 0.
2386 **/
87af33fe 2387void
2e0fef85 2388lpfc_cleanup(struct lpfc_vport *vport)
dea3101e 2389{
87af33fe 2390 struct lpfc_hba *phba = vport->phba;
dea3101e 2391 struct lpfc_nodelist *ndlp, *next_ndlp;
a8adb832 2392 int i = 0;
dea3101e 2393
87af33fe
JS
2394 if (phba->link_state > LPFC_LINK_DOWN)
2395 lpfc_port_link_failure(vport);
2396
2397 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
2398 if (!NLP_CHK_NODE_ACT(ndlp)) {
2399 ndlp = lpfc_enable_node(vport, ndlp,
2400 NLP_STE_UNUSED_NODE);
2401 if (!ndlp)
2402 continue;
2403 spin_lock_irq(&phba->ndlp_lock);
2404 NLP_SET_FREE_REQ(ndlp);
2405 spin_unlock_irq(&phba->ndlp_lock);
2406 /* Trigger the release of the ndlp memory */
2407 lpfc_nlp_put(ndlp);
2408 continue;
2409 }
2410 spin_lock_irq(&phba->ndlp_lock);
2411 if (NLP_CHK_FREE_REQ(ndlp)) {
2412 /* The ndlp should not be in memory free mode already */
2413 spin_unlock_irq(&phba->ndlp_lock);
2414 continue;
2415 } else
2416 /* Indicate request for freeing ndlp memory */
2417 NLP_SET_FREE_REQ(ndlp);
2418 spin_unlock_irq(&phba->ndlp_lock);
2419
58da1ffb
JS
2420 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2421 ndlp->nlp_DID == Fabric_DID) {
2422 /* Just free up ndlp with Fabric_DID for vports */
2423 lpfc_nlp_put(ndlp);
2424 continue;
2425 }
2426
eff4a01b
JS
2427 /* take care of nodes in unused state before the state
 2428 * machine takes action.
2429 */
2430 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2431 lpfc_nlp_put(ndlp);
2432 continue;
2433 }
2434
87af33fe
JS
2435 if (ndlp->nlp_type & NLP_FABRIC)
2436 lpfc_disc_state_machine(vport, ndlp, NULL,
2437 NLP_EVT_DEVICE_RECOVERY);
e47c9093 2438
87af33fe
JS
2439 lpfc_disc_state_machine(vport, ndlp, NULL,
2440 NLP_EVT_DEVICE_RM);
2441 }
2442
a8adb832
JS
2443 /* At this point, ALL ndlp's should be gone
2444 * because of the previous NLP_EVT_DEVICE_RM.
 2445 * Let's wait for this to happen, if needed.
2446 */
87af33fe 2447 while (!list_empty(&vport->fc_nodes)) {
a8adb832 2448 if (i++ > 3000) {
87af33fe 2449 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
a8adb832 2450 "0233 Nodelist not empty\n");
e47c9093
JS
2451 list_for_each_entry_safe(ndlp, next_ndlp,
2452 &vport->fc_nodes, nlp_listp) {
2453 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2454 LOG_NODE,
d7c255b2 2455 "0282 did:x%x ndlp:x%p "
e47c9093
JS
2456 "usgmap:x%x refcnt:%d\n",
2457 ndlp->nlp_DID, (void *)ndlp,
2458 ndlp->nlp_usg_map,
2459 atomic_read(
2460 &ndlp->kref.refcount));
2461 }
a8adb832 2462 break;
87af33fe 2463 }
a8adb832
JS
2464
2465 /* Wait for any activity on ndlps to settle */
2466 msleep(10);
87af33fe 2467 }
1151e3ec 2468 lpfc_cleanup_vports_rrqs(vport, NULL);
dea3101e 2469}
2470
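/*
 * Note (illustrative): ndlp lifetime is kref based - each
 * lpfc_nlp_put() above drops a reference and the node is freed when
 * the count reaches zero, which is why the loop polls vport->fc_nodes
 * until the DEVICE_RM events have released every node.
 */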
e59058c4 2471/**
3621a710 2472 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
e59058c4
JS
2473 * @vport: pointer to a virtual N_Port data structure.
2474 *
2475 * This routine stops all the timers associated with a @vport. This function
2476 * is invoked before disabling or deleting a @vport. Note that the physical
2477 * port is treated as @vport 0.
2478 **/
92d7f7b0
JS
2479void
2480lpfc_stop_vport_timers(struct lpfc_vport *vport)
dea3101e 2481{
92d7f7b0
JS
2482 del_timer_sync(&vport->els_tmofunc);
2483 del_timer_sync(&vport->fc_fdmitmo);
92494144 2484 del_timer_sync(&vport->delayed_disc_tmo);
92d7f7b0
JS
2485 lpfc_can_disctmo(vport);
2486 return;
dea3101e 2487}
2488
ecfd03c6
JS
2489/**
2490 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2491 * @phba: pointer to lpfc hba data structure.
2492 *
2493 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2494 * caller of this routine should already hold the host lock.
2495 **/
2496void
2497__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2498{
5ac6b303
JS
2499 /* Clear pending FCF rediscovery wait flag */
2500 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2501
ecfd03c6
JS
2502 /* Now, try to stop the timer */
2503 del_timer(&phba->fcf.redisc_wait);
2504}
2505
2506/**
2507 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2508 * @phba: pointer to lpfc hba data structure.
2509 *
2510 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2511 * checks whether the FCF rediscovery wait timer is pending with the host
2512 * lock held before proceeding with disabling the timer and clearing the
 2513 * wait timer pending flag.
2514 **/
2515void
2516lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2517{
2518 spin_lock_irq(&phba->hbalock);
2519 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2520 /* FCF rediscovery timer already fired or stopped */
2521 spin_unlock_irq(&phba->hbalock);
2522 return;
2523 }
2524 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
5ac6b303
JS
2525 /* Clear failover in progress flags */
2526 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
ecfd03c6
JS
2527 spin_unlock_irq(&phba->hbalock);
2528}
2529
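/*
 * Locking idiom sketch (illustrative): the double-underscore variant
 * assumes the caller already holds hbalock, while the plain variant
 * takes the lock itself, so callers choose the form matching their
 * context:
 */
#if 0
	/* already under hbalock: */
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	spin_unlock_irq(&phba->hbalock);

	/* no lock held: */
	lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
#endif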
e59058c4 2530/**
3772a991 2531 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
e59058c4
JS
2532 * @phba: pointer to lpfc hba data structure.
2533 *
2534 * This routine stops all the timers associated with a HBA. This function is
2535 * invoked before either putting a HBA offline or unloading the driver.
2536 **/
3772a991
JS
2537void
2538lpfc_stop_hba_timers(struct lpfc_hba *phba)
dea3101e 2539{
51ef4c26 2540 lpfc_stop_vport_timers(phba->pport);
2e0fef85 2541 del_timer_sync(&phba->sli.mbox_tmo);
92d7f7b0 2542 del_timer_sync(&phba->fabric_block_timer);
9399627f 2543 del_timer_sync(&phba->eratt_poll);
3772a991 2544 del_timer_sync(&phba->hb_tmofunc);
1151e3ec
JS
2545 if (phba->sli_rev == LPFC_SLI_REV4) {
2546 del_timer_sync(&phba->rrq_tmr);
2547 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2548 }
3772a991
JS
2549 phba->hb_outstanding = 0;
2550
2551 switch (phba->pci_dev_grp) {
2552 case LPFC_PCI_DEV_LP:
2553 /* Stop any LightPulse device specific driver timers */
2554 del_timer_sync(&phba->fcp_poll_timer);
2555 break;
2556 case LPFC_PCI_DEV_OC:
 2557 /* Stop any OneConnect device specific driver timers */
ecfd03c6 2558 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3772a991
JS
2559 break;
2560 default:
2561 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2562 "0297 Invalid device group (x%x)\n",
2563 phba->pci_dev_grp);
2564 break;
2565 }
2e0fef85 2566 return;
dea3101e 2567}
2568
e59058c4 2569/**
3621a710 2570 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
e59058c4
JS
 2571 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 2572 *
 2573 * This routine marks a HBA's management interface as blocked. Once the HBA's
 2574 * management interface is marked as blocked, all user space access to
 2575 * the HBA, whether from the sysfs interface or the libdfc interface, is
 2576 * blocked. The HBA is set to block the management interface when the
2577 * driver prepares the HBA interface for online or offline.
2578 **/
a6ababd2 2579static void
618a5230 2580lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
a6ababd2
AB
2581{
2582 unsigned long iflag;
6e7288d9
JS
2583 uint8_t actcmd = MBX_HEARTBEAT;
2584 unsigned long timeout;
2585
a6ababd2
AB
2586 spin_lock_irqsave(&phba->hbalock, iflag);
2587 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
618a5230
JS
2588 spin_unlock_irqrestore(&phba->hbalock, iflag);
2589 if (mbx_action == LPFC_MBX_NO_WAIT)
2590 return;
2591 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2592 spin_lock_irqsave(&phba->hbalock, iflag);
a183a15f 2593 if (phba->sli.mbox_active) {
6e7288d9 2594 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
a183a15f
JS
2595 /* Determine how long we might wait for the active mailbox
2596 * command to be gracefully completed by firmware.
2597 */
2598 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2599 phba->sli.mbox_active) * 1000) + jiffies;
2600 }
a6ababd2 2601 spin_unlock_irqrestore(&phba->hbalock, iflag);
a183a15f 2602
6e7288d9
JS
 2603 /* Wait for the outstanding mailbox command to complete */
2604 while (phba->sli.mbox_active) {
2605 /* Check active mailbox complete status every 2ms */
2606 msleep(2);
2607 if (time_after(jiffies, timeout)) {
2608 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2609 "2813 Mgmt IO is Blocked %x "
2610 "- mbox cmd %x still active\n",
2611 phba->sli.sli_flag, actcmd);
2612 break;
2613 }
2614 }
a6ababd2
AB
2615}
2616
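/*
 * Timeout idiom sketch (illustrative): the wait loop above follows the
 * standard jiffies pattern - compute a deadline with msecs_to_jiffies(),
 * poll, and give up once time_after() reports the deadline has passed
 * (time_after() is safe across jiffies wraparound).  condition_pending()
 * is a hypothetical placeholder:
 */
#if 0
	unsigned long deadline = jiffies +
				 msecs_to_jiffies(LPFC_MBOX_TMO * 1000);

	while (condition_pending()) {
		msleep(2);
		if (time_after(jiffies, deadline))
			break;
	}
#endif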
6b5151fd
JS
2617/**
2618 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2619 * @phba: pointer to lpfc hba data structure.
2620 *
2621 * Allocate RPIs for all active remote nodes. This is needed whenever
2622 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2623 * is to fixup the temporary rpi assignments.
2624 **/
2625void
2626lpfc_sli4_node_prep(struct lpfc_hba *phba)
2627{
2628 struct lpfc_nodelist *ndlp, *next_ndlp;
2629 struct lpfc_vport **vports;
2630 int i;
2631
2632 if (phba->sli_rev != LPFC_SLI_REV4)
2633 return;
2634
2635 vports = lpfc_create_vport_work_array(phba);
2636 if (vports != NULL) {
2637 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2638 if (vports[i]->load_flag & FC_UNLOADING)
2639 continue;
2640
2641 list_for_each_entry_safe(ndlp, next_ndlp,
2642 &vports[i]->fc_nodes,
2643 nlp_listp) {
2644 if (NLP_CHK_NODE_ACT(ndlp))
2645 ndlp->nlp_rpi =
2646 lpfc_sli4_alloc_rpi(phba);
2647 }
2648 }
2649 }
2650 lpfc_destroy_vport_work_array(phba, vports);
2651}
2652
e59058c4 2653/**
3621a710 2654 * lpfc_online - Initialize and bring a HBA online
e59058c4
JS
2655 * @phba: pointer to lpfc hba data structure.
2656 *
2657 * This routine initializes the HBA and brings a HBA online. During this
2658 * process, the management interface is blocked to prevent user space access
2659 * to the HBA interfering with the driver initialization.
2660 *
2661 * Return codes
2662 * 0 - successful
2663 * 1 - failed
2664 **/
dea3101e 2665int
2e0fef85 2666lpfc_online(struct lpfc_hba *phba)
dea3101e 2667{
372bd282 2668 struct lpfc_vport *vport;
549e55cd
JS
2669 struct lpfc_vport **vports;
2670 int i;
16a3a208 2671 bool vpis_cleared = false;
2e0fef85 2672
dea3101e 2673 if (!phba)
2674 return 0;
372bd282 2675 vport = phba->pport;
dea3101e 2676
2e0fef85 2677 if (!(vport->fc_flag & FC_OFFLINE_MODE))
dea3101e 2678 return 0;
2679
ed957684 2680 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 2681 "0458 Bring Adapter online\n");
dea3101e 2682
618a5230 2683 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
46fa311e
JS
2684
2685 if (!lpfc_sli_queue_setup(phba)) {
2686 lpfc_unblock_mgmt_io(phba);
dea3101e 2687 return 1;
46fa311e 2688 }
dea3101e 2689
da0436e9
JS
2690 if (phba->sli_rev == LPFC_SLI_REV4) {
2691 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2692 lpfc_unblock_mgmt_io(phba);
2693 return 1;
2694 }
16a3a208
JS
2695 spin_lock_irq(&phba->hbalock);
2696 if (!phba->sli4_hba.max_cfg_param.vpi_used)
2697 vpis_cleared = true;
2698 spin_unlock_irq(&phba->hbalock);
da0436e9
JS
2699 } else {
2700 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2701 lpfc_unblock_mgmt_io(phba);
2702 return 1;
2703 }
46fa311e 2704 }
dea3101e 2705
549e55cd
JS
2706 vports = lpfc_create_vport_work_array(phba);
2707 if (vports != NULL)
da0436e9 2708 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd
JS
2709 struct Scsi_Host *shost;
2710 shost = lpfc_shost_from_vport(vports[i]);
2711 spin_lock_irq(shost->host_lock);
2712 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2713 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2714 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
16a3a208 2715 if (phba->sli_rev == LPFC_SLI_REV4) {
1c6834a7 2716 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
16a3a208
JS
2717 if ((vpis_cleared) &&
2718 (vports[i]->port_type !=
2719 LPFC_PHYSICAL_PORT))
2720 vports[i]->vpi = 0;
2721 }
549e55cd
JS
2722 spin_unlock_irq(shost->host_lock);
2723 }
09372820 2724 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 2725
46fa311e 2726 lpfc_unblock_mgmt_io(phba);
dea3101e 2727 return 0;
2728}
2729
e59058c4 2730/**
3621a710 2731 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
e59058c4
JS
2732 * @phba: pointer to lpfc hba data structure.
2733 *
2734 * This routine marks a HBA's management interface as not blocked. Once the
 2735 * HBA's management interface is marked as not blocked, all user space
 2736 * access to the HBA, whether from the sysfs interface or the libdfc
 2737 * interface, is allowed. The HBA is set to block the management interface
2738 * when the driver prepares the HBA interface for online or offline and then
2739 * set to unblock the management interface afterwards.
2740 **/
46fa311e
JS
2741void
2742lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2743{
2744 unsigned long iflag;
2745
2e0fef85
JS
2746 spin_lock_irqsave(&phba->hbalock, iflag);
2747 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2748 spin_unlock_irqrestore(&phba->hbalock, iflag);
46fa311e
JS
2749}
2750
e59058c4 2751/**
3621a710 2752 * lpfc_offline_prep - Prepare a HBA to be brought offline
e59058c4
JS
 2753 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
2754 *
2755 * This routine is invoked to prepare a HBA to be brought offline. It performs
2756 * unregistration login to all the nodes on all vports and flushes the mailbox
2757 * queue to make it ready to be brought offline.
2758 **/
46fa311e 2759void
618a5230 2760lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
46fa311e 2761{
2e0fef85 2762 struct lpfc_vport *vport = phba->pport;
46fa311e 2763 struct lpfc_nodelist *ndlp, *next_ndlp;
87af33fe 2764 struct lpfc_vport **vports;
72100cc4 2765 struct Scsi_Host *shost;
87af33fe 2766 int i;
dea3101e 2767
2e0fef85 2768 if (vport->fc_flag & FC_OFFLINE_MODE)
46fa311e 2769 return;
dea3101e 2770
618a5230 2771 lpfc_block_mgmt_io(phba, mbx_action);
dea3101e 2772
2773 lpfc_linkdown(phba);
2774
87af33fe
JS
2775 /* Issue an unreg_login to all nodes on all vports */
2776 vports = lpfc_create_vport_work_array(phba);
2777 if (vports != NULL) {
da0436e9 2778 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
a8adb832
JS
2779 if (vports[i]->load_flag & FC_UNLOADING)
2780 continue;
72100cc4
JS
2781 shost = lpfc_shost_from_vport(vports[i]);
2782 spin_lock_irq(shost->host_lock);
c868595d 2783 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
695a814e
JS
2784 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2785 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
72100cc4 2786 spin_unlock_irq(shost->host_lock);
695a814e 2787
87af33fe
JS
2788 shost = lpfc_shost_from_vport(vports[i]);
2789 list_for_each_entry_safe(ndlp, next_ndlp,
2790 &vports[i]->fc_nodes,
2791 nlp_listp) {
e47c9093
JS
2792 if (!NLP_CHK_NODE_ACT(ndlp))
2793 continue;
87af33fe
JS
2794 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2795 continue;
2796 if (ndlp->nlp_type & NLP_FABRIC) {
2797 lpfc_disc_state_machine(vports[i], ndlp,
2798 NULL, NLP_EVT_DEVICE_RECOVERY);
2799 lpfc_disc_state_machine(vports[i], ndlp,
2800 NULL, NLP_EVT_DEVICE_RM);
2801 }
2802 spin_lock_irq(shost->host_lock);
2803 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
401ee0c1 2804 spin_unlock_irq(shost->host_lock);
6b5151fd
JS
2805 /*
2806 * Whenever an SLI4 port goes offline, free the
401ee0c1
JS
2807 * RPI. Get a new RPI when the adapter port
2808 * comes back online.
6b5151fd
JS
2809 */
2810 if (phba->sli_rev == LPFC_SLI_REV4)
2811 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
87af33fe
JS
2812 lpfc_unreg_rpi(vports[i], ndlp);
2813 }
2814 }
2815 }
09372820 2816 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 2817
618a5230 2818 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
46fa311e
JS
2819}
2820
e59058c4 2821/**
3621a710 2822 * lpfc_offline - Bring a HBA offline
e59058c4
JS
2823 * @phba: pointer to lpfc hba data structure.
2824 *
2825 * This routine actually brings a HBA offline. It stops all the timers
2826 * associated with the HBA, brings down the SLI layer, and eventually
2827 * marks the HBA as in offline state for the upper layer protocol.
2828 **/
46fa311e 2829void
2e0fef85 2830lpfc_offline(struct lpfc_hba *phba)
46fa311e 2831{
549e55cd
JS
2832 struct Scsi_Host *shost;
2833 struct lpfc_vport **vports;
2834 int i;
46fa311e 2835
549e55cd 2836 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
46fa311e 2837 return;
688a8863 2838
da0436e9
JS
2839 /* stop port and all timers associated with this hba */
2840 lpfc_stop_port(phba);
51ef4c26
JS
2841 vports = lpfc_create_vport_work_array(phba);
2842 if (vports != NULL)
da0436e9 2843 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
51ef4c26 2844 lpfc_stop_vport_timers(vports[i]);
09372820 2845 lpfc_destroy_vport_work_array(phba, vports);
92d7f7b0 2846 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 2847 "0460 Bring Adapter offline\n");
dea3101e 2848 /* Bring down the SLI Layer and clean up. The HBA is offline
 2849 now. */
2850 lpfc_sli_hba_down(phba);
92d7f7b0 2851 spin_lock_irq(&phba->hbalock);
7054a606 2852 phba->work_ha = 0;
92d7f7b0 2853 spin_unlock_irq(&phba->hbalock);
549e55cd
JS
2854 vports = lpfc_create_vport_work_array(phba);
2855 if (vports != NULL)
da0436e9 2856 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd 2857 shost = lpfc_shost_from_vport(vports[i]);
549e55cd
JS
2858 spin_lock_irq(shost->host_lock);
2859 vports[i]->work_port_events = 0;
2860 vports[i]->fc_flag |= FC_OFFLINE_MODE;
2861 spin_unlock_irq(shost->host_lock);
2862 }
09372820 2863 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 2864}
2865
e59058c4 2866/**
3621a710 2867 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
e59058c4
JS
2868 * @phba: pointer to lpfc hba data structure.
2869 *
 2870 * This routine frees all the SCSI buffers and IOCBs from the driver
 2871 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
2872 * the internal resources before the device is removed from the system.
e59058c4 2873 **/
8a9d2e80 2874static void
2e0fef85 2875lpfc_scsi_free(struct lpfc_hba *phba)
dea3101e 2876{
2877 struct lpfc_scsi_buf *sb, *sb_next;
2878 struct lpfc_iocbq *io, *io_next;
2879
2e0fef85 2880 spin_lock_irq(&phba->hbalock);
a40fc5f0 2881
dea3101e 2882 /* Release all the lpfc_scsi_bufs maintained by this host. */
a40fc5f0
JS
2883
2884 spin_lock(&phba->scsi_buf_list_put_lock);
2885 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
2886 list) {
dea3101e 2887 list_del(&sb->list);
2888 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
92d7f7b0 2889 sb->dma_handle);
dea3101e 2890 kfree(sb);
2891 phba->total_scsi_bufs--;
2892 }
a40fc5f0
JS
2893 spin_unlock(&phba->scsi_buf_list_put_lock);
2894
2895 spin_lock(&phba->scsi_buf_list_get_lock);
2896 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
2897 list) {
dea3101e 2898 list_del(&sb->list);
2899 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
92d7f7b0 2900 sb->dma_handle);
dea3101e 2901 kfree(sb);
2902 phba->total_scsi_bufs--;
2903 }
a40fc5f0 2904 spin_unlock(&phba->scsi_buf_list_get_lock);
dea3101e 2905
2906 /* Release all the lpfc_iocbq entries maintained by this host. */
2907 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2908 list_del(&io->list);
2909 kfree(io);
2910 phba->total_iocbq_bufs--;
2911 }
6d368e53 2912
2e0fef85 2913 spin_unlock_irq(&phba->hbalock);
8a9d2e80
JS
2914}
2915
2916/**
2917 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
2918 * @phba: pointer to lpfc hba data structure.
2919 *
2920 * This routine first calculates the sizes of the current els and allocated
 2921 * scsi sgl lists, and then goes through all sgls to update the physical
 2922 * XRIs assigned due to port function reset. During port initialization, the
 2923 * current els and allocated scsi sgl list counts are 0.
2924 *
2925 * Return codes
2926 * 0 - successful (for now, it always returns 0)
2927 **/
2928int
2929lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2930{
2931 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
2932 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
2933 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
2934 LIST_HEAD(els_sgl_list);
2935 LIST_HEAD(scsi_sgl_list);
2936 int rc;
2937
2938 /*
2939 * update on pci function's els xri-sgl list
2940 */
2941 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
2942 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
2943 /* els xri-sgl expanded */
2944 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
2945 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2946 "3157 ELS xri-sgl count increased from "
2947 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2948 els_xri_cnt);
2949 /* allocate the additional els sgls */
2950 for (i = 0; i < xri_cnt; i++) {
2951 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
2952 GFP_KERNEL);
2953 if (sglq_entry == NULL) {
2954 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2955 "2562 Failure to allocate an "
2956 "ELS sgl entry:%d\n", i);
2957 rc = -ENOMEM;
2958 goto out_free_mem;
2959 }
2960 sglq_entry->buff_type = GEN_BUFF_TYPE;
2961 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
2962 &sglq_entry->phys);
2963 if (sglq_entry->virt == NULL) {
2964 kfree(sglq_entry);
2965 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2966 "2563 Failure to allocate an "
2967 "ELS mbuf:%d\n", i);
2968 rc = -ENOMEM;
2969 goto out_free_mem;
2970 }
2971 sglq_entry->sgl = sglq_entry->virt;
2972 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
2973 sglq_entry->state = SGL_FREED;
2974 list_add_tail(&sglq_entry->list, &els_sgl_list);
2975 }
38c20673 2976 spin_lock_irq(&phba->hbalock);
8a9d2e80 2977 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
38c20673 2978 spin_unlock_irq(&phba->hbalock);
8a9d2e80
JS
2979 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
 2980 /* els xri-sgl shrunk */
2981 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
2982 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2983 "3158 ELS xri-sgl count decreased from "
2984 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2985 els_xri_cnt);
2986 spin_lock_irq(&phba->hbalock);
2987 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
2988 spin_unlock_irq(&phba->hbalock);
2989 /* release extra els sgls from list */
2990 for (i = 0; i < xri_cnt; i++) {
2991 list_remove_head(&els_sgl_list,
2992 sglq_entry, struct lpfc_sglq, list);
2993 if (sglq_entry) {
2994 lpfc_mbuf_free(phba, sglq_entry->virt,
2995 sglq_entry->phys);
2996 kfree(sglq_entry);
2997 }
2998 }
2999 spin_lock_irq(&phba->hbalock);
3000 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3001 spin_unlock_irq(&phba->hbalock);
3002 } else
3003 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3004 "3163 ELS xri-sgl count unchanged: %d\n",
3005 els_xri_cnt);
3006 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3007
3008 /* update xris to els sgls on the list */
3009 sglq_entry = NULL;
3010 sglq_entry_next = NULL;
3011 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3012 &phba->sli4_hba.lpfc_sgl_list, list) {
3013 lxri = lpfc_sli4_next_xritag(phba);
3014 if (lxri == NO_XRI) {
3015 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3016 "2400 Failed to allocate xri for "
3017 "ELS sgl\n");
3018 rc = -ENOMEM;
3019 goto out_free_mem;
3020 }
3021 sglq_entry->sli4_lxritag = lxri;
3022 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3023 }
3024
3025 /*
3026 * update on pci function's allocated scsi xri-sgl list
3027 */
3028 phba->total_scsi_bufs = 0;
3029
3030 /* maximum number of xris available for scsi buffers */
3031 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
3032 els_xri_cnt;
3033
3034 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3035 "2401 Current allocated SCSI xri-sgl count:%d, "
3036 "maximum SCSI xri count:%d\n",
3037 phba->sli4_hba.scsi_xri_cnt,
3038 phba->sli4_hba.scsi_xri_max);
3039
a40fc5f0 3040 spin_lock_irq(&phba->scsi_buf_list_get_lock);
164cecd1 3041 spin_lock(&phba->scsi_buf_list_put_lock);
a40fc5f0
JS
3042 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3043 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
164cecd1 3044 spin_unlock(&phba->scsi_buf_list_put_lock);
a40fc5f0 3045 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80
JS
3046
3047 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
 3048 /* max scsi xri shrunk below the allocated scsi buffers */
3049 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3050 phba->sli4_hba.scsi_xri_max;
3051 /* release the extra allocated scsi buffers */
3052 for (i = 0; i < scsi_xri_cnt; i++) {
3053 list_remove_head(&scsi_sgl_list, psb,
3054 struct lpfc_scsi_buf, list);
3055 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
3056 psb->dma_handle);
3057 kfree(psb);
3058 }
a40fc5f0 3059 spin_lock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80 3060 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
a40fc5f0 3061 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80
JS
3062 }
3063
3064 /* update xris associated to remaining allocated scsi buffers */
3065 psb = NULL;
3066 psb_next = NULL;
3067 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3068 lxri = lpfc_sli4_next_xritag(phba);
3069 if (lxri == NO_XRI) {
3070 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3071 "2560 Failed to allocate xri for "
3072 "scsi buffer\n");
3073 rc = -ENOMEM;
3074 goto out_free_mem;
3075 }
3076 psb->cur_iocbq.sli4_lxritag = lxri;
3077 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3078 }
a40fc5f0 3079 spin_lock_irq(&phba->scsi_buf_list_get_lock);
164cecd1 3080 spin_lock(&phba->scsi_buf_list_put_lock);
a40fc5f0
JS
3081 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3082 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
164cecd1 3083 spin_unlock(&phba->scsi_buf_list_put_lock);
a40fc5f0 3084 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80 3085
dea3101e 3086 return 0;
8a9d2e80
JS
3087
3088out_free_mem:
3089 lpfc_free_els_sgl_list(phba);
3090 lpfc_scsi_free(phba);
3091 return rc;
dea3101e 3092}
3093
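/*
 * List resize sketch (illustrative): growing or shrinking the ELS sgl
 * list above uses the usual splice pattern - build or drain a private
 * list with no lock held, then swap it with the live list under
 * hbalock so the lock is never held across allocations:
 */
#if 0
	LIST_HEAD(tmp_list);

	/* allocate or free entries on tmp_list without the lock ... */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&tmp_list, &phba->sli4_hba.lpfc_sgl_list);
	spin_unlock_irq(&phba->hbalock);
#endif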
e59058c4 3094/**
3621a710 3095 * lpfc_create_port - Create an FC port
e59058c4
JS
3096 * @phba: pointer to lpfc hba data structure.
3097 * @instance: a unique integer ID to this FC port.
3098 * @dev: pointer to the device data structure.
3099 *
 3100 * This routine creates an FC port for the upper layer protocol. The FC port
3101 * can be created on top of either a physical port or a virtual port provided
3102 * by the HBA. This routine also allocates a SCSI host data structure (shost)
3103 * and associates the FC port created before adding the shost into the SCSI
3104 * layer.
3105 *
3106 * Return codes
3107 * @vport - pointer to the virtual N_Port data structure.
3108 * NULL - port create failed.
3109 **/
2e0fef85 3110struct lpfc_vport *
3de2a653 3111lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
47a8617c 3112{
2e0fef85
JS
3113 struct lpfc_vport *vport;
3114 struct Scsi_Host *shost;
3115 int error = 0;
47a8617c 3116
3de2a653
JS
3117 if (dev != &phba->pcidev->dev)
3118 shost = scsi_host_alloc(&lpfc_vport_template,
3119 sizeof(struct lpfc_vport));
3120 else
3121 shost = scsi_host_alloc(&lpfc_template,
3122 sizeof(struct lpfc_vport));
2e0fef85
JS
3123 if (!shost)
3124 goto out;
47a8617c 3125
2e0fef85
JS
3126 vport = (struct lpfc_vport *) shost->hostdata;
3127 vport->phba = phba;
2e0fef85 3128 vport->load_flag |= FC_LOADING;
92d7f7b0 3129 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7f5f3d0d 3130 vport->fc_rscn_flush = 0;
47a8617c 3131
3de2a653 3132 lpfc_get_vport_cfgparam(vport);
2e0fef85
JS
3133 shost->unique_id = instance;
3134 shost->max_id = LPFC_MAX_TARGET;
3de2a653 3135 shost->max_lun = vport->cfg_max_luns;
2e0fef85
JS
3136 shost->this_id = -1;
3137 shost->max_cmd_len = 16;
da0436e9 3138 if (phba->sli_rev == LPFC_SLI_REV4) {
28baac74 3139 shost->dma_boundary =
cb5172ea 3140 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
da0436e9
JS
3141 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
3142 }
81301a9b 3143
47a8617c 3144 /*
2e0fef85
JS
3145 * Set initial can_queue value since 0 is no longer supported and
3146 * scsi_add_host will fail. This will be adjusted later based on the
3147 * max xri value determined in hba setup.
47a8617c 3148 */
2e0fef85 3149 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3de2a653 3150 if (dev != &phba->pcidev->dev) {
92d7f7b0
JS
3151 shost->transportt = lpfc_vport_transport_template;
3152 vport->port_type = LPFC_NPIV_PORT;
3153 } else {
3154 shost->transportt = lpfc_transport_template;
3155 vport->port_type = LPFC_PHYSICAL_PORT;
3156 }
47a8617c 3157
2e0fef85
JS
3158 /* Initialize all internally managed lists. */
3159 INIT_LIST_HEAD(&vport->fc_nodes);
da0436e9 3160 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2e0fef85 3161 spin_lock_init(&vport->work_port_lock);
47a8617c 3162
2e0fef85
JS
3163 init_timer(&vport->fc_disctmo);
3164 vport->fc_disctmo.function = lpfc_disc_timeout;
92d7f7b0 3165 vport->fc_disctmo.data = (unsigned long)vport;
47a8617c 3166
2e0fef85
JS
3167 init_timer(&vport->fc_fdmitmo);
3168 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
92d7f7b0 3169 vport->fc_fdmitmo.data = (unsigned long)vport;
47a8617c 3170
2e0fef85
JS
3171 init_timer(&vport->els_tmofunc);
3172 vport->els_tmofunc.function = lpfc_els_timeout;
92d7f7b0 3173 vport->els_tmofunc.data = (unsigned long)vport;
92494144
JS
3174
3175 init_timer(&vport->delayed_disc_tmo);
3176 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
3177 vport->delayed_disc_tmo.data = (unsigned long)vport;
3178
d139b9bd 3179 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2e0fef85
JS
3180 if (error)
3181 goto out_put_shost;
47a8617c 3182
549e55cd 3183 spin_lock_irq(&phba->hbalock);
2e0fef85 3184 list_add_tail(&vport->listentry, &phba->port_list);
549e55cd 3185 spin_unlock_irq(&phba->hbalock);
2e0fef85 3186 return vport;
47a8617c 3187
2e0fef85
JS
3188out_put_shost:
3189 scsi_host_put(shost);
3190out:
3191 return NULL;
47a8617c
JS
3192}
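/*
 * Usage sketch (hypothetical, for illustration only): pairing
 * lpfc_get_instance() below with lpfc_create_port() above to bring up
 * the physical port. Passing the PCI device as @dev selects
 * lpfc_template and LPFC_PHYSICAL_PORT; the helper name is made up.
 */
static struct lpfc_vport *
lpfc_example_create_physical_port(struct lpfc_hba *phba)
{
	int instance = lpfc_get_instance();	/* unique host instance ID */

	if (instance == -1)
		return NULL;			/* idr allocation failed */
	return lpfc_create_port(phba, instance, &phba->pcidev->dev);
}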
3193
e59058c4 3194/**
3621a710 3195 * destroy_port - destroy an FC port
e59058c4
JS
3196 * @vport: pointer to an lpfc virtual N_Port data structure.
3197 *
3198 * This routine destroys an FC port from the upper layer protocol. All the
3199 * resources associated with the port are released.
3200 **/
2e0fef85
JS
3201void
3202destroy_port(struct lpfc_vport *vport)
47a8617c 3203{
92d7f7b0
JS
3204 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3205 struct lpfc_hba *phba = vport->phba;
47a8617c 3206
858c9f6c 3207 lpfc_debugfs_terminate(vport);
92d7f7b0
JS
3208 fc_remove_host(shost);
3209 scsi_remove_host(shost);
47a8617c 3210
92d7f7b0
JS
3211 spin_lock_irq(&phba->hbalock);
3212 list_del_init(&vport->listentry);
3213 spin_unlock_irq(&phba->hbalock);
47a8617c 3214
92d7f7b0 3215 lpfc_cleanup(vport);
47a8617c 3216 return;
47a8617c
JS
3217}
3218
e59058c4 3219/**
3621a710 3220 * lpfc_get_instance - Get a unique integer ID
e59058c4
JS
3221 *
3222 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
3223 * uses the kernel idr facility to perform the task.
3224 *
3225 * Return codes:
3226 * instance - a unique integer ID allocated as the new instance.
3227 * -1 - lpfc get instance failed.
3228 **/
92d7f7b0
JS
3229int
3230lpfc_get_instance(void)
3231{
ab516036
TH
3232 int ret;
3233
3234 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
3235 return ret < 0 ? -1 : ret;
47a8617c
JS
3236}
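/*
 * Release-side sketch (assumed; the driver does this on its remove path,
 * outside this section): an instance obtained above must be handed back
 * to the idr pool. The helper name is hypothetical.
 */
static void
lpfc_example_put_instance(int instance)
{
	if (instance >= 0)
		idr_remove(&lpfc_hba_index, instance);
}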
3237
e59058c4 3238/**
3621a710 3239 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
e59058c4
JS
3240 * @shost: pointer to SCSI host data structure.
3241 * @time: elapsed time of the scan in jiffies.
3242 *
3243 * This routine is called by the SCSI layer with a SCSI host to determine
3244 * whether the host scan is finished.
3245 *
3246 * Note: there is no scan_start function as adapter initialization will have
3247 * asynchronously kicked off the link initialization.
3248 *
3249 * Return codes
3250 * 0 - SCSI host scan is not over yet.
3251 * 1 - SCSI host scan is over.
3252 **/
47a8617c
JS
3253int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3254{
2e0fef85
JS
3255 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3256 struct lpfc_hba *phba = vport->phba;
858c9f6c 3257 int stat = 0;
47a8617c 3258
858c9f6c
JS
3259 spin_lock_irq(shost->host_lock);
3260
51ef4c26 3261 if (vport->load_flag & FC_UNLOADING) {
858c9f6c
JS
3262 stat = 1;
3263 goto finished;
3264 }
256ec0d0 3265 if (time >= msecs_to_jiffies(30 * 1000)) {
2e0fef85 3266 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
3267 "0461 Scanning longer than 30 "
3268 "seconds. Continuing initialization\n");
858c9f6c 3269 stat = 1;
47a8617c 3270 goto finished;
2e0fef85 3271 }
256ec0d0
JS
3272 if (time >= msecs_to_jiffies(15 * 1000) &&
3273 phba->link_state <= LPFC_LINK_DOWN) {
2e0fef85 3274 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
3275 "0465 Link down longer than 15 "
3276 "seconds. Continuing initialization\n");
858c9f6c 3277 stat = 1;
47a8617c 3278 goto finished;
2e0fef85 3279 }
47a8617c 3280
2e0fef85 3281 if (vport->port_state != LPFC_VPORT_READY)
858c9f6c 3282 goto finished;
2e0fef85 3283 if (vport->num_disc_nodes || vport->fc_prli_sent)
858c9f6c 3284 goto finished;
256ec0d0 3285 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
858c9f6c 3286 goto finished;
2e0fef85 3287 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
858c9f6c
JS
3288 goto finished;
3289
3290 stat = 1;
47a8617c
JS
3291
3292finished:
858c9f6c
JS
3293 spin_unlock_irq(shost->host_lock);
3294 return stat;
92d7f7b0 3295}
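/*
 * Consumer-side sketch: with no scan_start method, scsi_scan_host()
 * polls scan_finished with the elapsed jiffies until it returns nonzero.
 * The template fragment below is illustrative only; the driver's real
 * templates (lpfc_template, lpfc_vport_template) are defined elsewhere.
 */
static struct scsi_host_template lpfc_example_template = {
	.module		= THIS_MODULE,
	.name		= "example",
	.scan_finished	= lpfc_scan_finished,	/* polled by scsi_scan_host() */
};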
47a8617c 3296
e59058c4 3297/**
3621a710 3298 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
e59058c4
JS
3299 * @shost: pointer to SCSI host data structure.
3300 *
3301 * This routine initializes a given SCSI host attributes on a FC port. The
3302 * SCSI host can be either on top of a physical port or a virtual port.
3303 **/
92d7f7b0
JS
3304void lpfc_host_attrib_init(struct Scsi_Host *shost)
3305{
3306 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3307 struct lpfc_hba *phba = vport->phba;
47a8617c 3308 /*
2e0fef85 3309 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
47a8617c
JS
3310 */
3311
2e0fef85
JS
3312 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
3313 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
47a8617c
JS
3314 fc_host_supported_classes(shost) = FC_COS_CLASS3;
3315
3316 memset(fc_host_supported_fc4s(shost), 0,
2e0fef85 3317 sizeof(fc_host_supported_fc4s(shost)));
47a8617c
JS
3318 fc_host_supported_fc4s(shost)[2] = 1;
3319 fc_host_supported_fc4s(shost)[7] = 1;
3320
92d7f7b0
JS
3321 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
3322 sizeof fc_host_symbolic_name(shost));
47a8617c
JS
3323
3324 fc_host_supported_speeds(shost) = 0;
88a2cfbb
JS
3325 if (phba->lmt & LMT_16Gb)
3326 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
47a8617c
JS
3327 if (phba->lmt & LMT_10Gb)
3328 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
a8adb832
JS
3329 if (phba->lmt & LMT_8Gb)
3330 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
47a8617c
JS
3331 if (phba->lmt & LMT_4Gb)
3332 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
3333 if (phba->lmt & LMT_2Gb)
3334 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
3335 if (phba->lmt & LMT_1Gb)
3336 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
3337
3338 fc_host_maxframe_size(shost) =
2e0fef85
JS
3339 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
3340 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
47a8617c 3341
0af5d708
MC
3342 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
3343
47a8617c
JS
3344 /* This value is also unchanging */
3345 memset(fc_host_active_fc4s(shost), 0,
2e0fef85 3346 sizeof(fc_host_active_fc4s(shost)));
47a8617c
JS
3347 fc_host_active_fc4s(shost)[2] = 1;
3348 fc_host_active_fc4s(shost)[7] = 1;
3349
92d7f7b0 3350 fc_host_max_npiv_vports(shost) = phba->max_vpi;
47a8617c 3351 spin_lock_irq(shost->host_lock);
51ef4c26 3352 vport->load_flag &= ~FC_LOADING;
47a8617c 3353 spin_unlock_irq(shost->host_lock);
47a8617c 3354}
dea3101e 3355
e59058c4 3356/**
da0436e9 3357 * lpfc_stop_port_s3 - Stop SLI3 device port
e59058c4
JS
3358 * @phba: pointer to lpfc hba data structure.
3359 *
da0436e9
JS
3360 * This routine is invoked to stop an SLI3 device port. It stops the device
3361 * from generating interrupts and stops the device driver's timers for the
3362 * device.
e59058c4 3363 **/
da0436e9
JS
3364static void
3365lpfc_stop_port_s3(struct lpfc_hba *phba)
db2378e0 3366{
da0436e9
JS
3367 /* Clear all interrupt enable conditions */
3368 writel(0, phba->HCregaddr);
3369 readl(phba->HCregaddr); /* flush */
3370 /* Clear all pending interrupts */
3371 writel(0xffffffff, phba->HAregaddr);
3372 readl(phba->HAregaddr); /* flush */
db2378e0 3373
da0436e9
JS
3374 /* Reset some HBA SLI setup states */
3375 lpfc_stop_hba_timers(phba);
3376 phba->pport->work_port_events = 0;
3377}
db2378e0 3378
da0436e9
JS
3379/**
3380 * lpfc_stop_port_s4 - Stop SLI4 device port
3381 * @phba: pointer to lpfc hba data structure.
3382 *
3383 * This routine is invoked to stop an SLI4 device port. It stops the device
3384 * from generating interrupts and stops the device driver's timers for the
3385 * device.
3386 **/
3387static void
3388lpfc_stop_port_s4(struct lpfc_hba *phba)
3389{
3390 /* Reset some HBA SLI4 setup states */
3391 lpfc_stop_hba_timers(phba);
3392 phba->pport->work_port_events = 0;
3393 phba->sli4_hba.intr_enable = 0;
da0436e9 3394}
9399627f 3395
da0436e9
JS
3396/**
3397 * lpfc_stop_port - Wrapper function for stopping hba port
3398 * @phba: Pointer to HBA context object.
3399 *
3400 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
3401 * the API jump table function pointer from the lpfc_hba struct.
3402 **/
3403void
3404lpfc_stop_port(struct lpfc_hba *phba)
3405{
3406 phba->lpfc_stop_port(phba);
3407}
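/*
 * Dispatch-binding sketch: the wrapper above stays revision-agnostic
 * because setup binds the revision-specific routine into the jump table.
 * A simplified, hypothetical excerpt of what lpfc_init_api_table_setup()
 * does (error handling omitted):
 */
static void
lpfc_example_bind_stop_port(struct lpfc_hba *phba, uint8_t dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:				/* SLI-3 parts */
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:				/* SLI-4 parts */
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	}
}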
db2378e0 3408
ecfd03c6
JS
3409/**
3410 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3411 * @phba: Pointer to hba for which this call is being executed.
3412 *
3413 * This routine starts the timer waiting for the FCF rediscovery to complete.
3414 **/
3415void
3416lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3417{
3418 unsigned long fcf_redisc_wait_tmo =
3419 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3420 /* Start fcf rediscovery wait period timer */
3421 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3422 spin_lock_irq(&phba->hbalock);
3423 /* Allow action to new fcf asynchronous event */
3424 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3425 /* Mark the FCF rediscovery pending state */
3426 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3427 spin_unlock_irq(&phba->hbalock);
3428}
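/*
 * Setup sketch (assumed to happen during SLI4 resource setup, outside
 * this section): the redisc_wait timer armed above is tied to the
 * timeout handler that follows, using the same legacy timer API this
 * file uses elsewhere. The helper name is hypothetical.
 */
static void
lpfc_example_init_redisc_timer(struct lpfc_hba *phba)
{
	init_timer(&phba->fcf.redisc_wait);
	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
	phba->fcf.redisc_wait.data = (unsigned long)phba;
}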
3429
3430/**
3431 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3432 * @ptr: unsigned long cast of the lpfc hba data structure pointer.
3433 *
3434 * This routine is invoked when the wait for FCF table rediscovery has
3435 * timed out. If new FCF record(s) have been discovered during the
3436 * wait period, a new FCF event shall be added to the FCOE async event
3437 * list, and the worker thread shall be woken up for processing from the
3438 * worker thread context.
3439 **/
3440void
3441lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3442{
3443 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3444
3445 /* Don't send FCF rediscovery event if timer cancelled */
3446 spin_lock_irq(&phba->hbalock);
3447 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3448 spin_unlock_irq(&phba->hbalock);
3449 return;
3450 }
3451 /* Clear FCF rediscovery timer pending flag */
3452 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3453 /* FCF rediscovery event to worker thread */
3454 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3455 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 3456 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 3457 "2776 FCF rediscover quiescent timer expired\n");
ecfd03c6
JS
3458 /* wake up worker thread */
3459 lpfc_worker_wake_up(phba);
3460}
3461
e59058c4 3462/**
da0436e9 3463 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
e59058c4 3464 * @phba: pointer to lpfc hba data structure.
da0436e9 3465 * @acqe_link: pointer to the async link completion queue entry.
e59058c4 3466 *
da0436e9
JS
3467 * This routine is to parse the SLI4 link-attention link fault code and
3468 * translate it into the base driver's read link attention mailbox command
3469 * status.
3470 *
3471 * Return: Link-attention status in terms of base driver's coding.
e59058c4 3472 **/
da0436e9
JS
3473static uint16_t
3474lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3475 struct lpfc_acqe_link *acqe_link)
db2378e0 3476{
da0436e9 3477 uint16_t latt_fault;
9399627f 3478
da0436e9
JS
3479 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3480 case LPFC_ASYNC_LINK_FAULT_NONE:
3481 case LPFC_ASYNC_LINK_FAULT_LOCAL:
3482 case LPFC_ASYNC_LINK_FAULT_REMOTE:
3483 latt_fault = 0;
3484 break;
3485 default:
3486 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3487 "0398 Invalid link fault code: x%x\n",
3488 bf_get(lpfc_acqe_link_fault, acqe_link));
3489 latt_fault = MBXERR_ERROR;
3490 break;
3491 }
3492 return latt_fault;
db2378e0
JS
3493}
3494
5b75da2f 3495/**
da0436e9 3496 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5b75da2f 3497 * @phba: pointer to lpfc hba data structure.
da0436e9 3498 * @acqe_link: pointer to the async link completion queue entry.
5b75da2f 3499 *
da0436e9
JS
3500 * This routine is to parse the SLI4 link attention type and translate it
3501 * into the base driver's link attention type coding.
5b75da2f 3502 *
da0436e9
JS
3503 * Return: Link attention type in terms of base driver's coding.
3504 **/
3505static uint8_t
3506lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3507 struct lpfc_acqe_link *acqe_link)
5b75da2f 3508{
da0436e9 3509 uint8_t att_type;
5b75da2f 3510
da0436e9
JS
3511 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3512 case LPFC_ASYNC_LINK_STATUS_DOWN:
3513 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
76a95d75 3514 att_type = LPFC_ATT_LINK_DOWN;
da0436e9
JS
3515 break;
3516 case LPFC_ASYNC_LINK_STATUS_UP:
3517 /* Ignore physical link up events - wait for logical link up */
76a95d75 3518 att_type = LPFC_ATT_RESERVED;
da0436e9
JS
3519 break;
3520 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
76a95d75 3521 att_type = LPFC_ATT_LINK_UP;
da0436e9
JS
3522 break;
3523 default:
3524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3525 "0399 Invalid link attention type: x%x\n",
3526 bf_get(lpfc_acqe_link_status, acqe_link));
76a95d75 3527 att_type = LPFC_ATT_RESERVED;
da0436e9 3528 break;
5b75da2f 3529 }
da0436e9 3530 return att_type;
5b75da2f
JS
3531}
3532
3533/**
da0436e9 3534 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
5b75da2f 3535 * @phba: pointer to lpfc hba data structure.
da0436e9 3536 * @acqe_link: pointer to the async link completion queue entry.
5b75da2f 3537 *
da0436e9
JS
3538 * This routine is to parse the SLI4 link-attention link speed and translate
3539 * it into the base driver's link-attention link speed coding.
3540 *
3541 * Return: Link-attention link speed in terms of base driver's coding.
3542 **/
3543static uint8_t
3544lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3545 struct lpfc_acqe_link *acqe_link)
5b75da2f 3546{
da0436e9
JS
3547 uint8_t link_speed;
3548
3549 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3550 case LPFC_ASYNC_LINK_SPEED_ZERO:
da0436e9 3551 case LPFC_ASYNC_LINK_SPEED_10MBPS:
da0436e9 3552 case LPFC_ASYNC_LINK_SPEED_100MBPS:
76a95d75 3553 link_speed = LPFC_LINK_SPEED_UNKNOWN;
da0436e9
JS
3554 break;
3555 case LPFC_ASYNC_LINK_SPEED_1GBPS:
76a95d75 3556 link_speed = LPFC_LINK_SPEED_1GHZ;
da0436e9
JS
3557 break;
3558 case LPFC_ASYNC_LINK_SPEED_10GBPS:
76a95d75 3559 link_speed = LPFC_LINK_SPEED_10GHZ;
da0436e9
JS
3560 break;
3561 default:
3562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3563 "0483 Invalid link-attention link speed: x%x\n",
3564 bf_get(lpfc_acqe_link_speed, acqe_link));
76a95d75 3565 link_speed = LPFC_LINK_SPEED_UNKNOWN;
da0436e9
JS
3566 break;
3567 }
3568 return link_speed;
3569}
3570
8b68cd52
JS
3571/**
3572 * lpfc_sli_port_speed_get - Get an SLI3 FC port's link speed in Mbps
3573 * @phba: pointer to lpfc hba data structure.
3574 *
3575 * This routine is to get an SLI3 FC port's link speed in Mbps.
3576 *
3577 * Return: link speed in terms of Mbps.
3578 **/
3579uint32_t
3580lpfc_sli_port_speed_get(struct lpfc_hba *phba)
3581{
3582 uint32_t link_speed;
3583
3584 if (!lpfc_is_link_up(phba))
3585 return 0;
3586
3587 switch (phba->fc_linkspeed) {
3588 case LPFC_LINK_SPEED_1GHZ:
3589 link_speed = 1000;
3590 break;
3591 case LPFC_LINK_SPEED_2GHZ:
3592 link_speed = 2000;
3593 break;
3594 case LPFC_LINK_SPEED_4GHZ:
3595 link_speed = 4000;
3596 break;
3597 case LPFC_LINK_SPEED_8GHZ:
3598 link_speed = 8000;
3599 break;
3600 case LPFC_LINK_SPEED_10GHZ:
3601 link_speed = 10000;
3602 break;
3603 case LPFC_LINK_SPEED_16GHZ:
3604 link_speed = 16000;
3605 break;
3606 default:
3607 link_speed = 0;
3608 }
3609 return link_speed;
3610}
3611
3612/**
3613 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
3614 * @phba: pointer to lpfc hba data structure.
3615 * @evt_code: asynchronous event code.
3616 * @speed_code: asynchronous event link speed code.
3617 *
3618 * This routine is to parse the given SLI4 async event link speed code into
3619 * value of Mbps for the link speed.
3620 *
3621 * Return: link speed in terms of Mbps.
3622 **/
3623static uint32_t
3624lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
3625 uint8_t speed_code)
3626{
3627 uint32_t port_speed;
3628
3629 switch (evt_code) {
3630 case LPFC_TRAILER_CODE_LINK:
3631 switch (speed_code) {
3632 case LPFC_EVT_CODE_LINK_NO_LINK:
3633 port_speed = 0;
3634 break;
3635 case LPFC_EVT_CODE_LINK_10_MBIT:
3636 port_speed = 10;
3637 break;
3638 case LPFC_EVT_CODE_LINK_100_MBIT:
3639 port_speed = 100;
3640 break;
3641 case LPFC_EVT_CODE_LINK_1_GBIT:
3642 port_speed = 1000;
3643 break;
3644 case LPFC_EVT_CODE_LINK_10_GBIT:
3645 port_speed = 10000;
3646 break;
3647 default:
3648 port_speed = 0;
3649 }
3650 break;
3651 case LPFC_TRAILER_CODE_FC:
3652 switch (speed_code) {
3653 case LPFC_EVT_CODE_FC_NO_LINK:
3654 port_speed = 0;
3655 break;
3656 case LPFC_EVT_CODE_FC_1_GBAUD:
3657 port_speed = 1000;
3658 break;
3659 case LPFC_EVT_CODE_FC_2_GBAUD:
3660 port_speed = 2000;
3661 break;
3662 case LPFC_EVT_CODE_FC_4_GBAUD:
3663 port_speed = 4000;
3664 break;
3665 case LPFC_EVT_CODE_FC_8_GBAUD:
3666 port_speed = 8000;
3667 break;
3668 case LPFC_EVT_CODE_FC_10_GBAUD:
3669 port_speed = 10000;
3670 break;
3671 case LPFC_EVT_CODE_FC_16_GBAUD:
3672 port_speed = 16000;
3673 break;
3674 default:
3675 port_speed = 0;
3676 }
3677 break;
3678 default:
3679 port_speed = 0;
3680 }
3681 return port_speed;
3682}
3683
da0436e9 3684/**
70f3c073 3685 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
da0436e9
JS
3686 * @phba: pointer to lpfc hba data structure.
3687 * @acqe_link: pointer to the async link completion queue entry.
3688 *
70f3c073 3689 * This routine is to handle the SLI4 asynchronous FCoE link event.
da0436e9
JS
3690 **/
3691static void
3692lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3693 struct lpfc_acqe_link *acqe_link)
3694{
3695 struct lpfc_dmabuf *mp;
3696 LPFC_MBOXQ_t *pmb;
3697 MAILBOX_t *mb;
76a95d75 3698 struct lpfc_mbx_read_top *la;
da0436e9 3699 uint8_t att_type;
76a95d75 3700 int rc;
da0436e9
JS
3701
3702 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
76a95d75 3703 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
da0436e9 3704 return;
32b9793f 3705 phba->fcoe_eventtag = acqe_link->event_tag;
da0436e9
JS
3706 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3707 if (!pmb) {
3708 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3709 "0395 The mboxq allocation failed\n");
3710 return;
3711 }
3712 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3713 if (!mp) {
3714 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3715 "0396 The lpfc_dmabuf allocation failed\n");
3716 goto out_free_pmb;
3717 }
3718 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3719 if (!mp->virt) {
3720 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3721 "0397 The mbuf allocation failed\n");
3722 goto out_free_dmabuf;
3723 }
3724
3725 /* Cleanup any outstanding ELS commands */
3726 lpfc_els_flush_all_cmd(phba);
3727
3728 /* Block ELS IOCBs until we have done process link event */
3729 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3730
3731 /* Update link event statistics */
3732 phba->sli.slistat.link_event++;
3733
76a95d75
JS
3734 /* Create lpfc_handle_latt mailbox command from link ACQE */
3735 lpfc_read_topology(phba, pmb, mp);
3736 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
da0436e9
JS
3737 pmb->vport = phba->pport;
3738
da0436e9
JS
3739 /* Keep the link status for extra SLI4 state machine reference */
3740 phba->sli4_hba.link_state.speed =
8b68cd52
JS
3741 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
3742 bf_get(lpfc_acqe_link_speed, acqe_link));
da0436e9
JS
3743 phba->sli4_hba.link_state.duplex =
3744 bf_get(lpfc_acqe_link_duplex, acqe_link);
3745 phba->sli4_hba.link_state.status =
3746 bf_get(lpfc_acqe_link_status, acqe_link);
70f3c073
JS
3747 phba->sli4_hba.link_state.type =
3748 bf_get(lpfc_acqe_link_type, acqe_link);
3749 phba->sli4_hba.link_state.number =
3750 bf_get(lpfc_acqe_link_number, acqe_link);
da0436e9
JS
3751 phba->sli4_hba.link_state.fault =
3752 bf_get(lpfc_acqe_link_fault, acqe_link);
65467b6b 3753 phba->sli4_hba.link_state.logical_speed =
8b68cd52
JS
3754 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
3755
70f3c073 3756 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
c31098ce
JS
3757 "2900 Async FC/FCoE Link event - Speed:%dGBit "
3758 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3759 "Logical speed:%dMbps Fault:%d\n",
70f3c073
JS
3760 phba->sli4_hba.link_state.speed,
3761 phba->sli4_hba.link_state.topology,
3762 phba->sli4_hba.link_state.status,
3763 phba->sli4_hba.link_state.type,
3764 phba->sli4_hba.link_state.number,
8b68cd52 3765 phba->sli4_hba.link_state.logical_speed,
70f3c073 3766 phba->sli4_hba.link_state.fault);
76a95d75
JS
3767 /*
3768 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3769 * topology info. Note: Optional for non FC-AL ports.
3770 */
3771 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3772 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3773 if (rc == MBX_NOT_FINISHED)
3774 goto out_free_dmabuf;
3775 return;
3776 }
3777 /*
3778 * For FCoE Mode: fill in all the topology information we need and call
3779 * the READ_TOPOLOGY completion routine to continue without actually
3780 * sending the READ_TOPOLOGY mailbox command to the port.
3781 */
3782 /* Parse and translate status field */
3783 mb = &pmb->u.mb;
3784 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3785
3786 /* Parse and translate link attention fields */
3787 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3788 la->eventTag = acqe_link->event_tag;
3789 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3790 bf_set(lpfc_mbx_read_top_link_spd, la,
3791 lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3792
3793 	/* Fake the following irrelevant fields */
3794 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3795 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3796 bf_set(lpfc_mbx_read_top_il, la, 0);
3797 bf_set(lpfc_mbx_read_top_pb, la, 0);
3798 bf_set(lpfc_mbx_read_top_fa, la, 0);
3799 bf_set(lpfc_mbx_read_top_mm, la, 0);
da0436e9
JS
3800
3801 /* Invoke the lpfc_handle_latt mailbox command callback function */
76a95d75 3802 lpfc_mbx_cmpl_read_topology(phba, pmb);
da0436e9 3803
5b75da2f 3804 return;
da0436e9
JS
3805
3806out_free_dmabuf:
3807 kfree(mp);
3808out_free_pmb:
3809 mempool_free(pmb, phba->mbox_mem_pool);
3810}
3811
70f3c073
JS
3812/**
3813 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3814 * @phba: pointer to lpfc hba data structure.
3815 * @acqe_fc: pointer to the async fc completion queue entry.
3816 *
3817 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3818 * that the event was received and then issue a read_topology mailbox command so
3819 * that the rest of the driver will treat it the same as SLI3.
3820 **/
3821static void
3822lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3823{
3824 struct lpfc_dmabuf *mp;
3825 LPFC_MBOXQ_t *pmb;
3826 int rc;
3827
3828 if (bf_get(lpfc_trailer_type, acqe_fc) !=
3829 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3830 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3831 "2895 Non FC link Event detected.(%d)\n",
3832 bf_get(lpfc_trailer_type, acqe_fc));
3833 return;
3834 }
3835 /* Keep the link status for extra SLI4 state machine reference */
3836 phba->sli4_hba.link_state.speed =
8b68cd52
JS
3837 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
3838 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
70f3c073
JS
3839 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3840 phba->sli4_hba.link_state.topology =
3841 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3842 phba->sli4_hba.link_state.status =
3843 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3844 phba->sli4_hba.link_state.type =
3845 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3846 phba->sli4_hba.link_state.number =
3847 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3848 phba->sli4_hba.link_state.fault =
3849 bf_get(lpfc_acqe_link_fault, acqe_fc);
3850 phba->sli4_hba.link_state.logical_speed =
8b68cd52 3851 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
70f3c073
JS
3852 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3853 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3854 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3855 "%dMbps Fault:%d\n",
3856 phba->sli4_hba.link_state.speed,
3857 phba->sli4_hba.link_state.topology,
3858 phba->sli4_hba.link_state.status,
3859 phba->sli4_hba.link_state.type,
3860 phba->sli4_hba.link_state.number,
8b68cd52 3861 phba->sli4_hba.link_state.logical_speed,
70f3c073
JS
3862 phba->sli4_hba.link_state.fault);
3863 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3864 if (!pmb) {
3865 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3866 "2897 The mboxq allocation failed\n");
3867 return;
3868 }
3869 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3870 if (!mp) {
3871 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3872 "2898 The lpfc_dmabuf allocation failed\n");
3873 goto out_free_pmb;
3874 }
3875 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3876 if (!mp->virt) {
3877 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3878 "2899 The mbuf allocation failed\n");
3879 goto out_free_dmabuf;
3880 }
3881
3882 /* Cleanup any outstanding ELS commands */
3883 lpfc_els_flush_all_cmd(phba);
3884
3885 /* Block ELS IOCBs until we have done process link event */
3886 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3887
3888 /* Update link event statistics */
3889 phba->sli.slistat.link_event++;
3890
3891 /* Create lpfc_handle_latt mailbox command from link ACQE */
3892 lpfc_read_topology(phba, pmb, mp);
3893 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3894 pmb->vport = phba->pport;
3895
3896 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3897 if (rc == MBX_NOT_FINISHED)
3898 goto out_free_dmabuf;
3899 return;
3900
3901out_free_dmabuf:
3902 kfree(mp);
3903out_free_pmb:
3904 mempool_free(pmb, phba->mbox_mem_pool);
3905}
3906
3907/**
3908 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI event
3909 * @phba: pointer to lpfc hba data structure.
3910 * @acqe_sli: pointer to the async SLI completion queue entry.
3911 *
3912 * This routine is to handle the SLI4 asynchronous SLI events.
3913 **/
3914static void
3915lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3916{
4b8bae08 3917 char port_name;
8c1312e1 3918 char message[128];
4b8bae08
JS
3919 uint8_t status;
3920 struct lpfc_acqe_misconfigured_event *misconfigured;
3921
3922 /* special case misconfigured event as it contains data for all ports */
3923 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3924 LPFC_SLI_INTF_IF_TYPE_2) ||
3925 (bf_get(lpfc_trailer_type, acqe_sli) !=
3926 LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
3927 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3928 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
3929 "x%08x SLI Event Type:%d\n",
3930 acqe_sli->event_data1, acqe_sli->event_data2,
3931 bf_get(lpfc_trailer_type, acqe_sli));
3932 return;
3933 }
3934
3935 port_name = phba->Port[0];
3936 if (port_name == 0x00)
3937 		port_name = '?'; /* port name is empty */
3938
3939 misconfigured = (struct lpfc_acqe_misconfigured_event *)
3940 &acqe_sli->event_data1;
3941
3942 /* fetch the status for this port */
3943 switch (phba->sli4_hba.lnk_info.lnk_no) {
3944 case LPFC_LINK_NUMBER_0:
3945 status = bf_get(lpfc_sli_misconfigured_port0,
3946 &misconfigured->theEvent);
3947 break;
3948 case LPFC_LINK_NUMBER_1:
3949 status = bf_get(lpfc_sli_misconfigured_port1,
3950 &misconfigured->theEvent);
3951 break;
3952 case LPFC_LINK_NUMBER_2:
3953 status = bf_get(lpfc_sli_misconfigured_port2,
3954 &misconfigured->theEvent);
3955 break;
3956 case LPFC_LINK_NUMBER_3:
3957 status = bf_get(lpfc_sli_misconfigured_port3,
3958 &misconfigured->theEvent);
3959 break;
3960 default:
3961 status = ~LPFC_SLI_EVENT_STATUS_VALID;
3962 break;
3963 }
3964
3965 switch (status) {
3966 case LPFC_SLI_EVENT_STATUS_VALID:
3967 return; /* no message if the sfp is okay */
3968 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
292098be
JS
3969 sprintf(message, "Optics faulted/incorrectly installed/not " \
3970 "installed - Reseat optics, if issue not "
3971 "resolved, replace.");
4b8bae08
JS
3972 break;
3973 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
3974 sprintf(message,
292098be
JS
3975 "Optics of two types installed - Remove one optic or " \
3976 "install matching pair of optics.");
4b8bae08
JS
3977 break;
3978 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
292098be
JS
3979 sprintf(message, "Incompatible optics - Replace with " \
3980 "compatible optics for card to function.");
4b8bae08
JS
3981 break;
3982 default:
3983 /* firmware is reporting a status we don't know about */
3984 sprintf(message, "Unknown event status x%02x", status);
3985 break;
3986 }
3987
3988 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3989 "3176 Misconfigured Physical Port - "
3990 "Port Name %c %s\n", port_name, message);
70f3c073
JS
3991}
3992
fc2b989b
JS
3993/**
3994 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3995 * @vport: pointer to vport data structure.
3996 *
3997 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3998 * response to a CVL event.
3999 *
4000 * Return the pointer to the ndlp with the vport if successful, otherwise
4001 * return NULL.
4002 **/
4003static struct lpfc_nodelist *
4004lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
4005{
4006 struct lpfc_nodelist *ndlp;
4007 struct Scsi_Host *shost;
4008 struct lpfc_hba *phba;
4009
4010 if (!vport)
4011 return NULL;
fc2b989b
JS
4012 phba = vport->phba;
4013 if (!phba)
4014 return NULL;
78730cfe
JS
4015 ndlp = lpfc_findnode_did(vport, Fabric_DID);
4016 if (!ndlp) {
4017 /* Cannot find existing Fabric ndlp, so allocate a new one */
4018 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4019 if (!ndlp)
4020 return 0;
4021 lpfc_nlp_init(vport, ndlp, Fabric_DID);
4022 /* Set the node type */
4023 ndlp->nlp_type |= NLP_FABRIC;
4024 /* Put ndlp onto node list */
4025 lpfc_enqueue_node(vport, ndlp);
4026 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4027 /* re-setup ndlp without removing from node list */
4028 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4029 if (!ndlp)
4030 return 0;
4031 }
63e801ce
JS
4032 if ((phba->pport->port_state < LPFC_FLOGI) &&
4033 (phba->pport->port_state != LPFC_VPORT_FAILED))
fc2b989b
JS
4034 return NULL;
4035 /* If virtual link is not yet instantiated ignore CVL */
63e801ce
JS
4036 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
4037 && (vport->port_state != LPFC_VPORT_FAILED))
fc2b989b
JS
4038 return NULL;
4039 shost = lpfc_shost_from_vport(vport);
4040 if (!shost)
4041 return NULL;
4042 lpfc_linkdown_port(vport);
4043 lpfc_cleanup_pending_mbox(vport);
4044 spin_lock_irq(shost->host_lock);
4045 vport->fc_flag |= FC_VPORT_CVL_RCVD;
4046 spin_unlock_irq(shost->host_lock);
4047
4048 return ndlp;
4049}
4050
4051/**
4052 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4053 * @phba: pointer to lpfc hba data structure.
4054 *
4055 * This routine is to perform Clear Virtual Link (CVL) on all vports in
4056 * response to a FCF dead event.
4057 **/
4058static void
4059lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4060{
4061 struct lpfc_vport **vports;
4062 int i;
4063
4064 vports = lpfc_create_vport_work_array(phba);
4065 if (vports)
4066 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4067 lpfc_sli4_perform_vport_cvl(vports[i]);
4068 lpfc_destroy_vport_work_array(phba, vports);
4069}
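/*
 * The create/destroy work-array idiom above is the driver's standard way
 * to walk vports without holding a lock across the per-vport work. A
 * minimal generic sketch of the pattern (helper name is hypothetical):
 */
static void
lpfc_example_for_each_vport(struct lpfc_hba *phba,
			    void (*fn)(struct lpfc_vport *))
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);	/* snapshot with refs */
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			fn(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);	/* drop references */
}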
4070
da0436e9 4071/**
76a95d75 4072 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
da0436e9
JS
4073 * @phba: pointer to lpfc hba data structure.
4074 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
4075 *
4076 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
4077 **/
4078static void
76a95d75 4079lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
70f3c073 4080 struct lpfc_acqe_fip *acqe_fip)
da0436e9 4081{
70f3c073 4082 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
da0436e9 4083 int rc;
6669f9bb
JS
4084 struct lpfc_vport *vport;
4085 struct lpfc_nodelist *ndlp;
4086 struct Scsi_Host *shost;
695a814e
JS
4087 int active_vlink_present;
4088 struct lpfc_vport **vports;
4089 int i;
da0436e9 4090
70f3c073
JS
4091 phba->fc_eventTag = acqe_fip->event_tag;
4092 phba->fcoe_eventtag = acqe_fip->event_tag;
da0436e9 4093 switch (event_type) {
70f3c073
JS
4094 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
4095 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
4096 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
999d813f
JS
4097 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4098 LOG_DISCOVERY,
a93ff37a
JS
4099 "2546 New FCF event, evt_tag:x%x, "
4100 "index:x%x\n",
70f3c073
JS
4101 acqe_fip->event_tag,
4102 acqe_fip->index);
999d813f
JS
4103 else
4104 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
4105 LOG_DISCOVERY,
a93ff37a
JS
4106 "2788 FCF param modified event, "
4107 "evt_tag:x%x, index:x%x\n",
70f3c073
JS
4108 acqe_fip->event_tag,
4109 acqe_fip->index);
38b92ef8 4110 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
0c9ab6f5
JS
4111 /*
4112 * During period of FCF discovery, read the FCF
4113 * table record indexed by the event to update
a93ff37a 4114 * FCF roundrobin failover eligible FCF bmask.
0c9ab6f5
JS
4115 */
4116 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4117 LOG_DISCOVERY,
a93ff37a
JS
4118 "2779 Read FCF (x%x) for updating "
4119 "roundrobin FCF failover bmask\n",
70f3c073
JS
4120 acqe_fip->index);
4121 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
0c9ab6f5 4122 }
38b92ef8
JS
4123
4124 /* If the FCF discovery is in progress, do nothing. */
3804dc84 4125 spin_lock_irq(&phba->hbalock);
a93ff37a 4126 if (phba->hba_flag & FCF_TS_INPROG) {
38b92ef8
JS
4127 spin_unlock_irq(&phba->hbalock);
4128 break;
4129 }
4130 /* If fast FCF failover rescan event is pending, do nothing */
4131 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
4132 spin_unlock_irq(&phba->hbalock);
4133 break;
4134 }
4135
c2b9712e
JS
4136 /* If the FCF has been in discovered state, do nothing. */
4137 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3804dc84
JS
4138 spin_unlock_irq(&phba->hbalock);
4139 break;
4140 }
4141 spin_unlock_irq(&phba->hbalock);
38b92ef8 4142
0c9ab6f5
JS
4143 /* Otherwise, scan the entire FCF table and re-discover SAN */
4144 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
a93ff37a
JS
4145 "2770 Start FCF table scan per async FCF "
4146 "event, evt_tag:x%x, index:x%x\n",
70f3c073 4147 acqe_fip->event_tag, acqe_fip->index);
0c9ab6f5
JS
4148 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
4149 LPFC_FCOE_FCF_GET_FIRST);
da0436e9 4150 if (rc)
0c9ab6f5
JS
4151 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4152 "2547 Issue FCF scan read FCF mailbox "
a93ff37a 4153 "command failed (x%x)\n", rc);
da0436e9
JS
4154 break;
4155
70f3c073 4156 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
da0436e9 4157 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e4e74273 4158 "2548 FCF Table full count 0x%x tag 0x%x\n",
70f3c073
JS
4159 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
4160 acqe_fip->event_tag);
da0436e9
JS
4161 break;
4162
70f3c073 4163 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
80c17849 4164 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
0c9ab6f5 4165 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
a93ff37a 4166 "2549 FCF (x%x) disconnected from network, "
70f3c073 4167 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
38b92ef8
JS
4168 /*
4169 * If we are in the middle of FCF failover process, clear
4170 * the corresponding FCF bit in the roundrobin bitmap.
da0436e9 4171 */
fc2b989b 4172 spin_lock_irq(&phba->hbalock);
0c9ab6f5 4173 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
fc2b989b 4174 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 4175 /* Update FLOGI FCF failover eligible FCF bmask */
70f3c073 4176 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
fc2b989b
JS
4177 break;
4178 }
38b92ef8
JS
4179 spin_unlock_irq(&phba->hbalock);
4180
4181 /* If the event is not for currently used fcf do nothing */
70f3c073 4182 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
38b92ef8
JS
4183 break;
4184
4185 /*
4186 * Otherwise, request the port to rediscover the entire FCF
4187 * table for a fast recovery from case that the current FCF
4188 * is no longer valid as we are not in the middle of FCF
4189 * failover process already.
4190 */
c2b9712e
JS
4191 spin_lock_irq(&phba->hbalock);
4192 /* Mark the fast failover process in progress */
4193 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4194 spin_unlock_irq(&phba->hbalock);
4195
4196 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4197 "2771 Start FCF fast failover process due to "
4198 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
4199 "\n", acqe_fip->event_tag, acqe_fip->index);
4200 rc = lpfc_sli4_redisc_fcf_table(phba);
4201 if (rc) {
4202 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4203 LOG_DISCOVERY,
4204 "2772 Issue FCF rediscover mabilbox "
4205 "command failed, fail through to FCF "
4206 "dead event\n");
4207 spin_lock_irq(&phba->hbalock);
4208 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4209 spin_unlock_irq(&phba->hbalock);
4210 /*
4211 * Last resort will fail over by treating this
4212 * as a link down to FCF registration.
4213 */
4214 lpfc_sli4_fcf_dead_failthrough(phba);
4215 } else {
4216 /* Reset FCF roundrobin bmask for new discovery */
4217 lpfc_sli4_clear_fcf_rr_bmask(phba);
4218 /*
4219 * Handling fast FCF failover to a DEAD FCF event is
4220 * considered equivalent to receiving CVL on all vports.
4221 */
4222 lpfc_sli4_perform_all_vport_cvl(phba);
4223 }
da0436e9 4224 break;
70f3c073 4225 case LPFC_FIP_EVENT_TYPE_CVL:
80c17849 4226 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
0c9ab6f5 4227 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
6669f9bb 4228 "2718 Clear Virtual Link Received for VPI 0x%x"
70f3c073 4229 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6d368e53 4230
6669f9bb 4231 vport = lpfc_find_vport_by_vpid(phba,
5248a749 4232 acqe_fip->index);
fc2b989b 4233 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6669f9bb
JS
4234 if (!ndlp)
4235 break;
695a814e
JS
4236 active_vlink_present = 0;
4237
4238 vports = lpfc_create_vport_work_array(phba);
4239 if (vports) {
4240 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
4241 i++) {
4242 if ((!(vports[i]->fc_flag &
4243 FC_VPORT_CVL_RCVD)) &&
4244 (vports[i]->port_state > LPFC_FDISC)) {
4245 active_vlink_present = 1;
4246 break;
4247 }
4248 }
4249 lpfc_destroy_vport_work_array(phba, vports);
4250 }
4251
4252 if (active_vlink_present) {
4253 /*
4254 * If there are other active VLinks present,
4255 * re-instantiate the Vlink using FDISC.
4256 */
256ec0d0
JS
4257 mod_timer(&ndlp->nlp_delayfunc,
4258 jiffies + msecs_to_jiffies(1000));
fc2b989b 4259 shost = lpfc_shost_from_vport(vport);
6669f9bb
JS
4260 spin_lock_irq(shost->host_lock);
4261 ndlp->nlp_flag |= NLP_DELAY_TMO;
4262 spin_unlock_irq(shost->host_lock);
695a814e
JS
4263 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
4264 vport->port_state = LPFC_FDISC;
4265 } else {
ecfd03c6
JS
4266 /*
4267 * Otherwise, we request port to rediscover
4268 * the entire FCF table for a fast recovery
4269 * from possible case that the current FCF
0c9ab6f5
JS
4270 * is no longer valid if we are not already
4271 * in the FCF failover process.
ecfd03c6 4272 */
fc2b989b 4273 spin_lock_irq(&phba->hbalock);
0c9ab6f5 4274 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
fc2b989b
JS
4275 spin_unlock_irq(&phba->hbalock);
4276 break;
4277 }
4278 /* Mark the fast failover process in progress */
0c9ab6f5 4279 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
fc2b989b 4280 spin_unlock_irq(&phba->hbalock);
0c9ab6f5
JS
4281 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4282 LOG_DISCOVERY,
a93ff37a 4283 "2773 Start FCF failover per CVL, "
70f3c073 4284 "evt_tag:x%x\n", acqe_fip->event_tag);
ecfd03c6 4285 rc = lpfc_sli4_redisc_fcf_table(phba);
fc2b989b 4286 if (rc) {
0c9ab6f5
JS
4287 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4288 LOG_DISCOVERY,
4289 "2774 Issue FCF rediscover "
4290 "mabilbox command failed, "
4291 "through to CVL event\n");
fc2b989b 4292 spin_lock_irq(&phba->hbalock);
0c9ab6f5 4293 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
fc2b989b 4294 spin_unlock_irq(&phba->hbalock);
ecfd03c6
JS
4295 /*
4296 				 * Last resort will be re-try on the
4297 				 * currently registered FCF entry.
4298 */
4299 lpfc_retry_pport_discovery(phba);
38b92ef8
JS
4300 } else
4301 /*
4302 * Reset FCF roundrobin bmask for new
4303 * discovery.
4304 */
7d791df7 4305 lpfc_sli4_clear_fcf_rr_bmask(phba);
6669f9bb
JS
4306 }
4307 break;
da0436e9
JS
4308 default:
4309 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4310 "0288 Unknown FCoE event type 0x%x event tag "
70f3c073 4311 "0x%x\n", event_type, acqe_fip->event_tag);
da0436e9
JS
4312 break;
4313 }
4314}
4315
4316/**
4317 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
4318 * @phba: pointer to lpfc hba data structure.
4319 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
4320 *
4321 * This routine is to handle the SLI4 asynchronous dcbx event.
4322 **/
4323static void
4324lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
4325 struct lpfc_acqe_dcbx *acqe_dcbx)
4326{
4d9ab994 4327 phba->fc_eventTag = acqe_dcbx->event_tag;
da0436e9
JS
4328 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4329 "0290 The SLI4 DCBX asynchronous event is not "
4330 "handled yet\n");
4331}
4332
b19a061a
JS
4333/**
4334 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
4335 * @phba: pointer to lpfc hba data structure.
4336 * @acqe_grp5: pointer to the async grp5 completion queue entry.
4337 *
4338 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
4339 * is an asynchronous notification of a logical link speed change. The Port
4340 * reports the logical link speed in units of 10Mbps.
4341 **/
4342static void
4343lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
4344 struct lpfc_acqe_grp5 *acqe_grp5)
4345{
4346 uint16_t prev_ll_spd;
4347
4348 phba->fc_eventTag = acqe_grp5->event_tag;
4349 phba->fcoe_eventtag = acqe_grp5->event_tag;
4350 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
4351 phba->sli4_hba.link_state.logical_speed =
8b68cd52 4352 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
b19a061a
JS
4353 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4354 "2789 GRP5 Async Event: Updating logical link speed "
8b68cd52
JS
4355 "from %dMbps to %dMbps\n", prev_ll_spd,
4356 phba->sli4_hba.link_state.logical_speed);
b19a061a
JS
4357}
4358
da0436e9
JS
4359/**
4360 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
4361 * @phba: pointer to lpfc hba data structure.
4362 *
4363 * This routine is invoked by the worker thread to process all the pending
4364 * SLI4 asynchronous events.
4365 **/
4366void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
4367{
4368 struct lpfc_cq_event *cq_event;
4369
4370 /* First, declare the async event has been handled */
4371 spin_lock_irq(&phba->hbalock);
4372 phba->hba_flag &= ~ASYNC_EVENT;
4373 spin_unlock_irq(&phba->hbalock);
4374 /* Now, handle all the async events */
4375 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
4376 /* Get the first event from the head of the event queue */
4377 spin_lock_irq(&phba->hbalock);
4378 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
4379 cq_event, struct lpfc_cq_event, list);
4380 spin_unlock_irq(&phba->hbalock);
4381 /* Process the asynchronous event */
4382 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
4383 case LPFC_TRAILER_CODE_LINK:
4384 lpfc_sli4_async_link_evt(phba,
4385 &cq_event->cqe.acqe_link);
4386 break;
4387 case LPFC_TRAILER_CODE_FCOE:
70f3c073 4388 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
da0436e9
JS
4389 break;
4390 case LPFC_TRAILER_CODE_DCBX:
4391 lpfc_sli4_async_dcbx_evt(phba,
4392 &cq_event->cqe.acqe_dcbx);
4393 break;
b19a061a
JS
4394 case LPFC_TRAILER_CODE_GRP5:
4395 lpfc_sli4_async_grp5_evt(phba,
4396 &cq_event->cqe.acqe_grp5);
4397 break;
70f3c073
JS
4398 case LPFC_TRAILER_CODE_FC:
4399 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
4400 break;
4401 case LPFC_TRAILER_CODE_SLI:
4402 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
4403 break;
da0436e9
JS
4404 default:
4405 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4406 "1804 Invalid asynchrous event code: "
4407 "x%x\n", bf_get(lpfc_trailer_code,
4408 &cq_event->cqe.mcqe_cmpl));
4409 break;
4410 }
4411 /* Free the completion event processed to the free pool */
4412 lpfc_sli4_cq_event_release(phba, cq_event);
4413 }
4414}
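/*
 * Producer-side sketch (a simplified assumption of the slow-path ISR
 * code, which is not part of this section): the event handler queues the
 * CQ event, flags ASYNC_EVENT, and wakes the worker thread that drains
 * the list in lpfc_sli4_async_event_proc() above.
 */
static void
lpfc_example_queue_async_event(struct lpfc_hba *phba,
			       struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_worker_wake_up(phba);	/* process from worker context */
}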
4415
ecfd03c6
JS
4416/**
4417 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
4418 * @phba: pointer to lpfc hba data structure.
4419 *
4420 * This routine is invoked by the worker thread to process FCF table
4421 * rediscovery pending completion event.
4422 **/
4423void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
4424{
4425 int rc;
4426
4427 spin_lock_irq(&phba->hbalock);
4428 /* Clear FCF rediscovery timeout event */
4429 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
4430 /* Clear driver fast failover FCF record flag */
4431 phba->fcf.failover_rec.flag = 0;
4432 /* Set state for FCF fast failover */
4433 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
4434 spin_unlock_irq(&phba->hbalock);
4435
4436 /* Scan FCF table from the first entry to re-discover SAN */
0c9ab6f5 4437 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
a93ff37a 4438 "2777 Start post-quiescent FCF table scan\n");
0c9ab6f5 4439 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
ecfd03c6 4440 if (rc)
0c9ab6f5
JS
4441 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4442 "2747 Issue FCF scan read FCF mailbox "
4443 "command failed 0x%x\n", rc);
ecfd03c6
JS
4444}
4445
da0436e9
JS
4446/**
4447 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
4448 * @phba: pointer to lpfc hba data structure.
4449 * @dev_grp: The HBA PCI-Device group number.
4450 *
4451 * This routine is invoked to set up the per HBA PCI-Device group function
4452 * API jump table entries.
4453 *
4454 * Return: 0 if success, otherwise -ENODEV
4455 **/
4456int
4457lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4458{
4459 int rc;
4460
4461 /* Set up lpfc PCI-device group */
4462 phba->pci_dev_grp = dev_grp;
4463
4464 /* The LPFC_PCI_DEV_OC uses SLI4 */
4465 if (dev_grp == LPFC_PCI_DEV_OC)
4466 phba->sli_rev = LPFC_SLI_REV4;
4467
4468 /* Set up device INIT API function jump table */
4469 rc = lpfc_init_api_table_setup(phba, dev_grp);
4470 if (rc)
4471 return -ENODEV;
4472 /* Set up SCSI API function jump table */
4473 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
4474 if (rc)
4475 return -ENODEV;
4476 /* Set up SLI API function jump table */
4477 rc = lpfc_sli_api_table_setup(phba, dev_grp);
4478 if (rc)
4479 return -ENODEV;
4480 /* Set up MBOX API function jump table */
4481 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
4482 if (rc)
4483 return -ENODEV;
4484
4485 return 0;
5b75da2f
JS
4486}
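/*
 * Caller sketch (hypothetical probe-time excerpt): a failure from any of
 * the four jump-table setups above surfaces as -ENODEV and aborts attach.
 */
static int
lpfc_example_setup_api_tables(struct lpfc_hba *phba)
{
	/* An SLI4 (LPFC_PCI_DEV_OC) device binds all four jump tables */
	if (lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC))
		return -ENODEV;		/* any table failure aborts attach */
	return 0;
}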
4487
4488/**
3621a710 4489 * lpfc_log_intr_mode - Log the active interrupt mode
5b75da2f
JS
4490 * @phba: pointer to lpfc hba data structure.
4491 * @intr_mode: active interrupt mode adopted.
4492 *
4493 * This routine is invoked to log the currently used active interrupt mode
4494 * to the device.
3772a991
JS
4495 **/
4496static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
5b75da2f
JS
4497{
4498 switch (intr_mode) {
4499 case 0:
4500 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4501 "0470 Enable INTx interrupt mode.\n");
4502 break;
4503 case 1:
4504 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4505 "0481 Enabled MSI interrupt mode.\n");
4506 break;
4507 case 2:
4508 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4509 "0480 Enabled MSI-X interrupt mode.\n");
4510 break;
4511 default:
4512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4513 "0482 Illegal interrupt mode.\n");
4514 break;
4515 }
4516 return;
4517}
4518
5b75da2f 4519/**
3772a991 4520 * lpfc_enable_pci_dev - Enable a generic PCI device.
5b75da2f
JS
4521 * @phba: pointer to lpfc hba data structure.
4522 *
3772a991
JS
4523 * This routine is invoked to enable the PCI device that is common to all
4524 * PCI devices.
5b75da2f
JS
4525 *
4526 * Return codes
af901ca1 4527 * 0 - successful
3772a991 4528 * other values - error
5b75da2f 4529 **/
3772a991
JS
4530static int
4531lpfc_enable_pci_dev(struct lpfc_hba *phba)
5b75da2f 4532{
3772a991 4533 struct pci_dev *pdev;
079b5c91 4534 int bars = 0;
5b75da2f 4535
3772a991
JS
4536 /* Obtain PCI device reference */
4537 if (!phba->pcidev)
4538 goto out_error;
4539 else
4540 pdev = phba->pcidev;
4541 /* Select PCI BARs */
4542 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4543 /* Enable PCI device */
4544 if (pci_enable_device_mem(pdev))
4545 goto out_error;
4546 /* Request PCI resource for the device */
4547 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
4548 goto out_disable_device;
4549 /* Set up device as PCI master and save state for EEH */
4550 pci_set_master(pdev);
4551 pci_try_set_mwi(pdev);
4552 pci_save_state(pdev);
5b75da2f 4553
0558056c 4554 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
453193e0 4555 if (pci_is_pcie(pdev))
0558056c
JS
4556 pdev->needs_freset = 1;
4557
3772a991 4558 return 0;
5b75da2f 4559
3772a991
JS
4560out_disable_device:
4561 pci_disable_device(pdev);
4562out_error:
079b5c91
JS
4563 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4564 "1401 Failed to enable pci device, bars:x%x\n", bars);
3772a991 4565 return -ENODEV;
5b75da2f
JS
4566}
4567
4568/**
3772a991 4569 * lpfc_disable_pci_dev - Disable a generic PCI device.
5b75da2f
JS
4570 * @phba: pointer to lpfc hba data structure.
4571 *
3772a991
JS
4572 * This routine is invoked to disable the PCI device that is common to all
4573 * PCI devices.
5b75da2f
JS
4574 **/
4575static void
3772a991 4576lpfc_disable_pci_dev(struct lpfc_hba *phba)
5b75da2f 4577{
3772a991
JS
4578 struct pci_dev *pdev;
4579 int bars;
5b75da2f 4580
3772a991
JS
4581 /* Obtain PCI device reference */
4582 if (!phba->pcidev)
4583 return;
4584 else
4585 pdev = phba->pcidev;
4586 /* Select PCI BARs */
4587 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4588 /* Release PCI resource and disable PCI device */
4589 pci_release_selected_regions(pdev, bars);
4590 pci_disable_device(pdev);
5b75da2f
JS
4591
4592 return;
4593}
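/*
 * Pairing sketch (hypothetical): lpfc_enable_pci_dev() and
 * lpfc_disable_pci_dev() bracket the life of the PCI resources, so a
 * typical caller enables first and unwinds with the disable routine if a
 * later setup step fails.
 */
static int
lpfc_example_pci_bringup(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_enable_pci_dev(phba);		/* BARs, bus mastering, MWI */
	if (rc)
		return rc;

	/* ... further setup would go here; if a later step fails: ... */
	rc = 0;					/* placeholder for a real step */
	if (rc) {
		lpfc_disable_pci_dev(phba);	/* release BARs, disable dev */
		return rc;
	}
	return 0;
}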
4594
e59058c4 4595/**
3772a991
JS
4596 * lpfc_reset_hba - Reset a hba
4597 * @phba: pointer to lpfc hba data structure.
e59058c4 4598 *
3772a991
JS
4599 * This routine is invoked to reset a hba device. It brings the HBA
4600 * offline, performs a board restart, and then brings the board back
4601 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
4602 * outstanding mailbox commands.
e59058c4 4603 **/
3772a991
JS
4604void
4605lpfc_reset_hba(struct lpfc_hba *phba)
dea3101e 4606{
3772a991
JS
4607 /* If resets are disabled then set error state and return. */
4608 if (!phba->cfg_enable_hba_reset) {
4609 phba->link_state = LPFC_HBA_ERROR;
4610 return;
4611 }
618a5230 4612 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
3772a991
JS
4613 lpfc_offline(phba);
4614 lpfc_sli_brdrestart(phba);
4615 lpfc_online(phba);
4616 lpfc_unblock_mgmt_io(phba);
4617}
dea3101e 4618
0a96e975
JS
4619/**
4620 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4621 * @phba: pointer to lpfc hba data structure.
4622 *
4623 * This function reads the PCI SR-IOV extended capability of the physical
4624 * function to determine the maximum number of virtual functions it can
4625 * support. It returns 0 if the device does not expose the SR-IOV
4626 * capability; otherwise it returns the TotalVFs value reported in the
4627 * capability structure.
4628 **/
4629uint16_t
4630lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4631{
4632 struct pci_dev *pdev = phba->pcidev;
4633 uint16_t nr_virtfn;
4634 int pos;
4635
0a96e975
JS
4636 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4637 if (pos == 0)
4638 return 0;
4639
4640 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4641 return nr_virtfn;
4642}
4643
912e3acd
JS
4644/**
4645 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4646 * @phba: pointer to lpfc hba data structure.
4647 * @nr_vfn: number of virtual functions to be enabled.
4648 *
4649 * This function enables the PCI SR-IOV virtual functions to a physical
4650 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4651 * enable the number of virtual functions to the physical function. As
4652 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4653 * API call is not considered an error condition for most devices.
4654 **/
4655int
4656lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4657{
4658 struct pci_dev *pdev = phba->pcidev;
0a96e975 4659 uint16_t max_nr_vfn;
912e3acd
JS
4660 int rc;
4661
0a96e975
JS
4662 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4663 if (nr_vfn > max_nr_vfn) {
4664 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4665 "3057 Requested vfs (%d) greater than "
4666 "supported vfs (%d)", nr_vfn, max_nr_vfn);
4667 return -EINVAL;
4668 }
4669
912e3acd
JS
4670 rc = pci_enable_sriov(pdev, nr_vfn);
4671 if (rc) {
4672 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4673 "2806 Failed to enable sriov on this device "
4674 "with vfn number nr_vf:%d, rc:%d\n",
4675 nr_vfn, rc);
4676 } else
4677 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4678 "2807 Successful enable sriov on this device "
4679 "with vfn number nr_vf:%d\n", nr_vfn);
4680 return rc;
4681}
4682
3772a991
JS
4683/**
4684 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4685 * @phba: pointer to lpfc hba data structure.
4686 *
4687 * This routine is invoked to set up the driver internal resources specific to
4688 * support the SLI-3 HBA device it attached to.
4689 *
4690 * Return codes
af901ca1 4691 * 0 - successful
3772a991
JS
4692 * other values - error
4693 **/
4694static int
4695lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4696{
4697 struct lpfc_sli *psli;
912e3acd 4698 int rc;
dea3101e 4699
2e0fef85 4700 /*
3772a991 4701 * Initialize timers used by driver
2e0fef85 4702 */
dea3101e 4703
3772a991 4704 /* Heartbeat timer */
858c9f6c
JS
4705 init_timer(&phba->hb_tmofunc);
4706 phba->hb_tmofunc.function = lpfc_hb_timeout;
4707 phba->hb_tmofunc.data = (unsigned long)phba;
4708
dea3101e 4709 psli = &phba->sli;
3772a991 4710 /* MBOX heartbeat timer */
dea3101e 4711 init_timer(&psli->mbox_tmo);
4712 psli->mbox_tmo.function = lpfc_mbox_timeout;
2e0fef85 4713 psli->mbox_tmo.data = (unsigned long) phba;
3772a991 4714 /* FCP polling mode timer */
875fbdfe
JSEC
4715 init_timer(&phba->fcp_poll_timer);
4716 phba->fcp_poll_timer.function = lpfc_poll_timeout;
2e0fef85 4717 phba->fcp_poll_timer.data = (unsigned long) phba;
3772a991 4718 /* Fabric block timer */
92d7f7b0
JS
4719 init_timer(&phba->fabric_block_timer);
4720 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4721 phba->fabric_block_timer.data = (unsigned long) phba;
3772a991 4722 /* EA polling mode timer */
9399627f
JS
4723 init_timer(&phba->eratt_poll);
4724 phba->eratt_poll.function = lpfc_poll_eratt;
4725 phba->eratt_poll.data = (unsigned long) phba;
dea3101e 4726
3772a991
JS
4727 /* Host attention work mask setup */
4728 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4729 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
dea3101e 4730
3772a991
JS
4731 /* Get all the module params for configuring this host */
4732 lpfc_get_cfgparam(phba);
49198b37
JS
4733 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4734 phba->menlo_flag |= HBA_MENLO_SUPPORT;
4735 /* check for menlo minimum sg count */
4736 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4737 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4738 }
4739
2a76a283
JS
4740 if (!phba->sli.ring)
4741 phba->sli.ring = (struct lpfc_sli_ring *)
4742 kzalloc(LPFC_SLI3_MAX_RING *
4743 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4744 if (!phba->sli.ring)
4745 return -ENOMEM;
4746
dea3101e 4747 /*
96f7077f 4748 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
3772a991 4749 * used to create the sg_dma_buf_pool must be dynamically calculated.
dea3101e 4750 */
3772a991 4751
96f7077f
JS
4752	/* Initialize the host templates with the configured values. */
4753 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4754 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4755
4756 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
3772a991 4757 if (phba->cfg_enable_bg) {
96f7077f
JS
4758 /*
4759 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
4760		 * the FCP rsp, and a BDE for each. Since we have no control
4761		 * over how many protection data segments the SCSI layer
4762		 * will hand us (i.e., there could be one for every block
4763		 * in the IO), we just allocate enough BDEs to accommodate
4764		 * our maximum count, and we need to limit lpfc_sg_seg_cnt to
4765 * minimize the risk of running out.
4766 */
4767 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4768 sizeof(struct fcp_rsp) +
4769 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
4770
4771 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
4772 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
4773
4774 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
4775 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4776 } else {
4777 /*
4778 * The scsi_buf for a regular I/O will hold the FCP cmnd,
4779 * the FCP rsp, a BDE for each, and a BDE for up to
4780 * cfg_sg_seg_cnt data segments.
4781 */
4782 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4783 sizeof(struct fcp_rsp) +
4784 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4785
4786 /* Total BDEs in BPL for scsi_sg_list */
4787 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
901a920f 4788 }
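	/*
	 * Worked example of the non-BlockGuard sizing above (values chosen
	 * for illustration; only the segment arithmetic matters): with
	 * cfg_sg_seg_cnt = 64 the BPL carries 64 data BDEs plus the 2
	 * reserved BDEs for the FCP cmnd and FCP rsp, so
	 * cfg_total_seg_cnt = 66 and sg_dma_buf_size =
	 * sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
	 * 66 * sizeof(struct ulp_bde64).
	 */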
dea3101e 4789
96f7077f
JS
4790 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4791 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
4792 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
4793 phba->cfg_total_seg_cnt);
dea3101e 4794
3772a991
JS
4795 phba->max_vpi = LPFC_MAX_VPI;
4796 /* This will be set to correct value after config_port mbox */
4797 phba->max_vports = 0;
dea3101e 4798
3772a991
JS
4799 /*
4800 * Initialize the SLI Layer to run with lpfc HBAs.
4801 */
4802 lpfc_sli_setup(phba);
4803 lpfc_sli_queue_setup(phba);
ed957684 4804
3772a991
JS
4805 /* Allocate device driver memory */
4806 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4807 return -ENOMEM;
51ef4c26 4808
912e3acd
JS
4809 /*
4810 * Enable sr-iov virtual functions if supported and configured
4811 * through the module parameter.
4812 */
4813 if (phba->cfg_sriov_nr_virtfn > 0) {
4814 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4815 phba->cfg_sriov_nr_virtfn);
4816 if (rc) {
4817 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4818 "2808 Requested number of SR-IOV "
4819 "virtual functions (%d) is not "
4820 "supported\n",
4821 phba->cfg_sriov_nr_virtfn);
4822 phba->cfg_sriov_nr_virtfn = 0;
4823 }
4824 }
4825
3772a991
JS
4826 return 0;
4827}
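/*
 * Minimal sketch of the callback shape the init_timer() calls above
 * expect; the function name and body are hypothetical. Each timer's
 * .data field carries the phba pointer cast to unsigned long, and the
 * callback casts it back.
 */
static void example_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* e.g. record the event and wake the worker thread */
	(void)phba;
}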
ed957684 4828
3772a991
JS
4829/**
4830 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4831 * @phba: pointer to lpfc hba data structure.
4832 *
4833 * This routine is invoked to unset the driver internal resources set up
4834 * specific for supporting the SLI-3 HBA device it is attached to.
4835 **/
4836static void
4837lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4838{
4839 /* Free device driver memory allocated */
4840 lpfc_mem_free_all(phba);
3163f725 4841
3772a991
JS
4842 return;
4843}
dea3101e 4844
3772a991 4845/**
da0436e9 4846 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3772a991
JS
4847 * @phba: pointer to lpfc hba data structure.
4848 *
da0436e9
JS
4849 * This routine is invoked to set up the driver internal resources specific to
4850 * support the SLI-4 HBA device it is attached to.
3772a991
JS
4851 *
4852 * Return codes
af901ca1 4853 * 0 - successful
da0436e9 4854 * other values - error
3772a991
JS
4855 **/
4856static int
da0436e9 4857lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3772a991 4858{
7bb03bbf 4859 struct lpfc_vector_map_info *cpup;
da0436e9 4860 struct lpfc_sli *psli;
28baac74 4861 LPFC_MBOXQ_t *mboxq;
96f7077f 4862 int rc, i, hbq_count, max_buf_size;
28baac74
JS
4863 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4864 struct lpfc_mqe *mqe;
09294d46 4865 int longs;
1ba981fd 4866 int fof_vectors = 0;
da0436e9 4867
716d3bc5
JS
4868 /* Get all the module params for configuring this host */
4869 lpfc_get_cfgparam(phba);
4870
da0436e9
JS
4871	/* Before proceeding, wait for POST done and device ready */
4872 rc = lpfc_sli4_post_status_check(phba);
4873 if (rc)
4874 return -ENODEV;
4875
3772a991 4876 /*
da0436e9 4877 * Initialize timers used by driver
3772a991 4878 */
3772a991 4879
da0436e9
JS
4880 /* Heartbeat timer */
4881 init_timer(&phba->hb_tmofunc);
4882 phba->hb_tmofunc.function = lpfc_hb_timeout;
4883 phba->hb_tmofunc.data = (unsigned long)phba;
19ca7609
JS
4884 init_timer(&phba->rrq_tmr);
4885 phba->rrq_tmr.function = lpfc_rrq_timeout;
4886 phba->rrq_tmr.data = (unsigned long)phba;
3772a991 4887
da0436e9
JS
4888 psli = &phba->sli;
4889 /* MBOX heartbeat timer */
4890 init_timer(&psli->mbox_tmo);
4891 psli->mbox_tmo.function = lpfc_mbox_timeout;
4892 psli->mbox_tmo.data = (unsigned long) phba;
4893 /* Fabric block timer */
4894 init_timer(&phba->fabric_block_timer);
4895 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4896 phba->fabric_block_timer.data = (unsigned long) phba;
4897 /* EA polling mode timer */
4898 init_timer(&phba->eratt_poll);
4899 phba->eratt_poll.function = lpfc_poll_eratt;
4900 phba->eratt_poll.data = (unsigned long) phba;
ecfd03c6
JS
4901 /* FCF rediscover timer */
4902 init_timer(&phba->fcf.redisc_wait);
4903 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4904 phba->fcf.redisc_wait.data = (unsigned long)phba;
4905
7ad20aa9
JS
4906 /*
4907 * Control structure for handling external multi-buffer mailbox
4908 * command pass-through.
4909 */
4910 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4911 sizeof(struct lpfc_mbox_ext_buf_ctx));
4912 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4913
da0436e9 4914 phba->max_vpi = LPFC_MAX_VPI;
67d12733 4915
da0436e9
JS
4916 /* This will be set to correct value after the read_config mbox */
4917 phba->max_vports = 0;
3772a991 4918
da0436e9
JS
4919 /* Program the default value of vlan_id and fc_map */
4920 phba->valid_vlan = 0;
4921 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4922 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4923 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3772a991 4924
2a76a283
JS
4925 /*
4926 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
4927 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
4928 */
4929 if (!phba->sli.ring)
4930 phba->sli.ring = kzalloc(
67d12733 4931 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
2a76a283
JS
4932 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4933 if (!phba->sli.ring)
4934 return -ENOMEM;
09294d46 4935
da0436e9 4936 /*
09294d46
JS
4937	 * No matter what family our adapter is in, we are
4938	 * limited to 2 pages and 512 SGEs for our SGL.
4939 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
4940 */
4941 max_buf_size = (2 * SLI4_PAGE_SIZE);
4942 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
4943 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
09294d46 4944
da0436e9 4945 /*
96f7077f 4946 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
da0436e9 4947 * used to create the sg_dma_buf_pool must be dynamically calculated.
da0436e9 4948 */
96f7077f
JS
4949
4950 if (phba->cfg_enable_bg) {
4951 /*
4952 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
4953		 * the FCP rsp, and an SGE for each. Since we have no control
4954		 * over how many protection data segments the SCSI layer
4955		 * will hand us (i.e., there could be one for every block
4956		 * in the IO), we just allocate enough SGEs to accommodate
4957		 * our maximum count, and we need to limit lpfc_sg_seg_cnt to
4958 * minimize the risk of running out.
4959 */
4960 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4961 sizeof(struct fcp_rsp) + max_buf_size;
4962
4963 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
4964 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
4965
4966 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
4967 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
4968 } else {
4969 /*
4970 * The scsi_buf for a regular I/O will hold the FCP cmnd,
4971 * the FCP rsp, a SGE for each, and a SGE for up to
4972 * cfg_sg_seg_cnt data segments.
4973 */
4974 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4975 sizeof(struct fcp_rsp) +
4976 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
4977
4978 /* Total SGEs for scsi_sg_list */
4979 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
4980 /*
4981 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
4982 * to post 1 page for the SGL.
4983 */
085c647c 4984 }
acd6859b 4985
96f7077f
JS
4986 /* Initialize the host templates with the updated values. */
4987 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4988 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4989
4990 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
4991 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
4992 else
4993 phba->cfg_sg_dma_buf_size =
4994 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
4995
4996 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4997 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
4998 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
4999 phba->cfg_total_seg_cnt);
3772a991 5000
da0436e9
JS
5001 /* Initialize buffer queue management fields */
5002 hbq_count = lpfc_sli_hbq_count();
5003 for (i = 0; i < hbq_count; ++i)
5004 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5005 INIT_LIST_HEAD(&phba->rb_pend_list);
5006 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
5007 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3772a991 5008
da0436e9
JS
5009 /*
5010 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
5011 */
5012 /* Initialize the Abort scsi buffer list used by driver */
5013 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
5014 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
5015 /* This abort list used by worker thread */
5016 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3772a991 5017
da0436e9 5018 /*
6d368e53 5019 * Initialize driver internal slow-path work queues
da0436e9 5020 */
3772a991 5021
da0436e9
JS
5022	/* Driver internal slow-path CQ Event pool */
5023 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
5024 /* Response IOCB work queue list */
45ed1190 5025 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
da0436e9
JS
5026 /* Asynchronous event CQ Event work queue list */
5027 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
5028 /* Fast-path XRI aborted CQ Event work queue list */
5029 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
5030 /* Slow-path XRI aborted CQ Event work queue list */
5031 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
5032 /* Receive queue CQ Event work queue list */
5033 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
5034
6d368e53
JS
5035 /* Initialize extent block lists. */
5036 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
5037 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
5038 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
5039 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
5040
da0436e9
JS
5041 /* Initialize the driver internal SLI layer lists. */
5042 lpfc_sli_setup(phba);
5043 lpfc_sli_queue_setup(phba);
3772a991 5044
da0436e9
JS
5045 /* Allocate device driver memory */
5046 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
5047 if (rc)
5048 return -ENOMEM;
5049
2fcee4bf
JS
5050 /* IF Type 2 ports get initialized now. */
5051 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5052 LPFC_SLI_INTF_IF_TYPE_2) {
5053 rc = lpfc_pci_function_reset(phba);
5054 if (unlikely(rc))
5055 return -ENODEV;
5056 }
5057
da0436e9
JS
5058 /* Create the bootstrap mailbox command */
5059 rc = lpfc_create_bootstrap_mbox(phba);
5060 if (unlikely(rc))
5061 goto out_free_mem;
5062
5063 /* Set up the host's endian order with the device. */
5064 rc = lpfc_setup_endian_order(phba);
5065 if (unlikely(rc))
5066 goto out_free_bsmbx;
5067
5068 /* Set up the hba's configuration parameters. */
5069 rc = lpfc_sli4_read_config(phba);
cff261f6
JS
5070 if (unlikely(rc))
5071 goto out_free_bsmbx;
5072 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
da0436e9
JS
5073 if (unlikely(rc))
5074 goto out_free_bsmbx;
5075
2fcee4bf
JS
5076 /* IF Type 0 ports get initialized now. */
5077 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5078 LPFC_SLI_INTF_IF_TYPE_0) {
5079 rc = lpfc_pci_function_reset(phba);
5080 if (unlikely(rc))
5081 goto out_free_bsmbx;
5082 }
da0436e9 5083
cb5172ea
JS
5084 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
5085 GFP_KERNEL);
5086 if (!mboxq) {
5087 rc = -ENOMEM;
5088 goto out_free_bsmbx;
5089 }
5090
fedd3b7b 5091 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
cb5172ea
JS
5092 lpfc_supported_pages(mboxq);
5093 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
fedd3b7b
JS
5094 if (!rc) {
5095 mqe = &mboxq->u.mqe;
5096 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
5097 LPFC_MAX_SUPPORTED_PAGES);
5098 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
5099 switch (pn_page[i]) {
5100 case LPFC_SLI4_PARAMETERS:
5101 phba->sli4_hba.pc_sli4_params.supported = 1;
5102 break;
5103 default:
5104 break;
5105 }
5106 }
5107 /* Read the port's SLI4 Parameters capabilities if supported. */
5108 if (phba->sli4_hba.pc_sli4_params.supported)
5109 rc = lpfc_pc_sli4_params_get(phba, mboxq);
5110 if (rc) {
5111 mempool_free(mboxq, phba->mbox_mem_pool);
5112 rc = -EIO;
5113 goto out_free_bsmbx;
cb5172ea
JS
5114 }
5115 }
fedd3b7b
JS
5116 /*
5117 * Get sli4 parameters that override parameters from Port capabilities.
6d368e53
JS
5118 * If this call fails, it isn't critical unless the SLI4 parameters come
5119 * back in conflict.
fedd3b7b 5120 */
6d368e53
JS
5121 rc = lpfc_get_sli4_parameters(phba, mboxq);
5122 if (rc) {
5123 if (phba->sli4_hba.extents_in_use &&
5124 phba->sli4_hba.rpi_hdrs_in_use) {
5125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5126 "2999 Unsupported SLI4 Parameters "
5127 "Extents and RPI headers enabled.\n");
5128 goto out_free_bsmbx;
5129 }
5130 }
cb5172ea 5131 mempool_free(mboxq, phba->mbox_mem_pool);
1ba981fd
JS
5132
5133 /* Verify OAS is supported */
5134 lpfc_sli4_oas_verify(phba);
5135 if (phba->cfg_fof)
5136 fof_vectors = 1;
5137
5350d872
JS
5138 /* Verify all the SLI4 queues */
5139 rc = lpfc_sli4_queue_verify(phba);
da0436e9
JS
5140 if (rc)
5141 goto out_free_bsmbx;
5142
5143 /* Create driver internal CQE event pool */
5144 rc = lpfc_sli4_cq_event_pool_create(phba);
5145 if (rc)
5350d872 5146 goto out_free_bsmbx;
da0436e9 5147
8a9d2e80
JS
5148 /* Initialize sgl lists per host */
5149 lpfc_init_sgl_list(phba);
5150
5151 /* Allocate and initialize active sgl array */
da0436e9
JS
5152 rc = lpfc_init_active_sgl_array(phba);
5153 if (rc) {
5154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5155 "1430 Failed to initialize sgl list.\n");
8a9d2e80 5156 goto out_destroy_cq_event_pool;
da0436e9 5157 }
da0436e9
JS
5158 rc = lpfc_sli4_init_rpi_hdrs(phba);
5159 if (rc) {
5160 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5161 "1432 Failed to initialize rpi headers.\n");
5162 goto out_free_active_sgl;
5163 }
5164
a93ff37a 5165 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
0c9ab6f5
JS
5166 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
5167 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
5168 GFP_KERNEL);
5169 if (!phba->fcf.fcf_rr_bmask) {
5170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5171 "2759 Failed allocate memory for FCF round "
5172 "robin failover bmask\n");
0558056c 5173 rc = -ENOMEM;
0c9ab6f5
JS
5174 goto out_remove_rpi_hdrs;
5175 }
5176
67d12733
JS
5177 phba->sli4_hba.fcp_eq_hdl =
5178 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
1ba981fd
JS
5179 (fof_vectors + phba->cfg_fcp_io_channel)),
5180 GFP_KERNEL);
67d12733
JS
5181 if (!phba->sli4_hba.fcp_eq_hdl) {
5182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5183 "2572 Failed allocate memory for "
5184 "fast-path per-EQ handle array\n");
5185 rc = -ENOMEM;
5186 goto out_free_fcf_rr_bmask;
da0436e9
JS
5187 }
5188
5189 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
1ba981fd
JS
5190 (fof_vectors +
5191 phba->cfg_fcp_io_channel)), GFP_KERNEL);
da0436e9
JS
5192 if (!phba->sli4_hba.msix_entries) {
5193 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5194 "2573 Failed allocate memory for msi-x "
5195 "interrupt vector entries\n");
0558056c 5196 rc = -ENOMEM;
da0436e9
JS
5197 goto out_free_fcp_eq_hdl;
5198 }
5199
7bb03bbf
JS
5200 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
5201 phba->sli4_hba.num_present_cpu),
5202 GFP_KERNEL);
5203 if (!phba->sli4_hba.cpu_map) {
5204 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5205 "3327 Failed allocate memory for msi-x "
5206 "interrupt vector mapping\n");
5207 rc = -ENOMEM;
5208 goto out_free_msix;
5209 }
b246de17
JS
5210 if (lpfc_used_cpu == NULL) {
5211 lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
5212 GFP_KERNEL);
5213 if (!lpfc_used_cpu) {
5214 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5215 "3335 Failed allocate memory for msi-x "
5216 "interrupt vector mapping\n");
5217 kfree(phba->sli4_hba.cpu_map);
5218 rc = -ENOMEM;
5219 goto out_free_msix;
5220 }
5221 for (i = 0; i < lpfc_present_cpu; i++)
5222 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
5223 }
5224
7bb03bbf
JS
5225 /* Initialize io channels for round robin */
5226 cpup = phba->sli4_hba.cpu_map;
5227 rc = 0;
5228 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
5229 cpup->channel_id = rc;
5230 rc++;
5231 if (rc >= phba->cfg_fcp_io_channel)
5232 rc = 0;
5233 }
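	/*
	 * Example of the round-robin mapping just built, assuming 8
	 * present CPUs and cfg_fcp_io_channel = 4 (illustrative values):
	 * cpu0->ch0, cpu1->ch1, cpu2->ch2, cpu3->ch3, cpu4->ch0,
	 * cpu5->ch1, cpu6->ch2, cpu7->ch3.
	 */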
5234
912e3acd
JS
5235 /*
5236 * Enable sr-iov virtual functions if supported and configured
5237 * through the module parameter.
5238 */
5239 if (phba->cfg_sriov_nr_virtfn > 0) {
5240 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5241 phba->cfg_sriov_nr_virtfn);
5242 if (rc) {
5243 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5244 "3020 Requested number of SR-IOV "
5245 "virtual functions (%d) is not "
5246 "supported\n",
5247 phba->cfg_sriov_nr_virtfn);
5248 phba->cfg_sriov_nr_virtfn = 0;
5249 }
5250 }
5251
5248a749 5252 return 0;
da0436e9 5253
7bb03bbf
JS
5254out_free_msix:
5255 kfree(phba->sli4_hba.msix_entries);
da0436e9
JS
5256out_free_fcp_eq_hdl:
5257 kfree(phba->sli4_hba.fcp_eq_hdl);
0c9ab6f5
JS
5258out_free_fcf_rr_bmask:
5259 kfree(phba->fcf.fcf_rr_bmask);
da0436e9
JS
5260out_remove_rpi_hdrs:
5261 lpfc_sli4_remove_rpi_hdrs(phba);
5262out_free_active_sgl:
5263 lpfc_free_active_sgl(phba);
da0436e9
JS
5264out_destroy_cq_event_pool:
5265 lpfc_sli4_cq_event_pool_destroy(phba);
da0436e9
JS
5266out_free_bsmbx:
5267 lpfc_destroy_bootstrap_mbox(phba);
5268out_free_mem:
5269 lpfc_mem_free(phba);
5270 return rc;
5271}
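/*
 * Minimal sketch of the unwind idiom used by the function above; all
 * names here are hypothetical. Each acquisition gets a cleanup label,
 * and a failure jumps to the label that releases everything acquired
 * so far, in reverse order.
 */
static int example_setup(struct lpfc_hba *phba)
{
	if (example_alloc_a(phba))	/* hypothetical helper */
		return -ENOMEM;
	if (example_alloc_b(phba))	/* hypothetical helper */
		goto out_free_a;
	return 0;

out_free_a:
	example_free_a(phba);		/* hypothetical helper */
	return -ENOMEM;
}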
5272
5273/**
5274 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
5275 * @phba: pointer to lpfc hba data structure.
5276 *
5277 * This routine is invoked to unset the driver internal resources set up
5278 * specific for supporting the SLI-4 HBA device it is attached to.
5279 **/
5280static void
5281lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5282{
5283 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5284
7bb03bbf
JS
5285 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
5286 kfree(phba->sli4_hba.cpu_map);
5287 phba->sli4_hba.num_present_cpu = 0;
5288 phba->sli4_hba.num_online_cpu = 0;
76fd07a6 5289 phba->sli4_hba.curr_disp_cpu = 0;
7bb03bbf 5290
da0436e9
JS
5291 /* Free memory allocated for msi-x interrupt vector entries */
5292 kfree(phba->sli4_hba.msix_entries);
5293
5294 /* Free memory allocated for fast-path work queue handles */
5295 kfree(phba->sli4_hba.fcp_eq_hdl);
5296
5297 /* Free the allocated rpi headers. */
5298 lpfc_sli4_remove_rpi_hdrs(phba);
d11e31dd 5299 lpfc_sli4_remove_rpis(phba);
da0436e9 5300
0c9ab6f5
JS
5301 /* Free eligible FCF index bmask */
5302 kfree(phba->fcf.fcf_rr_bmask);
5303
da0436e9
JS
5304 /* Free the ELS sgl list */
5305 lpfc_free_active_sgl(phba);
8a9d2e80 5306 lpfc_free_els_sgl_list(phba);
da0436e9 5307
da0436e9
JS
5308 /* Free the completion queue EQ event pool */
5309 lpfc_sli4_cq_event_release_all(phba);
5310 lpfc_sli4_cq_event_pool_destroy(phba);
5311
6d368e53
JS
5312 /* Release resource identifiers. */
5313 lpfc_sli4_dealloc_resource_identifiers(phba);
5314
da0436e9
JS
5315 /* Free the bsmbx region. */
5316 lpfc_destroy_bootstrap_mbox(phba);
5317
5318 /* Free the SLI Layer memory with SLI4 HBAs */
5319 lpfc_mem_free_all(phba);
5320
5321 /* Free the current connect table */
5322 list_for_each_entry_safe(conn_entry, next_conn_entry,
4d9ab994
JS
5323 &phba->fcf_conn_rec_list, list) {
5324 list_del_init(&conn_entry->list);
da0436e9 5325 kfree(conn_entry);
4d9ab994 5326 }
da0436e9
JS
5327
5328 return;
5329}
5330
5331/**
25985edc 5332 * lpfc_init_api_table_setup - Set up init api function jump table
da0436e9
JS
5333 * @phba: The hba struct for which this call is being executed.
5334 * @dev_grp: The HBA PCI-Device group number.
5335 *
5336 * This routine sets up the device INIT interface API function jump table
5337 * in @phba struct.
5338 *
5339 * Returns: 0 - success, -ENODEV - failure.
5340 **/
5341int
5342lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5343{
84d1b006
JS
5344 phba->lpfc_hba_init_link = lpfc_hba_init_link;
5345 phba->lpfc_hba_down_link = lpfc_hba_down_link;
7f86059a 5346 phba->lpfc_selective_reset = lpfc_selective_reset;
da0436e9
JS
5347 switch (dev_grp) {
5348 case LPFC_PCI_DEV_LP:
5349 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5350 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5351 phba->lpfc_stop_port = lpfc_stop_port_s3;
5352 break;
5353 case LPFC_PCI_DEV_OC:
5354 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5355 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5356 phba->lpfc_stop_port = lpfc_stop_port_s4;
5357 break;
5358 default:
5359 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5360 "1431 Invalid HBA PCI-device group: 0x%x\n",
5361 dev_grp);
5362 return -ENODEV;
5363 break;
5364 }
5365 return 0;
5366}
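/*
 * Sketch of how the jump table set up above is consumed; the wrapper
 * shown is hypothetical, but the indirect call is the pattern used
 * throughout the driver.
 */
static inline void example_stop(struct lpfc_hba *phba)
{
	/* resolves to lpfc_stop_port_s3 or lpfc_stop_port_s4 */
	phba->lpfc_stop_port(phba);
}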
5367
5368/**
5369 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
5370 * @phba: pointer to lpfc hba data structure.
5371 *
5372 * This routine is invoked to set up the driver internal resources before the
5373 * device specific resource setup to support the HBA device it is attached to.
5374 *
5375 * Return codes
af901ca1 5376 * 0 - successful
da0436e9
JS
5377 * other values - error
5378 **/
5379static int
5380lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5381{
5382 /*
5383 * Driver resources common to all SLI revisions
5384 */
5385 atomic_set(&phba->fast_event_count, 0);
5386 spin_lock_init(&phba->hbalock);
5387
5388 /* Initialize ndlp management spinlock */
5389 spin_lock_init(&phba->ndlp_lock);
5390
5391 INIT_LIST_HEAD(&phba->port_list);
5392 INIT_LIST_HEAD(&phba->work_list);
5393 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5394
5395 /* Initialize the wait queue head for the kernel thread */
5396 init_waitqueue_head(&phba->work_waitq);
5397
5398 /* Initialize the scsi buffer list used by driver for scsi IO */
a40fc5f0
JS
5399 spin_lock_init(&phba->scsi_buf_list_get_lock);
5400 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5401 spin_lock_init(&phba->scsi_buf_list_put_lock);
5402 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
da0436e9
JS
5403
5404 /* Initialize the fabric iocb list */
5405 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5406
5407 /* Initialize list to save ELS buffers */
5408 INIT_LIST_HEAD(&phba->elsbuf);
5409
5410 /* Initialize FCF connection rec list */
5411 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5412
1ba981fd
JS
5413 /* Initialize OAS configuration list */
5414 spin_lock_init(&phba->devicelock);
5415 INIT_LIST_HEAD(&phba->luns);
5416
da0436e9
JS
5417 return 0;
5418}
5419
5420/**
5421 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
5422 * @phba: pointer to lpfc hba data structure.
5423 *
5424 * This routine is invoked to set up the driver internal resources after the
5425 * device specific resource setup to support the HBA device it is attached to.
5426 *
5427 * Return codes
af901ca1 5428 * 0 - successful
da0436e9
JS
5429 * other values - error
5430 **/
5431static int
5432lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5433{
5434 int error;
5435
5436 /* Startup the kernel thread for this host adapter. */
5437 phba->worker_thread = kthread_run(lpfc_do_work, phba,
5438 "lpfc_worker_%d", phba->brd_no);
5439 if (IS_ERR(phba->worker_thread)) {
5440 error = PTR_ERR(phba->worker_thread);
5441 return error;
3772a991
JS
5442 }
5443
5444 return 0;
5445}
5446
5447/**
5448 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
5449 * @phba: pointer to lpfc hba data structure.
5450 *
5451 * This routine is invoked to unset the driver internal resources set up after
5452 * the device specific resource setup for supporting the HBA device it
5453 * is attached to.
5454 **/
5455static void
5456lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
5457{
5458 /* Stop kernel worker thread */
5459 kthread_stop(phba->worker_thread);
5460}
5461
5462/**
5463 * lpfc_free_iocb_list - Free iocb list.
5464 * @phba: pointer to lpfc hba data structure.
5465 *
5466 * This routine is invoked to free the driver's IOCB list and memory.
5467 **/
5468static void
5469lpfc_free_iocb_list(struct lpfc_hba *phba)
5470{
5471 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5472
5473 spin_lock_irq(&phba->hbalock);
5474 list_for_each_entry_safe(iocbq_entry, iocbq_next,
5475 &phba->lpfc_iocb_list, list) {
5476 list_del(&iocbq_entry->list);
5477 kfree(iocbq_entry);
5478 phba->total_iocbq_bufs--;
98c9ea5c 5479 }
3772a991
JS
5480 spin_unlock_irq(&phba->hbalock);
5481
5482 return;
5483}
5484
5485/**
5486 * lpfc_init_iocb_list - Allocate and initialize iocb list.
5487 * @phba: pointer to lpfc hba data structure.
5488 *
5489 * This routine is invoked to allocate and initialize the driver's IOCB
5490 * list and set up the IOCB tag array accordingly.
5491 *
5492 * Return codes
af901ca1 5493 * 0 - successful
3772a991
JS
5494 * other values - error
5495 **/
5496static int
5497lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5498{
5499 struct lpfc_iocbq *iocbq_entry = NULL;
5500 uint16_t iotag;
5501 int i;
dea3101e 5502
5503 /* Initialize and populate the iocb list per host. */
5504 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3772a991 5505 for (i = 0; i < iocb_count; i++) {
dd00cc48 5506 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
dea3101e 5507 if (iocbq_entry == NULL) {
5508 printk(KERN_ERR "%s: only allocated %d iocbs of "
5509 "expected %d count. Unloading driver.\n",
cadbd4a5 5510 __func__, i, LPFC_IOCB_LIST_CNT);
dea3101e 5511 goto out_free_iocbq;
5512 }
5513
604a3e30
JB
5514 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
5515 if (iotag == 0) {
3772a991 5516 kfree(iocbq_entry);
604a3e30 5517 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3772a991 5518 "Unloading driver.\n", __func__);
604a3e30
JB
5519 goto out_free_iocbq;
5520 }
6d368e53 5521 iocbq_entry->sli4_lxritag = NO_XRI;
3772a991 5522 iocbq_entry->sli4_xritag = NO_XRI;
2e0fef85
JS
5523
5524 spin_lock_irq(&phba->hbalock);
dea3101e 5525 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5526 phba->total_iocbq_bufs++;
2e0fef85 5527 spin_unlock_irq(&phba->hbalock);
dea3101e 5528 }
5529
3772a991 5530 return 0;
dea3101e 5531
3772a991
JS
5532out_free_iocbq:
5533 lpfc_free_iocb_list(phba);
dea3101e 5534
3772a991
JS
5535 return -ENOMEM;
5536}
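/*
 * Illustrative sketch, not the driver's actual allocator: pulling one
 * entry off the free list built above, under hbalock. The function
 * name is hypothetical.
 */
static struct lpfc_iocbq *example_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq;

	spin_lock_irq(&phba->hbalock);
	iocbq = list_first_entry_or_null(&phba->lpfc_iocb_list,
					 struct lpfc_iocbq, list);
	if (iocbq)
		list_del(&iocbq->list);
	spin_unlock_irq(&phba->hbalock);
	return iocbq;
}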
5e9d9b82 5537
3772a991 5538/**
8a9d2e80 5539 * lpfc_free_sgl_list - Free a given sgl list.
da0436e9 5540 * @phba: pointer to lpfc hba data structure.
8a9d2e80 5541 * @sglq_list: pointer to the head of sgl list.
3772a991 5542 *
8a9d2e80 5543 * This routine is invoked to free a give sgl list and memory.
3772a991 5544 **/
8a9d2e80
JS
5545void
5546lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
3772a991 5547{
da0436e9 5548 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8a9d2e80
JS
5549
5550 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5551 list_del(&sglq_entry->list);
5552 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5553 kfree(sglq_entry);
5554 }
5555}
5556
5557/**
5558 * lpfc_free_els_sgl_list - Free els sgl list.
5559 * @phba: pointer to lpfc hba data structure.
5560 *
5561 * This routine is invoked to free the driver's els sgl list and memory.
5562 **/
5563static void
5564lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5565{
da0436e9 5566 LIST_HEAD(sglq_list);
dea3101e 5567
8a9d2e80 5568 /* Retrieve all els sgls from driver list */
da0436e9
JS
5569 spin_lock_irq(&phba->hbalock);
5570 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5571 spin_unlock_irq(&phba->hbalock);
dea3101e 5572
8a9d2e80
JS
5573 /* Now free the sgl list */
5574 lpfc_free_sgl_list(phba, &sglq_list);
da0436e9 5575}
92d7f7b0 5576
da0436e9
JS
5577/**
5578 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5579 * @phba: pointer to lpfc hba data structure.
5580 *
5581 * This routine is invoked to allocate the driver's active sgl memory.
5582 * This array will hold the sglq_entry's for active IOs.
5583 **/
5584static int
5585lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5586{
5587 int size;
5588 size = sizeof(struct lpfc_sglq *);
5589 size *= phba->sli4_hba.max_cfg_param.max_xri;
5590
5591 phba->sli4_hba.lpfc_sglq_active_list =
5592 kzalloc(size, GFP_KERNEL);
5593 if (!phba->sli4_hba.lpfc_sglq_active_list)
5594 return -ENOMEM;
5595 return 0;
3772a991
JS
5596}
5597
5598/**
da0436e9 5599 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3772a991
JS
5600 * @phba: pointer to lpfc hba data structure.
5601 *
da0436e9
JS
5602 * This routine is invoked to walk through the array of active sglq entries
5603 * and free all of the resources.
5604 * This is just a place holder for now.
3772a991
JS
5605 **/
5606static void
da0436e9 5607lpfc_free_active_sgl(struct lpfc_hba *phba)
3772a991 5608{
da0436e9 5609 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3772a991
JS
5610}
5611
5612/**
da0436e9 5613 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3772a991
JS
5614 * @phba: pointer to lpfc hba data structure.
5615 *
da0436e9
JS
5616 * This routine is invoked to allocate and initialize the driver's sgl
5617 * list and set up the sgl xritag tag array accordingly.
3772a991 5618 *
3772a991 5619 **/
8a9d2e80 5620static void
da0436e9 5621lpfc_init_sgl_list(struct lpfc_hba *phba)
3772a991 5622{
da0436e9
JS
5623 /* Initialize and populate the sglq list per host/VF. */
5624 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5625 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5626
8a9d2e80
JS
5627 /* els xri-sgl book keeping */
5628 phba->sli4_hba.els_xri_cnt = 0;
0ff10d46 5629
8a9d2e80 5630 /* scsi xri-buffer book keeping */
da0436e9 5631 phba->sli4_hba.scsi_xri_cnt = 0;
da0436e9
JS
5632}
5633
5634/**
5635 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5636 * @phba: pointer to lpfc hba data structure.
5637 *
5638 * This routine is invoked to post rpi header templates to the
88a2cfbb 5639 * port for those SLI4 ports that do not support extents. This routine
da0436e9 5640 * posts a PAGE_SIZE memory region to the port to hold up to
88a2cfbb
JS
5641 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
5642 * and should be called only when interrupts are disabled.
da0436e9
JS
5643 *
5644 * Return codes
af901ca1 5645 * 0 - successful
88a2cfbb 5646 * -ERROR - otherwise.
da0436e9
JS
5647 **/
5648int
5649lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5650{
5651 int rc = 0;
da0436e9
JS
5652 struct lpfc_rpi_hdr *rpi_hdr;
5653
5654 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
ff78d8f9 5655 if (!phba->sli4_hba.rpi_hdrs_in_use)
6d368e53 5656 return rc;
6d368e53
JS
5657 if (phba->sli4_hba.extents_in_use)
5658 return -EIO;
da0436e9
JS
5659
5660 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5661 if (!rpi_hdr) {
5662 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5663 "0391 Error during rpi post operation\n");
5664 lpfc_sli4_remove_rpis(phba);
5665 rc = -ENODEV;
5666 }
5667
5668 return rc;
5669}
5670
5671/**
5672 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5673 * @phba: pointer to lpfc hba data structure.
5674 *
5675 * This routine is invoked to allocate a single 4KB memory region to
5676 * support rpis and store it in the phba. This single region
5677 * provides support for up to 64 rpis. The region is used globally
5678 * by the device.
5679 *
5680 * Returns:
5681 * A valid rpi hdr on success.
5682 * A NULL pointer on any failure.
5683 **/
5684struct lpfc_rpi_hdr *
5685lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5686{
5687 uint16_t rpi_limit, curr_rpi_range;
5688 struct lpfc_dmabuf *dmabuf;
5689 struct lpfc_rpi_hdr *rpi_hdr;
9589b062 5690 uint32_t rpi_count;
da0436e9 5691
6d368e53
JS
5692 /*
5693 * If the SLI4 port supports extents, posting the rpi header isn't
5694 * required. Set the expected maximum count and let the actual value
5695 * get set when extents are fully allocated.
5696 */
5697 if (!phba->sli4_hba.rpi_hdrs_in_use)
5698 return NULL;
5699 if (phba->sli4_hba.extents_in_use)
5700 return NULL;
5701
5702 /* The limit on the logical index is just the max_rpi count. */
da0436e9 5703 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
6d368e53 5704 phba->sli4_hba.max_cfg_param.max_rpi - 1;
da0436e9
JS
5705
5706 spin_lock_irq(&phba->hbalock);
6d368e53
JS
5707 /*
5708 * Establish the starting RPI in this header block. The starting
5709 * rpi is normalized to a zero base because the physical rpi is
5710 * port based.
5711 */
97f2ecf1 5712 curr_rpi_range = phba->sli4_hba.next_rpi;
da0436e9
JS
5713 spin_unlock_irq(&phba->hbalock);
5714
5715 /*
5716 * The port has a limited number of rpis. The increment here
5717 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5718 * and to allow the full max_rpi range per port.
5719 */
5720 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
9589b062
JS
5721 rpi_count = rpi_limit - curr_rpi_range;
5722 else
5723 rpi_count = LPFC_RPI_HDR_COUNT;
da0436e9 5724
6d368e53
JS
5725 if (!rpi_count)
5726 return NULL;
da0436e9
JS
5727 /*
5728 * First allocate the protocol header region for the port. The
5729 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5730 */
5731 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5732 if (!dmabuf)
5733 return NULL;
5734
5735 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5736 LPFC_HDR_TEMPLATE_SIZE,
5737 &dmabuf->phys,
5738 GFP_KERNEL);
5739 if (!dmabuf->virt) {
5740 rpi_hdr = NULL;
5741 goto err_free_dmabuf;
5742 }
5743
5744 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
5745 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5746 rpi_hdr = NULL;
5747 goto err_free_coherent;
5748 }
5749
5750 /* Save the rpi header data for cleanup later. */
5751 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5752 if (!rpi_hdr)
5753 goto err_free_coherent;
5754
5755 rpi_hdr->dmabuf = dmabuf;
5756 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5757 rpi_hdr->page_count = 1;
5758 spin_lock_irq(&phba->hbalock);
6d368e53
JS
5759
5760 /* The rpi_hdr stores the logical index only. */
5761 rpi_hdr->start_rpi = curr_rpi_range;
da0436e9
JS
5762 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5763
5764 /*
6d368e53
JS
5765	 * The next_rpi stores the next logical modulo-64 rpi value used
5766 * to post physical rpis in subsequent rpi postings.
da0436e9 5767 */
9589b062 5768 phba->sli4_hba.next_rpi += rpi_count;
da0436e9
JS
5769 spin_unlock_irq(&phba->hbalock);
5770 return rpi_hdr;
5771
5772 err_free_coherent:
5773 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5774 dmabuf->virt, dmabuf->phys);
5775 err_free_dmabuf:
5776 kfree(dmabuf);
5777 return NULL;
5778}
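/*
 * Example of the accounting above, assuming LPFC_RPI_HDR_COUNT = 64
 * and 200 available rpis (illustrative values): successive calls cover
 * logical rpis 0-63, 64-127 and 128-191; a fourth call would cross
 * rpi_limit and is trimmed to the remaining rpis.
 */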
5779
5780/**
5781 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5782 * @phba: pointer to lpfc hba data structure.
5783 *
5784 * This routine is invoked to remove all memory resources allocated
6d368e53
JS
5785 * to support rpis for SLI4 ports not supporting extents. This routine
5786 * presumes the caller has released all rpis consumed by fabric or port
5787 * logins and is prepared to have the header pages removed.
da0436e9
JS
5788 **/
5789void
5790lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5791{
5792 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5793
6d368e53
JS
5794 if (!phba->sli4_hba.rpi_hdrs_in_use)
5795 goto exit;
5796
da0436e9
JS
5797 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5798 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5799 list_del(&rpi_hdr->list);
5800 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5801 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5802 kfree(rpi_hdr->dmabuf);
5803 kfree(rpi_hdr);
5804 }
6d368e53
JS
5805 exit:
5806 /* There are no rpis available to the port now. */
5807 phba->sli4_hba.next_rpi = 0;
da0436e9
JS
5808}
5809
5810/**
5811 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5812 * @pdev: pointer to pci device data structure.
5813 *
5814 * This routine is invoked to allocate the driver hba data structure for an
5815 * HBA device. If the allocation is successful, the phba reference to the
5816 * PCI device data structure is set.
5817 *
5818 * Return codes
af901ca1 5819 * pointer to @phba - successful
da0436e9
JS
5820 * NULL - error
5821 **/
5822static struct lpfc_hba *
5823lpfc_hba_alloc(struct pci_dev *pdev)
5824{
5825 struct lpfc_hba *phba;
5826
5827 /* Allocate memory for HBA structure */
5828 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5829 if (!phba) {
e34ccdfe 5830 dev_err(&pdev->dev, "failed to allocate hba struct\n");
da0436e9
JS
5831 return NULL;
5832 }
5833
5834 /* Set reference to PCI device in HBA structure */
5835 phba->pcidev = pdev;
5836
5837 /* Assign an unused board number */
5838 phba->brd_no = lpfc_get_instance();
5839 if (phba->brd_no < 0) {
5840 kfree(phba);
5841 return NULL;
5842 }
5843
4fede78f 5844 spin_lock_init(&phba->ct_ev_lock);
f1c3b0fc
JS
5845 INIT_LIST_HEAD(&phba->ct_ev_waiters);
5846
da0436e9
JS
5847 return phba;
5848}
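/*
 * Sketch of what lpfc_get_instance() is assumed to do, based on the
 * matching idr_remove() in lpfc_hba_free() below: allocate the lowest
 * free board number from the lpfc_hba_index idr. The function name
 * here is hypothetical.
 */
static int example_get_instance(void)
{
	/* returns a new id >= 0, or a negative errno on failure */
	return idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
}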
5849
5850/**
5851 * lpfc_hba_free - Free driver hba data structure with a device.
5852 * @phba: pointer to lpfc hba data structure.
5853 *
5854 * This routine is invoked to free the driver hba data structure with an
5855 * HBA device.
5856 **/
5857static void
5858lpfc_hba_free(struct lpfc_hba *phba)
5859{
5860 /* Release the driver assigned board number */
5861 idr_remove(&lpfc_hba_index, phba->brd_no);
5862
2a76a283
JS
5863 /* Free memory allocated with sli rings */
5864 kfree(phba->sli.ring);
5865 phba->sli.ring = NULL;
5866
da0436e9
JS
5867 kfree(phba);
5868 return;
5869}
5870
5871/**
5872 * lpfc_create_shost - Create hba physical port with associated scsi host.
5873 * @phba: pointer to lpfc hba data structure.
5874 *
5875 * This routine is invoked to create HBA physical port and associate a SCSI
5876 * host with it.
5877 *
5878 * Return codes
af901ca1 5879 * 0 - successful
da0436e9
JS
5880 * other values - error
5881 **/
5882static int
5883lpfc_create_shost(struct lpfc_hba *phba)
5884{
5885 struct lpfc_vport *vport;
5886 struct Scsi_Host *shost;
5887
5888 /* Initialize HBA FC structure */
5889 phba->fc_edtov = FF_DEF_EDTOV;
5890 phba->fc_ratov = FF_DEF_RATOV;
5891 phba->fc_altov = FF_DEF_ALTOV;
5892 phba->fc_arbtov = FF_DEF_ARBTOV;
5893
d7c47992 5894 atomic_set(&phba->sdev_cnt, 0);
da0436e9
JS
5895 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5896 if (!vport)
5897 return -ENODEV;
5898
5899 shost = lpfc_shost_from_vport(vport);
5900 phba->pport = vport;
5901 lpfc_debugfs_initialize(vport);
5902 /* Put reference to SCSI host to driver's device private data */
5903 pci_set_drvdata(phba->pcidev, shost);
2e0fef85 5904
3772a991
JS
5905 return 0;
5906}
db2378e0 5907
3772a991
JS
5908/**
5909 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5910 * @phba: pointer to lpfc hba data structure.
5911 *
5912 * This routine is invoked to destroy HBA physical port and the associated
5913 * SCSI host.
5914 **/
5915static void
5916lpfc_destroy_shost(struct lpfc_hba *phba)
5917{
5918 struct lpfc_vport *vport = phba->pport;
5919
5920 /* Destroy physical port that associated with the SCSI host */
5921 destroy_port(vport);
5922
5923 return;
5924}
5925
5926/**
5927 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5928 * @phba: pointer to lpfc hba data structure.
5929 * @shost: the shost to be used to detect Block guard settings.
5930 *
5931 * This routine sets up the local Block guard protocol settings for @shost.
5932 * This routine also allocates memory for debugging bg buffers.
5933 **/
5934static void
5935lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5936{
bbeb79b9
JS
5937 uint32_t old_mask;
5938 uint32_t old_guard;
5939
3772a991
JS
5940 int pagecnt = 10;
5941 if (lpfc_prot_mask && lpfc_prot_guard) {
5942 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5943 "1478 Registering BlockGuard with the "
5944 "SCSI layer\n");
bbeb79b9
JS
5945
5946 old_mask = lpfc_prot_mask;
5947 old_guard = lpfc_prot_guard;
5948
5949 /* Only allow supported values */
5950 lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
5951 SHOST_DIX_TYPE0_PROTECTION |
5952 SHOST_DIX_TYPE1_PROTECTION);
5953 lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
5954
5955 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
5956 if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
5957 lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
5958
5959 if (lpfc_prot_mask && lpfc_prot_guard) {
5960 if ((old_mask != lpfc_prot_mask) ||
5961 (old_guard != lpfc_prot_guard))
5962 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5963 "1475 Registering BlockGuard with the "
5964 "SCSI layer: mask %d guard %d\n",
5965 lpfc_prot_mask, lpfc_prot_guard);
5966
5967 scsi_host_set_prot(shost, lpfc_prot_mask);
5968 scsi_host_set_guard(shost, lpfc_prot_guard);
5969 } else
5970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5971 "1479 Not Registering BlockGuard with the SCSI "
5972 "layer, Bad protection parameters: %d %d\n",
5973 old_mask, old_guard);
3772a991 5974 }
bbeb79b9 5975
3772a991
JS
5976 if (!_dump_buf_data) {
5977 while (pagecnt) {
5978 spin_lock_init(&_dump_buf_lock);
5979 _dump_buf_data =
5980 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5981 if (_dump_buf_data) {
6a9c52cf
JS
5982 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5983 "9043 BLKGRD: allocated %d pages for "
3772a991
JS
5984 "_dump_buf_data at 0x%p\n",
5985 (1 << pagecnt), _dump_buf_data);
5986 _dump_buf_data_order = pagecnt;
5987 memset(_dump_buf_data, 0,
5988 ((1 << PAGE_SHIFT) << pagecnt));
5989 break;
5990 } else
5991 --pagecnt;
5992 }
5993 if (!_dump_buf_data_order)
6a9c52cf
JS
5994 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5995 "9044 BLKGRD: ERROR unable to allocate "
3772a991
JS
5996 "memory for hexdump\n");
5997 } else
6a9c52cf
JS
5998 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5999 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
3772a991
JS
6000 "\n", _dump_buf_data);
6001 if (!_dump_buf_dif) {
6002 while (pagecnt) {
6003 _dump_buf_dif =
6004 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
6005 if (_dump_buf_dif) {
6a9c52cf
JS
6006 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6007 "9046 BLKGRD: allocated %d pages for "
3772a991
JS
6008 "_dump_buf_dif at 0x%p\n",
6009 (1 << pagecnt), _dump_buf_dif);
6010 _dump_buf_dif_order = pagecnt;
6011 memset(_dump_buf_dif, 0,
6012 ((1 << PAGE_SHIFT) << pagecnt));
6013 break;
6014 } else
6015 --pagecnt;
6016 }
6017 if (!_dump_buf_dif_order)
6a9c52cf
JS
6018 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6019 "9047 BLKGRD: ERROR unable to allocate "
3772a991
JS
6020 "memory for hexdump\n");
6021 } else
6a9c52cf
JS
6022 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6023 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
3772a991
JS
6024 _dump_buf_dif);
6025}
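/*
 * Order arithmetic for the fallback loops above: __get_free_pages()
 * takes an allocation order, so the first attempt with pagecnt = 10
 * asks for 2^10 = 1024 contiguous pages (4 MB with 4 KB pages), and
 * each retry halves the request.
 */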
6026
6027/**
6028 * lpfc_post_init_setup - Perform necessary device post initialization setup.
6029 * @phba: pointer to lpfc hba data structure.
6030 *
6031 * This routine is invoked to perform all the necessary post initialization
6032 * setup for the device.
6033 **/
6034static void
6035lpfc_post_init_setup(struct lpfc_hba *phba)
6036{
6037 struct Scsi_Host *shost;
6038 struct lpfc_adapter_event_header adapter_event;
6039
6040 /* Get the default values for Model Name and Description */
6041 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
6042
6043 /*
6044 * hba setup may have changed the hba_queue_depth so we need to
6045 * adjust the value of can_queue.
6046 */
6047 shost = pci_get_drvdata(phba->pcidev);
6048 shost->can_queue = phba->cfg_hba_queue_depth - 10;
6049 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
6050 lpfc_setup_bg(phba, shost);
6051
6052 lpfc_host_attrib_init(shost);
6053
6054 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
6055 spin_lock_irq(shost->host_lock);
6056 lpfc_poll_start_timer(phba);
6057 spin_unlock_irq(shost->host_lock);
6058 }
6059
6060 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6061 "0428 Perform SCSI scan\n");
6062 /* Send board arrival event to upper layer */
6063 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
6064 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
6065 fc_host_post_vendor_event(shost, fc_get_event_number(),
6066 sizeof(adapter_event),
6067 (char *) &adapter_event,
6068 LPFC_NL_VENDOR_ID);
6069 return;
6070}
6071
6072/**
6073 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
6074 * @phba: pointer to lpfc hba data structure.
6075 *
6076 * This routine is invoked to set up the PCI device memory space for device
6077 * with SLI-3 interface spec.
6078 *
6079 * Return codes
af901ca1 6080 * 0 - successful
3772a991
JS
6081 * other values - error
6082 **/
6083static int
6084lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
6085{
6086 struct pci_dev *pdev;
6087 unsigned long bar0map_len, bar2map_len;
6088 int i, hbq_count;
6089 void *ptr;
6090 int error = -ENODEV;
6091
6092 /* Obtain PCI device reference */
6093 if (!phba->pcidev)
6094 return error;
6095 else
6096 pdev = phba->pcidev;
6097
6098 /* Set the device DMA mask size */
8e68597d
MR
6099	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6100	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6101		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6102		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
3772a991 6103 return error;
8e68597d
MR
6104 }
6105 }
3772a991
JS
6106
6107 /* Get the bus address of Bar0 and Bar2 and the number of bytes
6108 * required by each mapping.
6109 */
6110 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6111 bar0map_len = pci_resource_len(pdev, 0);
6112
6113 phba->pci_bar2_map = pci_resource_start(pdev, 2);
6114 bar2map_len = pci_resource_len(pdev, 2);
6115
6116 /* Map HBA SLIM to a kernel virtual address. */
6117 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
6118 if (!phba->slim_memmap_p) {
6119 dev_printk(KERN_ERR, &pdev->dev,
6120 "ioremap failed for SLIM memory.\n");
6121 goto out;
6122 }
6123
6124 /* Map HBA Control Registers to a kernel virtual address. */
6125 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
6126 if (!phba->ctrl_regs_memmap_p) {
6127 dev_printk(KERN_ERR, &pdev->dev,
6128 "ioremap failed for HBA control registers.\n");
6129 goto out_iounmap_slim;
6130 }
6131
6132 /* Allocate memory for SLI-2 structures */
6133 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
6134 SLI2_SLIM_SIZE,
6135 &phba->slim2p.phys,
6136 GFP_KERNEL);
6137 if (!phba->slim2p.virt)
6138 goto out_iounmap;
6139
6140 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
6141 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7a470277
JS
6142 phba->mbox_ext = (phba->slim2p.virt +
6143 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
3772a991
JS
6144 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
6145 phba->IOCBs = (phba->slim2p.virt +
6146 offsetof(struct lpfc_sli2_slim, IOCBs));
6147
6148 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
6149 lpfc_sli_hbq_size(),
6150 &phba->hbqslimp.phys,
6151 GFP_KERNEL);
6152 if (!phba->hbqslimp.virt)
6153 goto out_free_slim;
6154
6155 hbq_count = lpfc_sli_hbq_count();
6156 ptr = phba->hbqslimp.virt;
6157 for (i = 0; i < hbq_count; ++i) {
6158 phba->hbqs[i].hbq_virt = ptr;
6159 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
6160 ptr += (lpfc_hbq_defs[i]->entry_count *
6161 sizeof(struct lpfc_hbq_entry));
6162 }
6163 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
6164 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
6165
6166 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
6167
6168 INIT_LIST_HEAD(&phba->rb_pend_list);
6169
6170 phba->MBslimaddr = phba->slim_memmap_p;
6171 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
6172 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
6173 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
6174 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
6175
6176 return 0;
6177
6178out_free_slim:
6179 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6180 phba->slim2p.virt, phba->slim2p.phys);
6181out_iounmap:
6182 iounmap(phba->ctrl_regs_memmap_p);
6183out_iounmap_slim:
6184 iounmap(phba->slim_memmap_p);
6185out:
6186 return error;
6187}
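/*
 * The DMA mask fallback above in isolation (illustrative sketch; the
 * function name is hypothetical): prefer 64-bit addressing for both
 * streaming and coherent mappings, fall back to 32-bit.
 */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 0;
	return -ENODEV;
}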
6188
6189/**
6190 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
6191 * @phba: pointer to lpfc hba data structure.
6192 *
6193 * This routine is invoked to unset the PCI device memory space for device
6194 * with SLI-3 interface spec.
6195 **/
6196static void
6197lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
6198{
6199 struct pci_dev *pdev;
6200
6201 /* Obtain PCI device reference */
6202 if (!phba->pcidev)
6203 return;
6204 else
6205 pdev = phba->pcidev;
6206
6207 /* Free coherent DMA memory allocated */
6208 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6209 phba->hbqslimp.virt, phba->hbqslimp.phys);
6210 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6211 phba->slim2p.virt, phba->slim2p.phys);
6212
6213 /* I/O memory unmap */
6214 iounmap(phba->ctrl_regs_memmap_p);
6215 iounmap(phba->slim_memmap_p);
6216
6217 return;
6218}
6219
6220/**
da0436e9 6221 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
3772a991
JS
6222 * @phba: pointer to lpfc hba data structure.
6223 *
da0436e9
JS
6224 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
6225 * to complete and to check its status.
3772a991 6226 *
da0436e9 6227 * Return 0 if successful, otherwise -ENODEV.
3772a991 6228 **/
da0436e9
JS
6229int
6230lpfc_sli4_post_status_check(struct lpfc_hba *phba)
3772a991 6231{
2fcee4bf
JS
6232 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
6233 struct lpfc_register reg_data;
6234 int i, port_error = 0;
6235 uint32_t if_type;
3772a991 6236
9940b97b
JS
6237 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
6238 memset(&reg_data, 0, sizeof(reg_data));
2fcee4bf 6239 if (!phba->sli4_hba.PSMPHRregaddr)
da0436e9 6240 return -ENODEV;
3772a991 6241
da0436e9
JS
6242 /* Wait up to 30 seconds for the SLI Port POST done and ready */
6243 for (i = 0; i < 3000; i++) {
9940b97b
JS
6244 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
6245 &portsmphr_reg.word0) ||
6246 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
2fcee4bf 6247 /* Port has a fatal POST error, break out */
da0436e9
JS
6248 port_error = -ENODEV;
6249 break;
6250 }
2fcee4bf
JS
6251 if (LPFC_POST_STAGE_PORT_READY ==
6252 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
da0436e9 6253 break;
da0436e9 6254 msleep(10);
3772a991
JS
6255 }
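	/* 3000 iterations x msleep(10) gives the 30 second budget above. */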
6256
2fcee4bf
JS
6257 /*
6258 * If there was a port error during POST, then don't proceed with
6259 * other register reads as the data may not be valid. Just exit.
6260 */
6261 if (port_error) {
da0436e9 6262 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2fcee4bf
JS
6263 "1408 Port Failed POST - portsmphr=0x%x, "
6264 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
6265 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
6266 portsmphr_reg.word0,
6267 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
6268 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
6269 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
6270 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
6271 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
6272 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
6273 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
6274 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
6275 } else {
28baac74 6276 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2fcee4bf
JS
6277 "2534 Device Info: SLIFamily=0x%x, "
6278 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
6279 "SLIHint_2=0x%x, FT=0x%x\n",
28baac74
JS
6280 bf_get(lpfc_sli_intf_sli_family,
6281 &phba->sli4_hba.sli_intf),
6282 bf_get(lpfc_sli_intf_slirev,
6283 &phba->sli4_hba.sli_intf),
085c647c
JS
6284 bf_get(lpfc_sli_intf_if_type,
6285 &phba->sli4_hba.sli_intf),
6286 bf_get(lpfc_sli_intf_sli_hint1,
28baac74 6287 &phba->sli4_hba.sli_intf),
085c647c
JS
6288 bf_get(lpfc_sli_intf_sli_hint2,
6289 &phba->sli4_hba.sli_intf),
6290 bf_get(lpfc_sli_intf_func_type,
28baac74 6291 &phba->sli4_hba.sli_intf));
2fcee4bf
JS
6292 /*
6293 * Check for other Port errors during the initialization
6294 * process. Fail the load if the port did not come up
6295 * correctly.
6296 */
6297 if_type = bf_get(lpfc_sli_intf_if_type,
6298 &phba->sli4_hba.sli_intf);
6299 switch (if_type) {
6300 case LPFC_SLI_INTF_IF_TYPE_0:
6301 phba->sli4_hba.ue_mask_lo =
6302 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
6303 phba->sli4_hba.ue_mask_hi =
6304 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
6305 uerrlo_reg.word0 =
6306 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
6307 uerrhi_reg.word0 =
6308 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
6309 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
6310 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
6311 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6312 "1422 Unrecoverable Error "
6313 "Detected during POST "
6314 "uerr_lo_reg=0x%x, "
6315 "uerr_hi_reg=0x%x, "
6316 "ue_mask_lo_reg=0x%x, "
6317 "ue_mask_hi_reg=0x%x\n",
6318 uerrlo_reg.word0,
6319 uerrhi_reg.word0,
6320 phba->sli4_hba.ue_mask_lo,
6321 phba->sli4_hba.ue_mask_hi);
6322 port_error = -ENODEV;
6323 }
6324 break;
6325 case LPFC_SLI_INTF_IF_TYPE_2:
6326 /* Final checks. The port status should be clean. */
9940b97b
JS
6327 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6328 &reg_data.word0) ||
0558056c
JS
6329 (bf_get(lpfc_sliport_status_err, &reg_data) &&
6330 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
2fcee4bf
JS
6331 phba->work_status[0] =
6332 readl(phba->sli4_hba.u.if_type2.
6333 ERR1regaddr);
6334 phba->work_status[1] =
6335 readl(phba->sli4_hba.u.if_type2.
6336 ERR2regaddr);
6337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8fcb8acd
JS
6338 "2888 Unrecoverable port error "
6339 "following POST: port status reg "
6340 "0x%x, port_smphr reg 0x%x, "
2fcee4bf
JS
6341 "error 1=0x%x, error 2=0x%x\n",
6342 reg_data.word0,
6343 portsmphr_reg.word0,
6344 phba->work_status[0],
6345 phba->work_status[1]);
6346 port_error = -ENODEV;
6347 }
6348 break;
6349 case LPFC_SLI_INTF_IF_TYPE_1:
6350 default:
6351 break;
6352 }
28baac74 6353 }
da0436e9
JS
6354 return port_error;
6355}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.EQCQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_SLIPORT_IF0_SMPHR;
	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISR0;
	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_IMR0;
	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISCR0;
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
				LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
				LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
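
/*
 * Editor's sketch (not driver code): each virtual function owns one
 * LPFC_VFR_PAGE_SIZE-sized window of the BAR2 doorbell space, so a register
 * address is base + vf * page_stride + register_offset.  The helper below
 * mirrors that arithmetic; the name and the 4 KB stride are assumptions for
 * illustration only.
 */
static inline char *example_vf_doorbell(char *drbl_base, uint32_t vf,
					uint32_t reg_offset)
{
	const uint32_t vfr_page_stride = 4096;	/* assumed VF page stride */

	return drbl_base + (size_t)vf * vfr_page_stride + reg_offset;
}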

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  bmbx_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, bmbx_size);

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
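
/*
 * Editor's sketch (not driver code): a worked version of the 30-bit address
 * split above.  For a 16-byte-aligned physical address, bits 4..33 become
 * the low-word payload and bits 34..63 the high-word payload; each payload
 * is shifted left by 2 so the port's hi/lo marker can occupy the bottom
 * bits.  The marker values here are placeholders, not the LPFC_BMBX_*
 * constants.
 */
static inline void example_bmbx_split(uint64_t aligned_phys,
				      uint32_t *addr_hi, uint32_t *addr_lo)
{
	const uint32_t hi_marker = 0x2;	/* placeholder tag bits */
	const uint32_t lo_marker = 0x1;

	*addr_hi = (uint32_t)(((aligned_phys >> 34) & 0x3fffffff) << 2) |
		   hi_marker;
	*addr_lo = (uint32_t)(((aligned_phys >> 4) & 0x3fffffff) << 2) |
		   lo_marker;
}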

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources.  This routine requires
 * the caller to ensure all mailbox commands have been recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}

/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the
 * resource allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint32_t desc_count;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2012 Mailbox failed, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI(Count:%d)\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi);
	}

	if (rc)
		goto read_cfg_out;

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed, mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
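
/*
 * Editor's sketch (not driver code): the GET_FUNCTION_CONFIG scan above
 * walks an array of equally sized resource descriptors by byte offset
 * (base + stride * i) and stops at the first entry whose type matches.
 * Generic restatement with hypothetical names:
 */
struct example_rsrc_desc {
	uint32_t type;
	uint32_t payload[3];
};

static struct example_rsrc_desc *
example_find_desc(char *base, uint32_t stride, int max_num, uint32_t wanted)
{
	struct example_rsrc_desc *d;
	int i;

	for (i = 0; i < max_num; i++) {
		d = (struct example_rsrc_desc *)(base + stride * i);
		if (d->type == wanted)
			return d;
	}
	return NULL;	/* caller logs "failed to find descriptor" */
}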

/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0.  This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}
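
/*
 * Editor's sketch (not driver code): the endian-order mailbox works because
 * the host writes fixed, asymmetric byte patterns that the port compares
 * against the values it expects, thereby learning the host's byte order.
 * The same idea in plain C, with a hypothetical probe value:
 */
static inline int example_host_is_big_endian(void)
{
	const uint32_t probe = 0x12345678;
	const unsigned char *p = (const unsigned char *)&probe;

	/* A big-endian host stores the most significant byte first. */
	return p[0] == 0x12;
}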

/**
 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs
 * and CQs. After this routine is called, the counts will be set to valid
 * values that adhere to the constraints of the system's interrupt vectors
 * and the port's queue resources.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	int cfg_fcp_io_channel;
	uint32_t cpu;
	uint32_t i = 0;
	int fof_vectors = phba->cfg_fof ? 1 : 0;

	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on HBA EQ parameters */
	cfg_fcp_io_channel = phba->cfg_fcp_io_channel;

	/* It doesn't make sense to have more IO channels than online CPUs */
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			i++;
	}
	phba->sli4_hba.num_online_cpu = i;
	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
	phba->sli4_hba.curr_disp_cpu = 0;

	if (i < cfg_fcp_io_channel) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"3188 Reducing IO channels to match number of "
				"online CPUs: from %d to %d\n",
				cfg_fcp_io_channel, i);
		cfg_fcp_io_channel = i;
	}

	if (cfg_fcp_io_channel + fof_vectors >
	    phba->sli4_hba.max_cfg_param.max_eq) {
		if (phba->sli4_hba.max_cfg_param.max_eq <
		    LPFC_FCP_IO_CHAN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2574 Not enough EQs (%d) from the "
					"pci function for supporting FCP "
					"EQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_eq,
					phba->cfg_fcp_io_channel);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2575 Reducing IO channels to match number of "
				"available EQs: from %d to %d\n",
				cfg_fcp_io_channel,
				phba->sli4_hba.max_cfg_param.max_eq);
		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
			fof_vectors;
	}

	/* The actual number of FCP event queues adopted */
	phba->cfg_fcp_io_channel = cfg_fcp_io_channel;

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	return 0;
out_error:
	return -ENOMEM;
}
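
/*
 * Editor's sketch (not driver code): the verification above clamps the
 * requested IO channel count first to the number of online CPUs and then to
 * the EQs the PCI function exposes, reserving one vector when Flash
 * Optimized Fabric is enabled.  Hypothetical restatement:
 */
static inline int example_clamp_io_channels(int requested, int online_cpus,
					    int max_eq, int fof_vectors)
{
	int channels = requested;

	if (channels > online_cpus)
		channels = online_cpus;
	if (channels + fof_vectors > max_eq)
		channels = max_eq - fof_vectors;
	return channels;
}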

/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int idx;

	/*
	 * Create HBA Record arrays.
	 */
	if (!phba->cfg_fcp_io_channel)
		return -ERANGE;

	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;

	phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2576 Failed allocate memory for "
				"fast-path EQ record array\n");
		goto out_error;
	}

	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2577 Failed allocate memory for fast-path "
				"CQ record array\n");
		goto out_error;
	}

	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2578 Failed allocate memory for fast-path "
				"WQ record array\n");
		goto out_error;
	}

	/*
	 * Since the first EQ can have multiple CQs associated with it,
	 * this array is used to quickly see if we have a FCP fast-path
	 * CQ match.
	 */
	phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
					phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2545 Failed allocate memory for fast-path "
				"CQ map\n");
		goto out_error;
	}

	/*
	 * Create HBA Event Queues (EQs).  The cfg_fcp_io_channel specifies
	 * how many EQs to create.
	 */
	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {

		/* Create EQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate EQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.hba_eq[idx] = qdesc;

		/* Create Fast Path FCP CQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.fcp_cq[idx] = qdesc;

		/* Create Fast Path FCP WQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.fcp_wq[idx] = qdesc;
	}


	/*
	 * Create Slow Path Completion Queues (CQs)
	 */

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	phba->sli4_hba.els_cq = qdesc;


	/*
	 * Create Slow Path Work Queues (WQs)
	 */

	/* Create Mailbox Command Queue */

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create ELS Work Queues
	 */

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	phba->sli4_hba.els_wq = qdesc;

	/*
	 * Create Receive Queue (RQ)
	 */

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	/* Create the Queues needed for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_create(phba);
	return 0;

out_error:
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}
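
/*
 * Editor's sketch (not driver code): the creation path above first sizes
 * per-channel pointer arrays (EQ/CQ/WQ records), then fills each slot, and
 * on any failure relies on a single teardown routine that tolerates
 * partially built state.  Shape of that pattern, names hypothetical:
 */
static int example_create_per_channel(void ***slots, int channels)
{
	int i;

	*slots = kzalloc(sizeof(void *) * channels, GFP_KERNEL);
	if (!*slots)
		return -ENOMEM;
	for (i = 0; i < channels; i++) {
		(*slots)[i] = kzalloc(64, GFP_KERNEL);	/* stand-in alloc */
		if (!(*slots)[i])
			return -ENOMEM;	/* caller runs the teardown */
	}
	return 0;
}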

/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int idx;

	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);

	if (phba->sli4_hba.hba_eq != NULL) {
		/* Release HBA event queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.hba_eq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.hba_eq[idx]);
				phba->sli4_hba.hba_eq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.hba_eq);
		phba->sli4_hba.hba_eq = NULL;
	}

	if (phba->sli4_hba.fcp_cq != NULL) {
		/* Release FCP completion queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_cq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_cq[idx]);
				phba->sli4_hba.fcp_cq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_cq);
		phba->sli4_hba.fcp_cq = NULL;
	}

	if (phba->sli4_hba.fcp_wq != NULL) {
		/* Release FCP work queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_wq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_wq[idx]);
				phba->sli4_hba.fcp_wq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_wq);
		phba->sli4_hba.fcp_wq = NULL;
	}

	/* Release FCP CQ mapping array */
	if (phba->sli4_hba.fcp_cq_map != NULL) {
		kfree(phba->sli4_hba.fcp_cq_map);
		phba->sli4_hba.fcp_cq_map = NULL;
	}

	/* Release mailbox command work queue */
	if (phba->sli4_hba.mbx_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
		phba->sli4_hba.mbx_wq = NULL;
	}

	/* Release ELS work queue */
	if (phba->sli4_hba.els_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
		phba->sli4_hba.els_wq = NULL;
	}

	/* Release unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
		phba->sli4_hba.hdr_rq = NULL;
	}
	if (phba->sli4_hba.dat_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
		phba->sli4_hba.dat_rq = NULL;
	}

	/* Release ELS complete queue */
	if (phba->sli4_hba.els_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
		phba->sli4_hba.els_cq = NULL;
	}

	/* Release mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
		phba->sli4_hba.mbx_cq = NULL;
	}

	return;
}

/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	int rc = -ENOMEM;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int fcp_cq_index = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	LPFC_MBOXQ_t *mboxq;
	uint32_t length;

	/* Check for dual-ULP support */
	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3249 Unable to allocate memory for "
				"QUERY_FW_CFG mailbox command\n");
		return -ENOMEM;
	}
	length = (sizeof(struct lpfc_mbx_query_fw_config) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3250 QUERY_FW_CFG mailbox failed with status "
				"x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		rc = -ENXIO;
		goto out_error;
	}

	phba->sli4_hba.fw_func_mode =
			mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);

	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	/*
	 * Set up HBA Event Queues (EQs)
	 */

	/* Set up HBA event queue */
	if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3147 Fast-path EQs not allocated\n");
		rc = -ENOMEM;
		goto out_error;
	}
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
		if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", fcp_eqidx);
			rc = -ENOMEM;
			goto out_destroy_hba_eq;
		}
		rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
			 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
			goto out_destroy_hba_eq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2584 HBA EQ setup: "
				"queue[%d]-id=%d\n", fcp_eqidx,
				phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
	}

	/* Set up fast-path FCP Response Complete Queue */
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3148 Fast-path FCP CQ array not "
				"allocated\n");
		rc = -ENOMEM;
		goto out_destroy_hba_eq;
	}

	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0526 Fast-path FCP CQ (%d) not "
					"allocated\n", fcp_cqidx);
			rc = -ENOMEM;
			goto out_destroy_fcp_cq;
		}
		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
			phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0527 Failed setup of fast-path FCP "
					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
			goto out_destroy_fcp_cq;
		}

		/* Setup fcp_cq_map for fast lookup */
		phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2588 FCP CQ setup: cq[%d]-id=%d, "
				"parent seq[%d]-id=%d\n",
				fcp_cqidx,
				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
				fcp_cqidx,
				phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
	}

	/* Set up fast-path FCP Work Queue */
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3149 Fast-path FCP WQ array not "
				"allocated\n");
		rc = -ENOMEM;
		goto out_destroy_fcp_cq;
	}

	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0534 Fast-path FCP WQ (%d) not "
					"allocated\n", fcp_wqidx);
			rc = -ENOMEM;
			goto out_destroy_fcp_wq;
		}
		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
				    phba->sli4_hba.fcp_cq[fcp_wqidx],
				    LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0535 Failed setup of fast-path FCP "
					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
			goto out_destroy_fcp_wq;
		}

		/* Bind this WQ to the next FCP ring */
		pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
		pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
		phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2591 FCP WQ setup: wq[%d]-id=%d, "
				"parent cq[%d]-id=%d\n",
				fcp_wqidx,
				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
				fcp_cq_index,
				phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
	}
	/*
	 * Set up Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX Complete Queue as the first CQ */
	if (!phba->sli4_hba.mbx_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 Mailbox CQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_fcp_wq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
			phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0529 Failed setup of slow-path mailbox CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_wq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.mbx_cq->queue_id,
			phba->sli4_hba.hba_eq[0]->queue_id);

	/* Set up slow-path ELS Complete Queue */
	if (!phba->sli4_hba.els_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS CQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_mbx_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
			phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0531 Failed setup of slow-path ELS CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.els_cq->queue_id,
			phba->sli4_hba.hba_eq[0]->queue_id);

	/*
	 * Set up all the Work Queues (WQs)
	 */

	/* Set up Mailbox Command Queue */
	if (!phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0538 Slow-path MQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_els_cq;
	}
	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0539 Failed setup of slow-path MQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_els_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);

	/* Set up slow-path ELS Work Queue */
	if (!phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0536 Slow-path ELS WQ not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_mbx_wq;
	}
	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
			    phba->sli4_hba.els_cq, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0537 Failed setup of slow-path ELS WQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_wq;
	}

	/* Bind this WQ to the ELS ring */
	pring = &psli->ring[LPFC_ELS_RING];
	pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
	phba->sli4_hba.els_cq->pring = pring;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	/*
	 * Create Receive Queue (RQ)
	 */
	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0540 Receive Queue not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_els_wq;
	}

	lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
	lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);

	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_wq;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_fof) {
		rc = lpfc_fof_queue_setup(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0549 Failed setup of FOF Queues: "
					"rc = 0x%x\n", rc);
			goto out_destroy_els_rq;
		}
	}
	return 0;

out_destroy_els_rq:
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
out_destroy_els_wq:
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
out_destroy_els_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
out_destroy_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
out_destroy_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
out_destroy_hba_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
		lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
out_error:
	return rc;
}
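
/*
 * Editor's sketch (not driver code): the error handling above is the classic
 * kernel goto ladder -- a failing setup step jumps to the label that unwinds
 * only the steps already completed, in reverse order.  Skeleton of the
 * pattern with purely hypothetical step/undo stubs:
 */
static int example_step(int fail) { return fail ? -EIO : 0; }
static void example_undo_a(void) { /* release what step A built */ }

static int example_setup_ladder(void)
{
	int rc;

	rc = example_step(0);		/* step A */
	if (rc)
		goto out_error;
	rc = example_step(0);		/* step B */
	if (rc)
		goto out_undo_a;	/* unwind step A only */
	return 0;

out_undo_a:
	example_undo_a();
out_error:
	return rc;
}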

/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
 * operation.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Unset the queues created for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);
	/* Unset mailbox command work queue */
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
	/* Unset ELS work queue */
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
	/* Unset unsolicited receive queue */
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
	/* Unset FCP work queue */
	if (phba->sli4_hba.fcp_wq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
	}
	/* Unset mailbox command complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
	/* Unset ELS complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	/* Unset FCP response complete queue */
	if (phba->sli4_hba.fcp_cq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
	}
	/* Unset fast-path event queue */
	if (phba->sli4_hba.hba_eq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
	}
}

/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * CQE. For now, this pool is used for the interrupt service routine to queue
 * the following HBA completion queue events for the worker thread to process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that, it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}

/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}

/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free all the pending completion-queue events
 * back into the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}
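
/*
 * Editor's sketch (not driver code): the CQ event pool above is a plain
 * free list -- preallocate nodes onto a list_head, pop one on alloc, push
 * it back on release -- so the interrupt path never has to call the memory
 * allocator.  Minimal restatement with hypothetical names:
 */
struct example_evt {
	struct list_head list;
};

static struct example_evt *example_evt_get(struct list_head *pool)
{
	struct example_evt *evt = NULL;

	if (!list_empty(pool)) {
		evt = list_first_entry(pool, struct example_evt, list);
		list_del(&evt->list);
	}
	return evt;
}

static void example_evt_put(struct list_head *pool, struct example_evt *evt)
{
	list_add_tail(&evt->list, pool);
}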

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		for (num_resets = 0;
		     num_resets < MAX_IF_TYPE_2_RESETS;
		     num_resets++) {
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);
			/*
			 * Poll the Port Status Register and wait for RDY for
			 * up to 10 seconds. If the port doesn't respond, treat
			 * it as an error. If the port responds with RN, start
			 * the loop again.
			 */
			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
				msleep(10);
				if (lpfc_readl(phba->sli4_hba.u.if_type2.
					      STATUSregaddr, &reg_data.word0)) {
					rc = -ENODEV;
					goto out;
				}
				if (bf_get(lpfc_sliport_status_rn, &reg_data))
					reset_again++;
				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
					break;
			}

			/*
			 * If the port responds to the init request with
			 * reset needed, delay for a bit and restart the loop.
			 */
			if (reset_again && (rdy_chk < 1000)) {
				msleep(10);
				reset_again = 0;
				continue;
			}

			/* Detect any port errors. */
			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
			    (rdy_chk >= 1000)) {
				phba->work_status[0] = readl(
					phba->sli4_hba.u.if_type2.ERR1regaddr);
				phba->work_status[1] = readl(
					phba->sli4_hba.u.if_type2.ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2890 Port error detected during port "
					"reset(%d): wait_tmo:%d ms, "
					"port status reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					num_resets, rdy_chk*10,
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
				rc = -ENODEV;
			}

			/*
			 * Terminate the outer loop provided the Port indicated
			 * ready within 10 seconds.
			 */
			if (rdy_chk < 1000)
				break;
		}
		/* delay driver action following IF_TYPE_2 function reset */
		msleep(100);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (num_resets >= MAX_IF_TYPE_2_RESETS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3317 HBA not functional: IP Reset Failed "
				"after (%d) retries, try: "
				"echo fw_reset > board_mode\n", num_resets);
		rc = -ENODEV;
	}

	return rc;
}
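
/*
 * Editor's sketch (not driver code): the if_type 2 reset above is a bounded
 * poll -- up to 1000 reads spaced 10 ms apart (~10 seconds) -- that exits
 * early once the ready bit is seen and reports -ENODEV on timeout.  The
 * readiness test below is a stand-in stub, not the real register read.
 */
static int example_port_is_ready(struct lpfc_hba *phba)
{
	/* Stand-in for reading STATUSregaddr and testing the RDY bit. */
	return 1;
}

static int example_wait_port_ready(struct lpfc_hba *phba)
{
	int rdy_chk;

	for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
		msleep(10);
		if (example_port_is_ready(phba))
			return 0;
	}
	return -ENODEV;
}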
7911
da0436e9
JS
7912/**
7913 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
7914 * @phba: pointer to lpfc hba data structure.
7915 *
7916 * This routine is invoked to set up the PCI device memory space for device
7917 * with SLI-4 interface spec.
7918 *
7919 * Return codes
af901ca1 7920 * 0 - successful
da0436e9
JS
7921 * other values - error
7922 **/
7923static int
7924lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7925{
7926 struct pci_dev *pdev;
7927 unsigned long bar0map_len, bar1map_len, bar2map_len;
7928 int error = -ENODEV;
2fcee4bf 7929 uint32_t if_type;
da0436e9
JS
7930
7931 /* Obtain PCI device reference */
7932 if (!phba->pcidev)
7933 return error;
7934 else
7935 pdev = phba->pcidev;
7936
7937 /* Set the device DMA mask size */
8e68597d
MR
7938 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
7939 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
7940 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
7941 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
da0436e9 7942 return error;
8e68597d
MR
7943 }
7944 }
da0436e9 7945
2fcee4bf
JS
7946 /*
7947 * The BARs and register set definitions and offset locations are
7948 * dependent on the if_type.
7949 */
7950 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
7951 &phba->sli4_hba.sli_intf.word0)) {
7952 return error;
7953 }
7954
7955 /* There is no SLI3 failback for SLI4 devices. */
7956 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
7957 LPFC_SLI_INTF_VALID) {
7958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7959 "2894 SLI_INTF reg contents invalid "
7960 "sli_intf reg 0x%x\n",
7961 phba->sli4_hba.sli_intf.word0);
7962 return error;
7963 }
7964
7965 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7966 /*
 7967	 * Get the bus address of the SLI4 device BAR regions and the
 7968	 * number of bytes required by each mapping. The mapping of the
 7969	 * particular PCI BAR regions is dependent on the type of
 7970	 * SLI4 device.
da0436e9 7971 */
f5ca6f2e
JS
7972 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
7973 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
7974 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
2fcee4bf
JS
7975
7976 /*
7977 * Map SLI4 PCI Config Space Register base to a kernel virtual
7978 * addr
7979 */
7980 phba->sli4_hba.conf_regs_memmap_p =
7981 ioremap(phba->pci_bar0_map, bar0map_len);
7982 if (!phba->sli4_hba.conf_regs_memmap_p) {
7983 dev_printk(KERN_ERR, &pdev->dev,
7984 "ioremap failed for SLI4 PCI config "
7985 "registers.\n");
7986 goto out;
7987 }
f5ca6f2e 7988 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
2fcee4bf
JS
7989 /* Set up BAR0 PCI config space register memory map */
7990 lpfc_sli4_bar0_register_memmap(phba, if_type);
1dfb5a47
JS
7991 } else {
7992 phba->pci_bar0_map = pci_resource_start(pdev, 1);
7993 bar0map_len = pci_resource_len(pdev, 1);
2fcee4bf
JS
7994 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7995 dev_printk(KERN_ERR, &pdev->dev,
7996 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
7997 goto out;
7998 }
7999 phba->sli4_hba.conf_regs_memmap_p =
da0436e9 8000 ioremap(phba->pci_bar0_map, bar0map_len);
2fcee4bf
JS
8001 if (!phba->sli4_hba.conf_regs_memmap_p) {
8002 dev_printk(KERN_ERR, &pdev->dev,
8003 "ioremap failed for SLI4 PCI config "
8004 "registers.\n");
8005 goto out;
8006 }
8007 lpfc_sli4_bar0_register_memmap(phba, if_type);
da0436e9
JS
8008 }
8009
c31098ce 8010 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
f5ca6f2e 8011 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
2fcee4bf
JS
8012 /*
8013 * Map SLI4 if type 0 HBA Control Register base to a kernel
8014 * virtual address and setup the registers.
8015 */
f5ca6f2e
JS
8016 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
8017 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
2fcee4bf 8018 phba->sli4_hba.ctrl_regs_memmap_p =
da0436e9 8019 ioremap(phba->pci_bar1_map, bar1map_len);
2fcee4bf
JS
8020 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
8021 dev_printk(KERN_ERR, &pdev->dev,
da0436e9 8022 "ioremap failed for SLI4 HBA control registers.\n");
2fcee4bf
JS
8023 goto out_iounmap_conf;
8024 }
f5ca6f2e 8025 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
2fcee4bf 8026 lpfc_sli4_bar1_register_memmap(phba);
da0436e9
JS
8027 }
8028
c31098ce 8029 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
f5ca6f2e 8030 (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
2fcee4bf
JS
8031 /*
8032 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
8033 * virtual address and setup the registers.
8034 */
f5ca6f2e
JS
8035 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
8036 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
2fcee4bf 8037 phba->sli4_hba.drbl_regs_memmap_p =
da0436e9 8038 ioremap(phba->pci_bar2_map, bar2map_len);
2fcee4bf
JS
8039 if (!phba->sli4_hba.drbl_regs_memmap_p) {
8040 dev_printk(KERN_ERR, &pdev->dev,
da0436e9 8041 "ioremap failed for SLI4 HBA doorbell registers.\n");
2fcee4bf
JS
8042 goto out_iounmap_ctrl;
8043 }
f5ca6f2e 8044 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
2fcee4bf
JS
8045 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
8046 if (error)
8047 goto out_iounmap_all;
da0436e9
JS
8048 }
8049
da0436e9
JS
8050 return 0;
8051
8052out_iounmap_all:
8053 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
8054out_iounmap_ctrl:
8055 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
8056out_iounmap_conf:
8057 iounmap(phba->sli4_hba.conf_regs_memmap_p);
8058out:
8059 return error;
8060}
8061
8062/**
8063 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
8064 * @phba: pointer to lpfc hba data structure.
8065 *
8066 * This routine is invoked to unset the PCI device memory space for device
8067 * with SLI-4 interface spec.
8068 **/
8069static void
8070lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
8071{
2e90f4b5
JS
8072 uint32_t if_type;
8073 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
da0436e9 8074
2e90f4b5
JS
8075 switch (if_type) {
8076 case LPFC_SLI_INTF_IF_TYPE_0:
8077 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
8078 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
8079 iounmap(phba->sli4_hba.conf_regs_memmap_p);
8080 break;
8081 case LPFC_SLI_INTF_IF_TYPE_2:
8082 iounmap(phba->sli4_hba.conf_regs_memmap_p);
8083 break;
8084 case LPFC_SLI_INTF_IF_TYPE_1:
8085 default:
8086 dev_printk(KERN_ERR, &phba->pcidev->dev,
8087 "FATAL - unsupported SLI4 interface type - %d\n",
8088 if_type);
8089 break;
8090 }
da0436e9
JS
8091}
8092
8093/**
8094 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
8095 * @phba: pointer to lpfc hba data structure.
8096 *
 8097 * This routine is invoked to enable the MSI-X interrupt vectors on a device
 8098 * with the SLI-3 interface spec. The kernel function pci_enable_msix() is
 8099 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 8100 * invoked, enables either all or nothing, depending on the current
 8101 * availability of PCI vector resources. The device driver is responsible
 8102 * for calling the individual request_irq() to register each MSI-X vector
 8103 * with an interrupt handler, which is done in this function. Note that
 8104 * later, when the device is unloading, the driver should always call
 8105 * free_irq() on all MSI-X vectors it has done request_irq() on before
 8106 * calling pci_disable_msix(). Failure to do so results in a BUG_ON(), and
 8107 * the device will be left with MSI-X enabled, leaking its vectors.
8108 *
8109 * Return codes
af901ca1 8110 * 0 - successful
da0436e9
JS
8111 * other values - error
8112 **/
8113static int
8114lpfc_sli_enable_msix(struct lpfc_hba *phba)
8115{
8116 int rc, i;
8117 LPFC_MBOXQ_t *pmb;
8118
8119 /* Set up MSI-X multi-message vectors */
8120 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8121 phba->msix_entries[i].entry = i;
8122
8123 /* Configure MSI-X capability structure */
8124 rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
8125 ARRAY_SIZE(phba->msix_entries));
8126 if (rc) {
8127 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8128 "0420 PCI enable MSI-X failed (%d)\n", rc);
8129 goto msi_fail_out;
8130 }
8131 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8132 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8133 "0477 MSI-X entry[%d]: vector=x%x "
8134 "message=%d\n", i,
8135 phba->msix_entries[i].vector,
8136 phba->msix_entries[i].entry);
8137 /*
8138 * Assign MSI-X vectors to interrupt handlers
8139 */
8140
 8141	/* vector-0 is associated with the slow-path handler */
8142 rc = request_irq(phba->msix_entries[0].vector,
8143 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
8144 LPFC_SP_DRIVER_HANDLER_NAME, phba);
8145 if (rc) {
8146 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8147 "0421 MSI-X slow-path request_irq failed "
8148 "(%d)\n", rc);
8149 goto msi_fail_out;
8150 }
8151
 8152	/* vector-1 is associated with the fast-path handler */
8153 rc = request_irq(phba->msix_entries[1].vector,
8154 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
8155 LPFC_FP_DRIVER_HANDLER_NAME, phba);
8156
8157 if (rc) {
8158 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8159 "0429 MSI-X fast-path request_irq failed "
8160 "(%d)\n", rc);
8161 goto irq_fail_out;
8162 }
8163
8164 /*
8165 * Configure HBA MSI-X attention conditions to messages
8166 */
8167 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8168
8169 if (!pmb) {
8170 rc = -ENOMEM;
8171 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8172 "0474 Unable to allocate memory for issuing "
8173 "MBOX_CONFIG_MSI command\n");
8174 goto mem_fail_out;
8175 }
8176 rc = lpfc_config_msi(phba, pmb);
8177 if (rc)
8178 goto mbx_fail_out;
8179 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8180 if (rc != MBX_SUCCESS) {
8181 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
8182 "0351 Config MSI mailbox command failed, "
8183 "mbxCmd x%x, mbxStatus x%x\n",
8184 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
8185 goto mbx_fail_out;
8186 }
8187
8188 /* Free memory allocated for mailbox command */
8189 mempool_free(pmb, phba->mbox_mem_pool);
8190 return rc;
8191
8192mbx_fail_out:
8193 /* Free memory allocated for mailbox command */
8194 mempool_free(pmb, phba->mbox_mem_pool);
8195
8196mem_fail_out:
8197 /* free the irq already requested */
8198 free_irq(phba->msix_entries[1].vector, phba);
8199
8200irq_fail_out:
8201 /* free the irq already requested */
8202 free_irq(phba->msix_entries[0].vector, phba);
8203
8204msi_fail_out:
8205 /* Unconfigure MSI-X capability structure */
8206 pci_disable_msix(phba->pcidev);
8207 return rc;
8208}
8209
8210/**
8211 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
8212 * @phba: pointer to lpfc hba data structure.
8213 *
8214 * This routine is invoked to release the MSI-X vectors and then disable the
8215 * MSI-X interrupt mode to device with SLI-3 interface spec.
8216 **/
8217static void
8218lpfc_sli_disable_msix(struct lpfc_hba *phba)
8219{
8220 int i;
8221
8222 /* Free up MSI-X multi-message vectors */
8223 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8224 free_irq(phba->msix_entries[i].vector, phba);
8225 /* Disable MSI-X */
8226 pci_disable_msix(phba->pcidev);
8227
8228 return;
8229}
8230
8231/**
8232 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
8233 * @phba: pointer to lpfc hba data structure.
8234 *
 8235 * This routine is invoked to enable the MSI interrupt mode on a device with
 8236 * the SLI-3 interface spec. The kernel function pci_enable_msi() is called
 8237 * to enable the MSI vector. The device driver is responsible for calling
 8238 * request_irq() to register the MSI vector with an interrupt handler, which
 8239 * is done in this function.
8240 *
8241 * Return codes
af901ca1 8242 * 0 - successful
da0436e9
JS
8243 * other values - error
8244 */
8245static int
8246lpfc_sli_enable_msi(struct lpfc_hba *phba)
8247{
8248 int rc;
8249
8250 rc = pci_enable_msi(phba->pcidev);
8251 if (!rc)
8252 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8253 "0462 PCI enable MSI mode success.\n");
8254 else {
8255 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8256 "0471 PCI enable MSI mode failed (%d)\n", rc);
8257 return rc;
8258 }
8259
8260 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8261 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8262 if (rc) {
8263 pci_disable_msi(phba->pcidev);
8264 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8265 "0478 MSI request_irq failed (%d)\n", rc);
8266 }
8267 return rc;
8268}
8269
8270/**
8271 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
8272 * @phba: pointer to lpfc hba data structure.
8273 *
 8274 * This routine is invoked to disable the MSI interrupt mode on a device with
 8275 * the SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
 8276 * has done request_irq() on before calling pci_disable_msi(). Failure to do
 8277 * so results in a BUG_ON(), and the device will be left with MSI enabled,
 8278 * leaking its vector.
8279 */
8280static void
8281lpfc_sli_disable_msi(struct lpfc_hba *phba)
8282{
8283 free_irq(phba->pcidev->irq, phba);
8284 pci_disable_msi(phba->pcidev);
8285 return;
8286}
8287
8288/**
8289 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
8290 * @phba: pointer to lpfc hba data structure.
8291 *
 8292 * This routine is invoked to enable the device interrupt and associate the
 8293 * driver's interrupt handler(s) with interrupt vector(s) on a device with
 8294 * the SLI-3 interface spec. Depending on the interrupt mode configured for
 8295 * the driver, the driver will try to fall back from the configured interrupt
 8296 * mode to an interrupt mode supported by the platform, kernel, and device,
 8297 * in the order:
 8298 * MSI-X -> MSI -> IRQ.
8299 *
8300 * Return codes
af901ca1 8301 * 0 - successful
da0436e9
JS
8302 * other values - error
8303 **/
8304static uint32_t
8305lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8306{
8307 uint32_t intr_mode = LPFC_INTR_ERROR;
8308 int retval;
8309
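	/*
	 * intr_mode reports the mode actually enabled back to the caller:
	 * 2 = MSI-X, 1 = MSI, 0 = INTx, or LPFC_INTR_ERROR if no interrupt
	 * mode could be configured at all.
	 */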
8310 if (cfg_mode == 2) {
8311 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
8312 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
8313 if (!retval) {
8314 /* Now, try to enable MSI-X interrupt mode */
8315 retval = lpfc_sli_enable_msix(phba);
8316 if (!retval) {
8317 /* Indicate initialization to MSI-X mode */
8318 phba->intr_type = MSIX;
8319 intr_mode = 2;
8320 }
8321 }
8322 }
8323
 8324	/* Fall back to MSI if MSI-X initialization failed */
8325 if (cfg_mode >= 1 && phba->intr_type == NONE) {
8326 retval = lpfc_sli_enable_msi(phba);
8327 if (!retval) {
8328 /* Indicate initialization to MSI mode */
8329 phba->intr_type = MSI;
8330 intr_mode = 1;
8331 }
8332 }
8333
 8334	/* Fall back to INTx if both MSI-X and MSI initialization failed */
8335 if (phba->intr_type == NONE) {
8336 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8337 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8338 if (!retval) {
8339 /* Indicate initialization to INTx mode */
8340 phba->intr_type = INTx;
8341 intr_mode = 0;
8342 }
8343 }
8344 return intr_mode;
8345}
8346
8347/**
8348 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
8349 * @phba: pointer to lpfc hba data structure.
8350 *
8351 * This routine is invoked to disable device interrupt and disassociate the
8352 * driver's interrupt handler(s) from interrupt vector(s) to device with
8353 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
8354 * release the interrupt vector(s) for the message signaled interrupt.
8355 **/
8356static void
8357lpfc_sli_disable_intr(struct lpfc_hba *phba)
8358{
8359 /* Disable the currently initialized interrupt mode */
8360 if (phba->intr_type == MSIX)
8361 lpfc_sli_disable_msix(phba);
8362 else if (phba->intr_type == MSI)
8363 lpfc_sli_disable_msi(phba);
8364 else if (phba->intr_type == INTx)
8365 free_irq(phba->pcidev->irq, phba);
8366
8367 /* Reset interrupt management states */
8368 phba->intr_type = NONE;
8369 phba->sli.slistat.sli_intr = 0;
8370
8371 return;
8372}
8373
7bb03bbf
JS
8374/**
8375 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
8376 * @phba: pointer to lpfc hba data structure.
8377 *
8378 * Find next available CPU to use for IRQ to CPU affinity.
8379 */
8380static int
8381lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
8382{
8383 struct lpfc_vector_map_info *cpup;
8384 int cpu;
8385
8386 cpup = phba->sli4_hba.cpu_map;
8387 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8388 /* CPU must be online */
8389 if (cpu_online(cpu)) {
8390 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8391 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
8392 (cpup->phys_id == phys_id)) {
8393 return cpu;
8394 }
8395 }
8396 cpup++;
8397 }
8398
8399 /*
8400 * If we get here, we have used ALL CPUs for the specific
8401 * phys_id. Now we need to clear out lpfc_used_cpu and start
8402 * reusing CPUs.
8403 */
8404
8405 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8406 if (lpfc_used_cpu[cpu] == phys_id)
8407 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
8408 }
8409
8410 cpup = phba->sli4_hba.cpu_map;
8411 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8412 /* CPU must be online */
8413 if (cpu_online(cpu)) {
8414 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8415 (cpup->phys_id == phys_id)) {
8416 return cpu;
8417 }
8418 }
8419 cpup++;
8420 }
8421 return LPFC_VECTOR_MAP_EMPTY;
8422}
8423
8424/**
8425 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
8426 * @phba: pointer to lpfc hba data structure.
8427 * @vectors: number of HBA vectors
8428 *
 8429 * Affinitize MSI-X IRQ vectors to CPUs. Try to spread the vector
 8430 * affinity equally across multiple physical CPUs (NUMA nodes).
 8431 * In addition, this routine will assign an IO channel for each CPU
 8432 * to use when issuing I/Os.
8433 */
8434static int
8435lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8436{
8437 int i, idx, saved_chann, used_chann, cpu, phys_id;
ec2087a7
JS
8438 int max_phys_id, min_phys_id;
8439 int num_io_channel, first_cpu, chan;
7bb03bbf
JS
8440 struct lpfc_vector_map_info *cpup;
8441#ifdef CONFIG_X86
8442 struct cpuinfo_x86 *cpuinfo;
8443#endif
8444 struct cpumask *mask;
8445 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
8446
8447 /* If there is no mapping, just return */
8448 if (!phba->cfg_fcp_cpu_map)
8449 return 1;
8450
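	/*
	 * Overview: record each CPU's phys_id/core_id, hand out the MSI-X
	 * vectors round-robin across phys_ids starting from the lowest one,
	 * then give every remaining CPU an IO channel based on the channels
	 * already mapped to its phys_id.
	 */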
8451 /* Init cpu_map array */
8452 memset(phba->sli4_hba.cpu_map, 0xff,
8453 (sizeof(struct lpfc_vector_map_info) *
8454 phba->sli4_hba.num_present_cpu));
8455
8456 max_phys_id = 0;
ec2087a7 8457 min_phys_id = 0xff;
7bb03bbf
JS
8458 phys_id = 0;
8459 num_io_channel = 0;
8460 first_cpu = LPFC_VECTOR_MAP_EMPTY;
8461
8462 /* Update CPU map with physical id and core id of each CPU */
8463 cpup = phba->sli4_hba.cpu_map;
8464 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8465#ifdef CONFIG_X86
8466 cpuinfo = &cpu_data(cpu);
8467 cpup->phys_id = cpuinfo->phys_proc_id;
8468 cpup->core_id = cpuinfo->cpu_core_id;
8469#else
8470 /* No distinction between CPUs for other platforms */
8471 cpup->phys_id = 0;
8472 cpup->core_id = 0;
8473#endif
8474
8475 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8476 "3328 CPU physid %d coreid %d\n",
8477 cpup->phys_id, cpup->core_id);
8478
8479 if (cpup->phys_id > max_phys_id)
8480 max_phys_id = cpup->phys_id;
ec2087a7
JS
8481 if (cpup->phys_id < min_phys_id)
8482 min_phys_id = cpup->phys_id;
7bb03bbf
JS
8483 cpup++;
8484 }
8485
ec2087a7 8486 phys_id = min_phys_id;
7bb03bbf
JS
8487 /* Now associate the HBA vectors with specific CPUs */
8488 for (idx = 0; idx < vectors; idx++) {
8489 cpup = phba->sli4_hba.cpu_map;
8490 cpu = lpfc_find_next_cpu(phba, phys_id);
8491 if (cpu == LPFC_VECTOR_MAP_EMPTY) {
8492
8493 /* Try for all phys_id's */
8494 for (i = 1; i < max_phys_id; i++) {
8495 phys_id++;
8496 if (phys_id > max_phys_id)
ec2087a7 8497 phys_id = min_phys_id;
7bb03bbf
JS
8498 cpu = lpfc_find_next_cpu(phba, phys_id);
8499 if (cpu == LPFC_VECTOR_MAP_EMPTY)
8500 continue;
8501 goto found;
8502 }
8503
ec2087a7
JS
8504 /* Use round robin for scheduling */
8505 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
8506 chan = 0;
8507 cpup = phba->sli4_hba.cpu_map;
8508 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
8509 cpup->channel_id = chan;
8510 cpup++;
8511 chan++;
8512 if (chan >= phba->cfg_fcp_io_channel)
8513 chan = 0;
8514 }
8515
7bb03bbf
JS
8516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8517 "3329 Cannot set affinity:"
8518 "Error mapping vector %d (%d)\n",
8519 idx, vectors);
8520 return 0;
8521 }
8522found:
8523 cpup += cpu;
8524 if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
8525 lpfc_used_cpu[cpu] = phys_id;
8526
8527 /* Associate vector with selected CPU */
8528 cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
8529
8530 /* Associate IO channel with selected CPU */
8531 cpup->channel_id = idx;
8532 num_io_channel++;
8533
8534 if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
8535 first_cpu = cpu;
8536
8537 /* Now affinitize to the selected CPU */
8538 mask = &cpup->maskbits;
8539 cpumask_clear(mask);
8540 cpumask_set_cpu(cpu, mask);
8541 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
8542 vector, mask);
8543
8544 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8545 "3330 Set Affinity: CPU %d channel %d "
8546 "irq %d (%x)\n",
8547 cpu, cpup->channel_id,
8548 phba->sli4_hba.msix_entries[idx].vector, i);
8549
 8550		/* Spread vector mapping across multiple physical CPU nodes */
8551 phys_id++;
8552 if (phys_id > max_phys_id)
ec2087a7 8553 phys_id = min_phys_id;
7bb03bbf
JS
8554 }
8555
8556 /*
 8557	 * Finally, fill in the IO channel for any remaining CPUs.
 8558	 * At this point, all IO channels have been assigned to a specific
 8559	 * MSI-X vector, mapped to a specific CPU.
 8560	 * Base the remaining IO channel assignments on the IO channels
 8561	 * already assigned to other CPUs on the same phys_id.
 8562	 */
ec2087a7 8563 for (i = min_phys_id; i <= max_phys_id; i++) {
7bb03bbf
JS
8564 /*
 8565		 * If there are no IO channels already mapped to
 8566		 * this phys_id, just round-robin through the io_channels.
 8567		 * Set up chann[] for the round robin.
8568 */
8569 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8570 chann[idx] = idx;
8571
8572 saved_chann = 0;
8573 used_chann = 0;
8574
8575 /*
8576 * First build a list of IO channels already assigned
8577 * to this phys_id before reassigning the same IO
8578 * channels to the remaining CPUs.
8579 */
8580 cpup = phba->sli4_hba.cpu_map;
8581 cpu = first_cpu;
8582 cpup += cpu;
8583 for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
8584 idx++) {
8585 if (cpup->phys_id == i) {
8586 /*
8587 * Save any IO channels that are
8588 * already mapped to this phys_id.
8589 */
8590 if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
8591 chann[saved_chann] =
8592 cpup->channel_id;
8593 saved_chann++;
8594 goto out;
8595 }
8596
8597 /* See if we are using round-robin */
8598 if (saved_chann == 0)
8599 saved_chann =
8600 phba->cfg_fcp_io_channel;
8601
8602 /* Associate next IO channel with CPU */
8603 cpup->channel_id = chann[used_chann];
8604 num_io_channel++;
8605 used_chann++;
8606 if (used_chann == saved_chann)
8607 used_chann = 0;
8608
8609 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8610 "3331 Set IO_CHANN "
8611 "CPU %d channel %d\n",
8612 idx, cpup->channel_id);
8613 }
8614out:
8615 cpu++;
8616 if (cpu >= phba->sli4_hba.num_present_cpu) {
8617 cpup = phba->sli4_hba.cpu_map;
8618 cpu = 0;
8619 } else {
8620 cpup++;
8621 }
8622 }
8623 }
8624
8625 if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
8626 cpup = phba->sli4_hba.cpu_map;
8627 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
8628 if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
8629 cpup->channel_id = 0;
8630 num_io_channel++;
8631
8632 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8633 "3332 Assign IO_CHANN "
8634 "CPU %d channel %d\n",
8635 idx, cpup->channel_id);
8636 }
8637 cpup++;
8638 }
8639 }
8640
8641 /* Sanity check */
8642 if (num_io_channel != phba->sli4_hba.num_present_cpu)
8643 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8644 "3333 Set affinity mismatch:"
ec2087a7 8645 "%d chann != %d cpus: %d vectors\n",
7bb03bbf
JS
8646 num_io_channel, phba->sli4_hba.num_present_cpu,
8647 vectors);
8648
ec2087a7 8649 /* Enable using cpu affinity for scheduling */
7bb03bbf
JS
8650 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
8651 return 1;
8652}
8653
8654
da0436e9
JS
8655/**
8656 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
8657 * @phba: pointer to lpfc hba data structure.
8658 *
 8659 * This routine is invoked to enable the MSI-X interrupt vectors on a device
 8660 * with the SLI-4 interface spec. The kernel function pci_enable_msix() is
 8661 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 8662 * invoked, enables either all or nothing, depending on the current
 8663 * availability of PCI vector resources. The device driver is responsible
 8664 * for calling the individual request_irq() to register each MSI-X vector
 8665 * with an interrupt handler, which is done in this function. Note that
 8666 * later, when the device is unloading, the driver should always call
 8667 * free_irq() on all MSI-X vectors it has done request_irq() on before
 8668 * calling pci_disable_msix(). Failure to do so results in a BUG_ON(), and
 8669 * the device will be left with MSI-X enabled, leaking its vectors.
8670 *
8671 * Return codes
af901ca1 8672 * 0 - successful
da0436e9
JS
8673 * other values - error
8674 **/
8675static int
8676lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8677{
75baf696 8678 int vectors, rc, index;
da0436e9
JS
8679
8680 /* Set up MSI-X multi-message vectors */
82c3e9ba 8681 for (index = 0; index < phba->cfg_fcp_io_channel; index++)
da0436e9
JS
8682 phba->sli4_hba.msix_entries[index].entry = index;
8683
8684 /* Configure MSI-X capability structure */
82c3e9ba 8685 vectors = phba->cfg_fcp_io_channel;
1ba981fd
JS
8686 if (phba->cfg_fof) {
8687 phba->sli4_hba.msix_entries[index].entry = index;
8688 vectors++;
8689 }
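	/*
	 * With this kernel's pci_enable_msix(), a positive return value is
	 * the number of vectors actually available; loop back and retry the
	 * allocation with that reduced count until it succeeds or fails
	 * outright.
	 */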
75baf696 8690enable_msix_vectors:
da0436e9 8691 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
75baf696
JS
8692 vectors);
8693 if (rc > 1) {
8694 vectors = rc;
8695 goto enable_msix_vectors;
8696 } else if (rc) {
da0436e9
JS
8697 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8698 "0484 PCI enable MSI-X failed (%d)\n", rc);
8699 goto msi_fail_out;
8700 }
75baf696 8701
da0436e9 8702 /* Log MSI-X vector assignment */
75baf696 8703 for (index = 0; index < vectors; index++)
da0436e9
JS
8704 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8705 "0489 MSI-X entry[%d]: vector=x%x "
8706 "message=%d\n", index,
8707 phba->sli4_hba.msix_entries[index].vector,
8708 phba->sli4_hba.msix_entries[index].entry);
67d12733 8709
7bb03bbf 8710 /* Assign MSI-X vectors to interrupt handlers */
67d12733 8711 for (index = 0; index < vectors; index++) {
4305f183
JS
8712 memset(&phba->sli4_hba.handler_name[index], 0, 16);
8713 sprintf((char *)&phba->sli4_hba.handler_name[index],
8714 LPFC_DRIVER_HANDLER_NAME"%d", index);
da0436e9 8715
67d12733
JS
8716 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8717 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
ba20c853 8718 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
1ba981fd
JS
8719 if (phba->cfg_fof && (index == (vectors - 1)))
8720 rc = request_irq(
8721 phba->sli4_hba.msix_entries[index].vector,
8722 &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
8723 (char *)&phba->sli4_hba.handler_name[index],
8724 &phba->sli4_hba.fcp_eq_hdl[index]);
8725 else
8726 rc = request_irq(
8727 phba->sli4_hba.msix_entries[index].vector,
67d12733 8728 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
4305f183 8729 (char *)&phba->sli4_hba.handler_name[index],
67d12733 8730 &phba->sli4_hba.fcp_eq_hdl[index]);
da0436e9
JS
8731 if (rc) {
8732 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8733 "0486 MSI-X fast-path (%d) "
8734 "request_irq failed (%d)\n", index, rc);
8735 goto cfg_fail_out;
8736 }
8737 }
8738
1ba981fd
JS
8739 if (phba->cfg_fof)
8740 vectors--;
8741
82c3e9ba
JS
8742 if (vectors != phba->cfg_fcp_io_channel) {
8743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8744 "3238 Reducing IO channels to match number of "
8745 "MSI-X vectors, requested %d got %d\n",
8746 phba->cfg_fcp_io_channel, vectors);
8747 phba->cfg_fcp_io_channel = vectors;
8748 }
7bb03bbf
JS
8749
8750 lpfc_sli4_set_affinity(phba, vectors);
da0436e9
JS
8751 return rc;
8752
8753cfg_fail_out:
8754 /* free the irq already requested */
acbd8616
JS
8755 for (--index; index >= 0; index--) {
8756 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
8757 vector, NULL);
67d12733
JS
8758 free_irq(phba->sli4_hba.msix_entries[index].vector,
8759 &phba->sli4_hba.fcp_eq_hdl[index]);
acbd8616 8760 }
da0436e9
JS
8761
8762msi_fail_out:
8763 /* Unconfigure MSI-X capability structure */
8764 pci_disable_msix(phba->pcidev);
8765 return rc;
8766}
8767
8768/**
8769 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
8770 * @phba: pointer to lpfc hba data structure.
8771 *
8772 * This routine is invoked to release the MSI-X vectors and then disable the
8773 * MSI-X interrupt mode to device with SLI-4 interface spec.
8774 **/
8775static void
8776lpfc_sli4_disable_msix(struct lpfc_hba *phba)
8777{
8778 int index;
8779
8780 /* Free up MSI-X multi-message vectors */
acbd8616
JS
8781 for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
8782 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
8783 vector, NULL);
da0436e9 8784 free_irq(phba->sli4_hba.msix_entries[index].vector,
67d12733 8785 &phba->sli4_hba.fcp_eq_hdl[index]);
acbd8616 8786 }
1ba981fd
JS
8787 if (phba->cfg_fof) {
8788 free_irq(phba->sli4_hba.msix_entries[index].vector,
8789 &phba->sli4_hba.fcp_eq_hdl[index]);
8790 }
da0436e9
JS
8791 /* Disable MSI-X */
8792 pci_disable_msix(phba->pcidev);
8793
8794 return;
8795}
8796
8797/**
8798 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
8799 * @phba: pointer to lpfc hba data structure.
8800 *
 8801 * This routine is invoked to enable the MSI interrupt mode on a device with
 8802 * the SLI-4 interface spec. The kernel function pci_enable_msi() is called
 8803 * to enable the MSI vector. The device driver is responsible for calling
 8804 * request_irq() to register the MSI vector with an interrupt handler,
 8805 * which is done in this function.
8806 *
8807 * Return codes
af901ca1 8808 * 0 - successful
da0436e9
JS
8809 * other values - error
8810 **/
8811static int
8812lpfc_sli4_enable_msi(struct lpfc_hba *phba)
8813{
8814 int rc, index;
8815
8816 rc = pci_enable_msi(phba->pcidev);
8817 if (!rc)
8818 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8819 "0487 PCI enable MSI mode success.\n");
8820 else {
8821 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8822 "0488 PCI enable MSI mode failed (%d)\n", rc);
8823 return rc;
8824 }
8825
8826 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8827 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8828 if (rc) {
8829 pci_disable_msi(phba->pcidev);
8830 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8831 "0490 MSI request_irq failed (%d)\n", rc);
75baf696 8832 return rc;
da0436e9
JS
8833 }
8834
67d12733 8835 for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
da0436e9
JS
8836 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8837 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8838 }
8839
1ba981fd
JS
8840 if (phba->cfg_fof) {
8841 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8842 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8843 }
75baf696 8844 return 0;
da0436e9
JS
8845}
8846
8847/**
8848 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
8849 * @phba: pointer to lpfc hba data structure.
8850 *
 8851 * This routine is invoked to disable the MSI interrupt mode on a device with
 8852 * the SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
 8853 * has done request_irq() on before calling pci_disable_msi(). Failure to do
 8854 * so results in a BUG_ON(), and the device will be left with MSI enabled,
 8855 * leaking its vector.
8856 **/
8857static void
8858lpfc_sli4_disable_msi(struct lpfc_hba *phba)
8859{
8860 free_irq(phba->pcidev->irq, phba);
8861 pci_disable_msi(phba->pcidev);
8862 return;
8863}
8864
8865/**
8866 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
8867 * @phba: pointer to lpfc hba data structure.
8868 *
 8869 * This routine is invoked to enable the device interrupt and associate the
 8870 * driver's interrupt handler(s) with interrupt vector(s) on a device with
 8871 * the SLI-4 interface spec. Depending on the interrupt mode configured for
 8872 * the driver, the driver will try to fall back from the configured interrupt
 8873 * mode to an interrupt mode supported by the platform, kernel, and device,
 8874 * in the order:
 8875 * MSI-X -> MSI -> IRQ.
8876 *
8877 * Return codes
af901ca1 8878 * 0 - successful
da0436e9
JS
8879 * other values - error
8880 **/
8881static uint32_t
8882lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8883{
8884 uint32_t intr_mode = LPFC_INTR_ERROR;
8885 int retval, index;
8886
8887 if (cfg_mode == 2) {
8888 /* Preparation before conf_msi mbox cmd */
8889 retval = 0;
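		/*
		 * Unlike the SLI-3 path, no config-port mailbox command is
		 * needed before enabling MSI-X here; retval is simply primed
		 * for the check below.
		 */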
8890 if (!retval) {
8891 /* Now, try to enable MSI-X interrupt mode */
8892 retval = lpfc_sli4_enable_msix(phba);
8893 if (!retval) {
8894 /* Indicate initialization to MSI-X mode */
8895 phba->intr_type = MSIX;
8896 intr_mode = 2;
8897 }
8898 }
8899 }
8900
 8901	/* Fall back to MSI if MSI-X initialization failed */
8902 if (cfg_mode >= 1 && phba->intr_type == NONE) {
8903 retval = lpfc_sli4_enable_msi(phba);
8904 if (!retval) {
8905 /* Indicate initialization to MSI mode */
8906 phba->intr_type = MSI;
8907 intr_mode = 1;
8908 }
8909 }
8910
 8911	/* Fall back to INTx if both MSI-X and MSI initialization failed */
8912 if (phba->intr_type == NONE) {
8913 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8914 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8915 if (!retval) {
8916 /* Indicate initialization to INTx mode */
8917 phba->intr_type = INTx;
8918 intr_mode = 0;
67d12733 8919 for (index = 0; index < phba->cfg_fcp_io_channel;
da0436e9
JS
8920 index++) {
8921 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8922 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
ba20c853
JS
8923 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8924 fcp_eq_in_use, 1);
da0436e9 8925 }
1ba981fd
JS
8926 if (phba->cfg_fof) {
8927 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8928 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8929 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8930 fcp_eq_in_use, 1);
8931 }
da0436e9
JS
8932 }
8933 }
8934 return intr_mode;
8935}
8936
8937/**
8938 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
8939 * @phba: pointer to lpfc hba data structure.
8940 *
8941 * This routine is invoked to disable device interrupt and disassociate
8942 * the driver's interrupt handler(s) from interrupt vector(s) to device
8943 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
8944 * will release the interrupt vector(s) for the message signaled interrupt.
8945 **/
8946static void
8947lpfc_sli4_disable_intr(struct lpfc_hba *phba)
8948{
8949 /* Disable the currently initialized interrupt mode */
8950 if (phba->intr_type == MSIX)
8951 lpfc_sli4_disable_msix(phba);
8952 else if (phba->intr_type == MSI)
8953 lpfc_sli4_disable_msi(phba);
8954 else if (phba->intr_type == INTx)
8955 free_irq(phba->pcidev->irq, phba);
8956
8957 /* Reset interrupt management states */
8958 phba->intr_type = NONE;
8959 phba->sli.slistat.sli_intr = 0;
8960
8961 return;
8962}
8963
8964/**
8965 * lpfc_unset_hba - Unset SLI3 hba device initialization
8966 * @phba: pointer to lpfc hba data structure.
8967 *
8968 * This routine is invoked to unset the HBA device initialization steps to
8969 * a device with SLI-3 interface spec.
8970 **/
8971static void
8972lpfc_unset_hba(struct lpfc_hba *phba)
8973{
8974 struct lpfc_vport *vport = phba->pport;
8975 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8976
8977 spin_lock_irq(shost->host_lock);
8978 vport->load_flag |= FC_UNLOADING;
8979 spin_unlock_irq(shost->host_lock);
8980
72859909
JS
8981 kfree(phba->vpi_bmask);
8982 kfree(phba->vpi_ids);
8983
da0436e9
JS
8984 lpfc_stop_hba_timers(phba);
8985
8986 phba->pport->work_port_events = 0;
8987
8988 lpfc_sli_hba_down(phba);
8989
8990 lpfc_sli_brdrestart(phba);
8991
8992 lpfc_sli_disable_intr(phba);
8993
8994 return;
8995}
8996
5af5eee7
JS
8997/**
8998 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
8999 * @phba: Pointer to HBA context object.
9000 *
 9001 * This function is called in the SLI4 code path to wait for completion
 9002 * of the device's XRI exchange busy condition. It checks the XRI exchange
 9003 * busy state on outstanding FCP and ELS I/Os every 10 ms for up to 10
 9004 * seconds; after that, it checks every 30 seconds, logs an error message,
 9005 * and waits forever. Only when all XRI exchange busy conditions complete
 9006 * shall the driver unload proceed with invoking the function reset ioctl
 9007 * mailbox command to the CNA and the rest of the driver unload resource
 9008 * release.
9009 **/
9010static void
9011lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
9012{
9013 int wait_time = 0;
9014 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9015 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9016
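	/*
	 * Poll both abort lists: short LPFC_XRI_EXCH_BUSY_WAIT_T1 sleeps up
	 * to the timeout, then longer LPFC_XRI_EXCH_BUSY_WAIT_T2 sleeps with
	 * periodic error logging, for as long as any XRI stays busy.
	 */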
9017 while (!fcp_xri_cmpl || !els_xri_cmpl) {
9018 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
9019 if (!fcp_xri_cmpl)
9020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9021 "2877 FCP XRI exchange busy "
9022 "wait time: %d seconds.\n",
9023 wait_time/1000);
9024 if (!els_xri_cmpl)
9025 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9026 "2878 ELS XRI exchange busy "
9027 "wait time: %d seconds.\n",
9028 wait_time/1000);
9029 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
9030 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
9031 } else {
9032 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
9033 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
9034 }
9035 fcp_xri_cmpl =
9036 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9037 els_xri_cmpl =
9038 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9039 }
9040}
9041
da0436e9
JS
9042/**
9043 * lpfc_sli4_hba_unset - Unset the fcoe hba
9044 * @phba: Pointer to HBA context object.
9045 *
9046 * This function is called in the SLI4 code path to reset the HBA's FCoE
9047 * function. The caller is not required to hold any lock. This routine
9048 * issues PCI function reset mailbox command to reset the FCoE function.
9049 * At the end of the function, it calls lpfc_hba_down_post function to
9050 * free any pending commands.
9051 **/
9052static void
9053lpfc_sli4_hba_unset(struct lpfc_hba *phba)
9054{
9055 int wait_cnt = 0;
9056 LPFC_MBOXQ_t *mboxq;
912e3acd 9057 struct pci_dev *pdev = phba->pcidev;
da0436e9
JS
9058
9059 lpfc_stop_hba_timers(phba);
9060 phba->sli4_hba.intr_enable = 0;
9061
9062 /*
9063 * Gracefully wait out the potential current outstanding asynchronous
9064 * mailbox command.
9065 */
9066
 9067	/* First, block any pending async mailbox command from being posted */
9068 spin_lock_irq(&phba->hbalock);
9069 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9070 spin_unlock_irq(&phba->hbalock);
 9071	/* Now, try to wait it out if we can */
9072 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9073 msleep(10);
9074 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
9075 break;
9076 }
9077 /* Forcefully release the outstanding mailbox command if timed out */
9078 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9079 spin_lock_irq(&phba->hbalock);
9080 mboxq = phba->sli.mbox_active;
9081 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9082 __lpfc_mbox_cmpl_put(phba, mboxq);
9083 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9084 phba->sli.mbox_active = NULL;
9085 spin_unlock_irq(&phba->hbalock);
9086 }
9087
5af5eee7
JS
9088 /* Abort all iocbs associated with the hba */
9089 lpfc_sli_hba_iocb_abort(phba);
9090
9091 /* Wait for completion of device XRI exchange busy */
9092 lpfc_sli4_xri_exchange_busy_wait(phba);
9093
da0436e9
JS
9094 /* Disable PCI subsystem interrupt */
9095 lpfc_sli4_disable_intr(phba);
9096
912e3acd
JS
9097 /* Disable SR-IOV if enabled */
9098 if (phba->cfg_sriov_nr_virtfn)
9099 pci_disable_sriov(pdev);
9100
da0436e9
JS
9101 /* Stop kthread signal shall trigger work_done one more time */
9102 kthread_stop(phba->worker_thread);
9103
3677a3a7
JS
9104 /* Reset SLI4 HBA FCoE function */
9105 lpfc_pci_function_reset(phba);
5350d872 9106 lpfc_sli4_queue_destroy(phba);
3677a3a7 9107
da0436e9
JS
9108 /* Stop the SLI4 device port */
9109 phba->pport->work_port_events = 0;
9110}
9111
28baac74
JS
9112 /**
9113 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
9114 * @phba: Pointer to HBA context object.
9115 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9116 *
 9117 * This function is called in the SLI4 code path to read the port's
 9118 * SLI4 capabilities.
 9119 *
 9120 * This function may be called from any context that can block-wait
 9121 * for the completion. The expectation is that this routine is called
 9122 * typically from probe_one or from the online routine.
9123 **/
9124int
9125lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9126{
9127 int rc;
9128 struct lpfc_mqe *mqe;
9129 struct lpfc_pc_sli4_params *sli4_params;
9130 uint32_t mbox_tmo;
9131
9132 rc = 0;
9133 mqe = &mboxq->u.mqe;
9134
9135 /* Read the port's SLI4 Parameters port capabilities */
fedd3b7b 9136 lpfc_pc_sli4_params(mboxq);
28baac74
JS
9137 if (!phba->sli4_hba.intr_enable)
9138 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9139 else {
a183a15f 9140 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
28baac74
JS
9141 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9142 }
9143
9144 if (unlikely(rc))
9145 return 1;
9146
9147 sli4_params = &phba->sli4_hba.pc_sli4_params;
9148 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
9149 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
9150 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
9151 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
9152 &mqe->un.sli4_params);
9153 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
9154 &mqe->un.sli4_params);
9155 sli4_params->proto_types = mqe->un.sli4_params.word3;
9156 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
9157 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
9158 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
9159 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
9160 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
9161 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
9162 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
9163 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
9164 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
9165 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
9166 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
9167 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
9168 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
9169 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
9170 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
9171 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
9172 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
9173 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
9174 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
9175 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
0558056c
JS
9176
9177 /* Make sure that sge_supp_len can be handled by the driver */
9178 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9179 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9180
28baac74
JS
9181 return rc;
9182}
9183
fedd3b7b
JS
9184/**
9185 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
9186 * @phba: Pointer to HBA context object.
9187 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9188 *
 9189 * This function is called in the SLI4 code path to read the port's
 9190 * SLI4 capabilities.
 9191 *
 9192 * This function may be called from any context that can block-wait
 9193 * for the completion. The expectation is that this routine is called
 9194 * typically from probe_one or from the online routine.
9195 **/
9196int
9197lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9198{
9199 int rc;
9200 struct lpfc_mqe *mqe = &mboxq->u.mqe;
9201 struct lpfc_pc_sli4_params *sli4_params;
a183a15f 9202 uint32_t mbox_tmo;
fedd3b7b
JS
9203 int length;
9204 struct lpfc_sli4_parameters *mbx_sli4_parameters;
9205
6d368e53
JS
9206 /*
9207 * By default, the driver assumes the SLI4 port requires RPI
9208 * header postings. The SLI4_PARAM response will correct this
9209 * assumption.
9210 */
9211 phba->sli4_hba.rpi_hdrs_in_use = 1;
9212
fedd3b7b
JS
9213 /* Read the port's SLI4 Config Parameters */
9214 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
9215 sizeof(struct lpfc_sli4_cfg_mhdr));
9216 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9217 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
9218 length, LPFC_SLI4_MBX_EMBED);
9219 if (!phba->sli4_hba.intr_enable)
9220 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
a183a15f
JS
9221 else {
9222 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9223 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9224 }
fedd3b7b
JS
9225 if (unlikely(rc))
9226 return rc;
9227 sli4_params = &phba->sli4_hba.pc_sli4_params;
9228 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
9229 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
9230 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
9231 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
9232 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
9233 mbx_sli4_parameters);
9234 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
9235 mbx_sli4_parameters);
9236 if (bf_get(cfg_phwq, mbx_sli4_parameters))
9237 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
9238 else
9239 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
9240 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
9241 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
1ba981fd 9242 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
fedd3b7b
JS
9243 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
9244 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
9245 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
9246 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
0c651878 9247 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
fedd3b7b
JS
9248 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
9249 mbx_sli4_parameters);
9250 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
9251 mbx_sli4_parameters);
6d368e53
JS
9252 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
9253 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
0558056c
JS
9254
9255 /* Make sure that sge_supp_len can be handled by the driver */
9256 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9257 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9258
fedd3b7b
JS
9259 return 0;
9260}
9261
da0436e9
JS
9262/**
9263 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
9264 * @pdev: pointer to PCI device
9265 * @pid: pointer to PCI device identifier
9266 *
9267 * This routine is to be called to attach a device with SLI-3 interface spec
9268 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 9269 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 9270 * device-specific information of the device and driver to see if the driver
 9271 * states that it can support this kind of device. If the match is successful,
 9272 * the driver core invokes this routine. If this routine determines it can
 9273 * claim the HBA, it does all the initialization needed to handle it properly.
9274 *
9275 * Return code
9276 * 0 - driver can claim the device
9277 * negative value - driver can not claim the device
9278 **/
6f039790 9279static int
da0436e9
JS
9280lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
9281{
9282 struct lpfc_hba *phba;
9283 struct lpfc_vport *vport = NULL;
6669f9bb 9284 struct Scsi_Host *shost = NULL;
da0436e9
JS
9285 int error;
9286 uint32_t cfg_mode, intr_mode;
9287
9288 /* Allocate memory for HBA structure */
9289 phba = lpfc_hba_alloc(pdev);
9290 if (!phba)
9291 return -ENOMEM;
9292
9293 /* Perform generic PCI device enabling operation */
9294 error = lpfc_enable_pci_dev(phba);
079b5c91 9295 if (error)
da0436e9 9296 goto out_free_phba;
da0436e9
JS
9297
9298 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
9299 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
9300 if (error)
9301 goto out_disable_pci_dev;
9302
9303 /* Set up SLI-3 specific device PCI memory space */
9304 error = lpfc_sli_pci_mem_setup(phba);
9305 if (error) {
9306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9307 "1402 Failed to set up pci memory space.\n");
9308 goto out_disable_pci_dev;
9309 }
9310
9311 /* Set up phase-1 common device driver resources */
9312 error = lpfc_setup_driver_resource_phase1(phba);
9313 if (error) {
9314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9315 "1403 Failed to set up driver resource.\n");
9316 goto out_unset_pci_mem_s3;
9317 }
9318
9319 /* Set up SLI-3 specific device driver resources */
9320 error = lpfc_sli_driver_resource_setup(phba);
9321 if (error) {
9322 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9323 "1404 Failed to set up driver resource.\n");
9324 goto out_unset_pci_mem_s3;
9325 }
9326
9327 /* Initialize and populate the iocb list per host */
9328 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
9329 if (error) {
9330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9331 "1405 Failed to initialize iocb list.\n");
9332 goto out_unset_driver_resource_s3;
9333 }
9334
9335 /* Set up common device driver resources */
9336 error = lpfc_setup_driver_resource_phase2(phba);
9337 if (error) {
9338 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9339 "1406 Failed to set up driver resource.\n");
9340 goto out_free_iocb_list;
9341 }
9342
079b5c91
JS
9343 /* Get the default values for Model Name and Description */
9344 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9345
da0436e9
JS
9346 /* Create SCSI host to the physical port */
9347 error = lpfc_create_shost(phba);
9348 if (error) {
9349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9350 "1407 Failed to create scsi host.\n");
9351 goto out_unset_driver_resource;
9352 }
9353
9354 /* Configure sysfs attributes */
9355 vport = phba->pport;
9356 error = lpfc_alloc_sysfs_attr(vport);
9357 if (error) {
9358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9359 "1476 Failed to allocate sysfs attr\n");
9360 goto out_destroy_shost;
9361 }
9362
6669f9bb 9363 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
da0436e9
JS
 9364	/* Now, try to enable interrupts and bring up the device */
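	/*
	 * Each pass enables one interrupt mode, brings the HBA up, and
	 * verifies interrupts actually fire; on failure the mode is dropped
	 * one level (MSI-X -> MSI -> INTx) and the setup is retried.
	 */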
9365 cfg_mode = phba->cfg_use_msi;
9366 while (true) {
9367 /* Put device to a known state before enabling interrupt */
9368 lpfc_stop_port(phba);
9369 /* Configure and enable interrupt */
9370 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
9371 if (intr_mode == LPFC_INTR_ERROR) {
9372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9373 "0431 Failed to enable interrupt.\n");
9374 error = -ENODEV;
9375 goto out_free_sysfs_attr;
9376 }
9377 /* SLI-3 HBA setup */
9378 if (lpfc_sli_hba_setup(phba)) {
9379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9380 "1477 Failed to set up hba\n");
9381 error = -ENODEV;
9382 goto out_remove_device;
9383 }
9384
 9385		/* Wait 50 ms for interrupts from previous mailbox commands */
9386 msleep(50);
9387 /* Check active interrupts on message signaled interrupts */
9388 if (intr_mode == 0 ||
9389 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
9390 /* Log the current active interrupt mode */
9391 phba->intr_mode = intr_mode;
9392 lpfc_log_intr_mode(phba, intr_mode);
9393 break;
9394 } else {
9395 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9396 "0447 Configure interrupt mode (%d) "
9397 "failed active interrupt test.\n",
9398 intr_mode);
9399 /* Disable the current interrupt mode */
9400 lpfc_sli_disable_intr(phba);
9401 /* Try next level of interrupt mode */
9402 cfg_mode = --intr_mode;
9403 }
9404 }
9405
9406 /* Perform post initialization setup */
9407 lpfc_post_init_setup(phba);
9408
9409 /* Check if there are static vports to be created. */
9410 lpfc_create_static_vport(phba);
9411
9412 return 0;
9413
9414out_remove_device:
9415 lpfc_unset_hba(phba);
9416out_free_sysfs_attr:
9417 lpfc_free_sysfs_attr(vport);
9418out_destroy_shost:
9419 lpfc_destroy_shost(phba);
9420out_unset_driver_resource:
9421 lpfc_unset_driver_resource_phase2(phba);
9422out_free_iocb_list:
9423 lpfc_free_iocb_list(phba);
9424out_unset_driver_resource_s3:
9425 lpfc_sli_driver_resource_unset(phba);
9426out_unset_pci_mem_s3:
9427 lpfc_sli_pci_mem_unset(phba);
9428out_disable_pci_dev:
9429 lpfc_disable_pci_dev(phba);
6669f9bb
JS
9430 if (shost)
9431 scsi_host_put(shost);
da0436e9
JS
9432out_free_phba:
9433 lpfc_hba_free(phba);
9434 return error;
9435}
9436
9437/**
9438 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
9439 * @pdev: pointer to PCI device
9440 *
 9441 * This routine is to be called to detach a device with the SLI-3 interface
 9442 * spec from the PCI subsystem. When an Emulex HBA with the SLI-3 interface
 9443 * spec is removed from the PCI bus, it performs all the necessary cleanup
 9444 * for the HBA device to be removed from the PCI subsystem properly.
9445 **/
6f039790 9446static void
da0436e9
JS
9447lpfc_pci_remove_one_s3(struct pci_dev *pdev)
9448{
9449 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9450 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9451 struct lpfc_vport **vports;
9452 struct lpfc_hba *phba = vport->phba;
9453 int i;
9454 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
9455
9456 spin_lock_irq(&phba->hbalock);
9457 vport->load_flag |= FC_UNLOADING;
9458 spin_unlock_irq(&phba->hbalock);
9459
9460 lpfc_free_sysfs_attr(vport);
9461
9462 /* Release all the vports against this physical port */
9463 vports = lpfc_create_vport_work_array(phba);
9464 if (vports != NULL)
587a37f6
JS
9465 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9466 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9467 continue;
da0436e9 9468 fc_vport_terminate(vports[i]->fc_vport);
587a37f6 9469 }
da0436e9
JS
9470 lpfc_destroy_vport_work_array(phba, vports);
9471
9472 /* Remove FC host and then SCSI host with the physical port */
9473 fc_remove_host(shost);
9474 scsi_remove_host(shost);
9475 lpfc_cleanup(vport);
9476
9477 /*
 9478	 * Bring down the SLI layer. This step disables all interrupts,
9479 * clears the rings, discards all mailbox commands, and resets
9480 * the HBA.
9481 */
9482
48e34d0f 9483 /* HBA interrupt will be disabled after this call */
da0436e9
JS
9484 lpfc_sli_hba_down(phba);
 9485	/* The kthread stop signal shall trigger work_done one more time */
9486 kthread_stop(phba->worker_thread);
9487 /* Final cleanup of txcmplq and reset the HBA */
9488 lpfc_sli_brdrestart(phba);
9489
72859909
JS
9490 kfree(phba->vpi_bmask);
9491 kfree(phba->vpi_ids);
9492
da0436e9
JS
9493 lpfc_stop_hba_timers(phba);
9494 spin_lock_irq(&phba->hbalock);
9495 list_del_init(&vport->listentry);
9496 spin_unlock_irq(&phba->hbalock);
9497
9498 lpfc_debugfs_terminate(vport);
9499
912e3acd
JS
9500 /* Disable SR-IOV if enabled */
9501 if (phba->cfg_sriov_nr_virtfn)
9502 pci_disable_sriov(pdev);
9503
da0436e9
JS
9504 /* Disable interrupt */
9505 lpfc_sli_disable_intr(phba);
9506
da0436e9
JS
9507 scsi_host_put(shost);
9508
9509 /*
9510 * Call scsi_free before mem_free since scsi bufs are released to their
9511 * corresponding pools here.
9512 */
9513 lpfc_scsi_free(phba);
9514 lpfc_mem_free_all(phba);
9515
9516 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9517 phba->hbqslimp.virt, phba->hbqslimp.phys);
9518
9519 /* Free resources associated with SLI2 interface */
9520 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9521 phba->slim2p.virt, phba->slim2p.phys);
9522
9523 /* unmap adapter SLIM and Control Registers */
9524 iounmap(phba->ctrl_regs_memmap_p);
9525 iounmap(phba->slim_memmap_p);
9526
9527 lpfc_hba_free(phba);
9528
9529 pci_release_selected_regions(pdev, bars);
9530 pci_disable_device(pdev);
9531}
9532
9533/**
9534 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
9535 * @pdev: pointer to PCI device
9536 * @msg: power management message
9537 *
 9538 * This routine is to be called from the kernel's PCI subsystem to support
 9539 * system Power Management (PM) on a device with the SLI-3 interface spec.
 9540 * When PM invokes this method, it quiesces the device by stopping the
 9541 * driver's worker thread for the device, turning off the device's interrupt
 9542 * and DMA, and bringing the device offline. Note that because the driver
 9543 * implements only the minimum PM requirements of a power-aware driver --
 9544 * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
 9545 * method call are treated as SUSPEND, and the driver fully reinitializes
 9546 * its device during the resume() method call -- the driver sets the device
 9547 * to the PCI_D3hot state in PCI config space instead of setting it
 9548 * according to the @msg provided by the PM.
9549 *
9550 * Return code
9551 * 0 - driver suspended the device
9552 * Error otherwise
9553 **/
9554static int
9555lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
9556{
9557 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9558 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9559
9560 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9561 "0473 PCI device Power Management suspend.\n");
9562
9563 /* Bring down the device */
618a5230 9564 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
da0436e9
JS
9565 lpfc_offline(phba);
9566 kthread_stop(phba->worker_thread);
9567
9568 /* Disable interrupt from device */
9569 lpfc_sli_disable_intr(phba);
9570
9571 /* Save device state to PCI config space */
9572 pci_save_state(pdev);
9573 pci_set_power_state(pdev, PCI_D3hot);
9574
9575 return 0;
9576}
9577
9578/**
9579 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
9580 * @pdev: pointer to PCI device
9581 *
9582 * This routine is called from the kernel's PCI subsystem to support
9583 * system Power Management (PM) for devices with the SLI-3 interface spec.
9584 * When PM invokes this method, it restores the device's PCI config space
9585 * state, fully reinitializes the device, and brings it online. Note that
9586 * the driver implements only the minimum PM requirements for a power-aware
9587 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
9588 * the suspend() method are treated as SUSPEND, and the driver fully
9589 * reinitializes its device during the resume() call. The device is
9590 * therefore set to PCI_D0 directly in PCI config space before restoring
9591 * the saved state.
9592 *
9593 * Return code
9594 * 0 - driver resumed the device
9595 * Error otherwise
9596 **/
9597static int
9598lpfc_pci_resume_one_s3(struct pci_dev *pdev)
9599{
9600 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9601 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9602 uint32_t intr_mode;
9603 int error;
9604
9605 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9606 "0452 PCI device Power Management resume.\n");
9607
9608 /* Restore device state from PCI config space */
9609 pci_set_power_state(pdev, PCI_D0);
9610 pci_restore_state(pdev);
0d878419 9611
1dfb5a47
JS
9612 /*
9613 * The newer kernel behavior of pci_restore_state() clears the device's
9614 * saved_state flag, so the restored state must be saved again.
9615 */
9616 pci_save_state(pdev);
9617
da0436e9
JS
9618 if (pdev->is_busmaster)
9619 pci_set_master(pdev);
9620
9621 /* Startup the kernel thread for this host adapter. */
9622 phba->worker_thread = kthread_run(lpfc_do_work, phba,
9623 "lpfc_worker_%d", phba->brd_no);
9624 if (IS_ERR(phba->worker_thread)) {
9625 error = PTR_ERR(phba->worker_thread);
9626 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9627 "0434 PM resume failed to start worker "
9628 "thread: error=x%x.\n", error);
9629 return error;
9630 }
9631
9632 /* Configure and enable interrupt */
9633 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9634 if (intr_mode == LPFC_INTR_ERROR) {
9635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9636 "0430 PM resume Failed to enable interrupt\n");
9637 return -EIO;
9638 } else
9639 phba->intr_mode = intr_mode;
9640
9641 /* Restart HBA and bring it online */
9642 lpfc_sli_brdrestart(phba);
9643 lpfc_online(phba);
9644
9645 /* Log the current active interrupt mode */
9646 lpfc_log_intr_mode(phba, phba->intr_mode);
9647
9648 return 0;
9649}
9650
891478a2
JS
9651/**
9652 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
9653 * @phba: pointer to lpfc hba data structure.
9654 *
9655 * This routine is called to prepare the SLI3 device for PCI slot recovery.
e2af0d2e 9656 * It aborts all the outstanding SCSI I/Os to the PCI device.
891478a2
JS
9657 **/
9658static void
9659lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9660{
e2af0d2e
JS
9661 struct lpfc_sli *psli = &phba->sli;
9662 struct lpfc_sli_ring *pring;
9663
891478a2
JS
9664 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9665 "2723 PCI channel I/O abort preparing for recovery\n");
e2af0d2e
JS
9666
9667 /*
9668 * There may be errored I/Os through the HBA; abort all I/Os on the
9669 * txcmplq and let the SCSI mid-layer retry them to recover.
9670 */
9671 pring = &psli->ring[psli->fcp_ring];
9672 lpfc_sli_abort_iocb_ring(phba, pring);
891478a2
JS
9673}
9674
0d878419
JS
9675/**
9676 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
9677 * @phba: pointer to lpfc hba data structure.
9678 *
9679 * This routine is called to prepare the SLI3 device for PCI slot reset. It
9680 * blocks management and SCSI I/O, flushes the driver's pending FCP I/Os,
9681 * stops the HBA timers, and disables the device interrupt and PCI device.
9682 **/
9683static void
9684lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
9685{
0d878419 9686 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
891478a2 9687 "2710 PCI channel disable preparing for reset\n");
e2af0d2e 9688
75baf696 9689 /* Block any management I/Os to the device */
618a5230 9690 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
75baf696 9691
e2af0d2e
JS
9692 /* Block all SCSI devices' I/Os on the host */
9693 lpfc_scsi_dev_block(phba);
9694
ea714f3d
JS
9695 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9696 lpfc_sli_flush_fcp_rings(phba);
9697
e2af0d2e
JS
9698 /* stop all timers */
9699 lpfc_stop_hba_timers(phba);
9700
0d878419
JS
9701 /* Disable interrupt and pci device */
9702 lpfc_sli_disable_intr(phba);
9703 pci_disable_device(phba->pcidev);
0d878419
JS
9704}
9705
9706/**
9707 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
9708 * @phba: pointer to lpfc hba data structure.
9709 *
9710 * This routine is called to prepare the SLI3 device for permanent PCI slot
9711 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
9712 * pending I/Os.
9713 **/
9714static void
75baf696 9715lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
0d878419
JS
9716{
9717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
891478a2 9718 "2711 PCI channel permanent disable for failure\n");
e2af0d2e
JS
9719 /* Block all SCSI devices' I/Os on the host */
9720 lpfc_scsi_dev_block(phba);
9721
9722 /* stop all timers */
9723 lpfc_stop_hba_timers(phba);
9724
0d878419
JS
9725 /* Clean up all driver's outstanding SCSI I/Os */
9726 lpfc_sli_flush_fcp_rings(phba);
9727}
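/*
 * The three prep routines above form an escalation ladder keyed to the
 * reported PCI channel state:
 *
 *	recover      - abort outstanding FCP I/Os so the mid-layer retries
 *	reset        - additionally block management and SCSI I/O, flush the
 *	               FCP rings, stop the timers, and disable the interrupt
 *	               and PCI device
 *	perm failure - block SCSI I/O, stop the timers, and flush the FCP
 *	               rings ahead of the permanent slot disable
 */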
9728
da0436e9
JS
9729/**
9730 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
9731 * @pdev: pointer to PCI device.
9732 * @state: the current PCI connection state.
9733 *
9734 * This routine is called from the PCI subsystem for I/O error handling on
9735 * devices with the SLI-3 interface spec. It is called by the PCI
9736 * subsystem after a PCI bus error affecting this device has been detected.
9737 * When this function is invoked, it needs to stop all the I/Os and
9738 * interrupt(s) to the device. Once that is done, it will return
9739 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
9740 * as desired.
9741 *
9742 * Return codes
0d878419 9743 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
da0436e9
JS
9744 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9745 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9746 **/
9747static pci_ers_result_t
9748lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
9749{
9750 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9751 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
da0436e9 9752
0d878419
JS
9753 switch (state) {
9754 case pci_channel_io_normal:
891478a2
JS
9755 /* Non-fatal error, prepare for recovery */
9756 lpfc_sli_prep_dev_for_recover(phba);
0d878419
JS
9757 return PCI_ERS_RESULT_CAN_RECOVER;
9758 case pci_channel_io_frozen:
9759 /* Fatal error, prepare for slot reset */
9760 lpfc_sli_prep_dev_for_reset(phba);
9761 return PCI_ERS_RESULT_NEED_RESET;
9762 case pci_channel_io_perm_failure:
9763 /* Permanent failure, prepare for device down */
75baf696 9764 lpfc_sli_prep_dev_for_perm_failure(phba);
da0436e9 9765 return PCI_ERS_RESULT_DISCONNECT;
0d878419
JS
9766 default:
9767 /* Unknown state, prepare and request slot reset */
9768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9769 "0472 Unknown PCI error state: x%x\n", state);
9770 lpfc_sli_prep_dev_for_reset(phba);
9771 return PCI_ERS_RESULT_NEED_RESET;
da0436e9 9772 }
da0436e9
JS
9773}
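/*
 * State-to-result mapping implemented by the switch above:
 *
 *	pci_channel_io_normal       -> prep for recover, CAN_RECOVER
 *	pci_channel_io_frozen       -> prep for reset,   NEED_RESET
 *	pci_channel_io_perm_failure -> prep for removal, DISCONNECT
 *	anything else               -> prep for reset,   NEED_RESET
 */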
9774
9775/**
9776 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
9777 * @pdev: pointer to PCI device.
9778 *
9779 * This routine is called from the PCI subsystem for error handling to
9780 * device with SLI-3 interface spec. This is called after PCI bus has been
9781 * reset to restart the PCI card from scratch, as if from a cold-boot.
9782 * During the PCI subsystem error recovery, after driver returns
9783 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9784 * recovery and then call this routine before calling the .resume method
9785 * to recover the device. This function will initialize the HBA device,
9786 * enable the interrupt, but it will just put the HBA to offline state
9787 * without passing any I/O traffic.
9788 *
9789 * Return codes
9790 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
9791 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9792 */
9793static pci_ers_result_t
9794lpfc_io_slot_reset_s3(struct pci_dev *pdev)
9795{
9796 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9797 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9798 struct lpfc_sli *psli = &phba->sli;
9799 uint32_t intr_mode;
9800
9801 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9802 if (pci_enable_device_mem(pdev)) {
9803 printk(KERN_ERR "lpfc: Cannot re-enable "
9804 "PCI device after reset.\n");
9805 return PCI_ERS_RESULT_DISCONNECT;
9806 }
9807
9808 pci_restore_state(pdev);
1dfb5a47
JS
9809
9810 /*
9811 * The newer kernel behavior of pci_restore_state() clears the device's
9812 * saved_state flag, so the restored state must be saved again.
9813 */
9814 pci_save_state(pdev);
9815
da0436e9
JS
9816 if (pdev->is_busmaster)
9817 pci_set_master(pdev);
9818
9819 spin_lock_irq(&phba->hbalock);
9820 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9821 spin_unlock_irq(&phba->hbalock);
9822
9823 /* Configure and enable interrupt */
9824 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9825 if (intr_mode == LPFC_INTR_ERROR) {
9826 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9827 "0427 Cannot re-enable interrupt after "
9828 "slot reset.\n");
9829 return PCI_ERS_RESULT_DISCONNECT;
9830 } else
9831 phba->intr_mode = intr_mode;
9832
75baf696 9833 /* Take device offline, it will perform cleanup */
618a5230 9834 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
da0436e9
JS
9835 lpfc_offline(phba);
9836 lpfc_sli_brdrestart(phba);
9837
9838 /* Log the current active interrupt mode */
9839 lpfc_log_intr_mode(phba, phba->intr_mode);
9840
9841 return PCI_ERS_RESULT_RECOVERED;
9842}
9843
9844/**
9845 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
9846 * @pdev: pointer to PCI device
9847 *
9848 * This routine is called from the PCI subsystem for error handling on devices
9849 * with the SLI-3 interface spec. It is called when kernel error recovery tells
9850 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9851 * error recovery. After this call, traffic can start to flow from this device
9852 * again.
9853 */
9854static void
9855lpfc_io_resume_s3(struct pci_dev *pdev)
9856{
9857 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9858 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3772a991 9859
e2af0d2e 9860 /* Bring device online, it will be no-op for non-fatal error resume */
da0436e9 9861 lpfc_online(phba);
0d878419
JS
9862
9863 /* Clean up Advanced Error Reporting (AER) if needed */
9864 if (phba->hba_flag & HBA_AER_ENABLED)
9865 pci_cleanup_aer_uncorrect_error_status(pdev);
da0436e9 9866}
3772a991 9867
da0436e9
JS
9868/**
9869 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
9870 * @phba: pointer to lpfc hba data structure.
9871 *
9872 * Returns the number of ELS/CT IOCBs to reserve.
9873 **/
9874int
9875lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9876{
9877 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
9878
f1126688
JS
9879 if (phba->sli_rev == LPFC_SLI_REV4) {
9880 if (max_xri <= 100)
6a9c52cf 9881 return 10;
f1126688 9882 else if (max_xri <= 256)
6a9c52cf 9883 return 25;
f1126688 9884 else if (max_xri <= 512)
6a9c52cf 9885 return 50;
f1126688 9886 else if (max_xri <= 1024)
6a9c52cf 9887 return 100;
8a9d2e80 9888 else if (max_xri <= 1536)
6a9c52cf 9889 return 150;
8a9d2e80
JS
9890 else if (max_xri <= 2048)
9891 return 200;
9892 else
9893 return 250;
f1126688
JS
9894 } else
9895 return 0;
3772a991
JS
9896}
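/*
 * The reservation above scales in steps with the configured XRI count
 * (SLI-4 only; SLI-3 reserves none here):
 *
 *	max_xri <= 100  -> 10 ELS/CT IOCBs
 *	max_xri <= 256  -> 25
 *	max_xri <= 512  -> 50
 *	max_xri <= 1024 -> 100
 *	max_xri <= 1536 -> 150
 *	max_xri <= 2048 -> 200
 *	otherwise       -> 250
 */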
9897
52d52440
JS
9898/**
9899 * lpfc_write_firmware - attempt to write a firmware image to the port
52d52440 9900 * @fw: pointer to firmware image returned from request_firmware.
ce396282 9901 * @context: pointer to the lpfc hba data structure (passed as a void *).
52d52440 9902 *
52d52440 9903 **/
ce396282
JS
9904static void
9905lpfc_write_firmware(const struct firmware *fw, void *context)
52d52440 9906{
ce396282 9907 struct lpfc_hba *phba = (struct lpfc_hba *)context;
6b5151fd 9908 char fwrev[FW_REV_STR_SIZE];
ce396282 9909 struct lpfc_grp_hdr *image;
52d52440
JS
9910 struct list_head dma_buffer_list;
9911 int i, rc = 0;
9912 struct lpfc_dmabuf *dmabuf, *next;
9913 uint32_t offset = 0, temp_offset = 0;
9914
c71ab861 9915 /* It can be null in no-wait mode, sanity check */
ce396282
JS
9916 if (!fw) {
9917 rc = -ENXIO;
9918 goto out;
9919 }
9920 image = (struct lpfc_grp_hdr *)fw->data;
9921
52d52440 9922 INIT_LIST_HEAD(&dma_buffer_list);
079b5c91
JS
9923 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
9924 (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
9925 LPFC_FILE_TYPE_GROUP) ||
9926 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
9927 (be32_to_cpu(image->size) != fw->size)) {
52d52440
JS
9928 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9929 "3022 Invalid FW image found. "
079b5c91
JS
9930 "Magic:%x Type:%x ID:%x\n",
9931 be32_to_cpu(image->magic_number),
9932 bf_get_be32(lpfc_grp_hdr_file_type, image),
9933 bf_get_be32(lpfc_grp_hdr_id, image));
ce396282
JS
9934 rc = -EINVAL;
9935 goto release_out;
52d52440
JS
9936 }
9937 lpfc_decode_firmware_rev(phba, fwrev, 1);
88a2cfbb 9938 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
52d52440 9939 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
ce396282 9940 "3023 Updating Firmware, Current Version:%s "
52d52440 9941 "New Version:%s\n",
88a2cfbb 9942 fwrev, image->revision);
52d52440
JS
9943 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
9944 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
9945 GFP_KERNEL);
9946 if (!dmabuf) {
9947 rc = -ENOMEM;
ce396282 9948 goto release_out;
52d52440
JS
9949 }
9950 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9951 SLI4_PAGE_SIZE,
9952 &dmabuf->phys,
9953 GFP_KERNEL);
9954 if (!dmabuf->virt) {
9955 kfree(dmabuf);
9956 rc = -ENOMEM;
ce396282 9957 goto release_out;
52d52440
JS
9958 }
9959 list_add_tail(&dmabuf->list, &dma_buffer_list);
9960 }
9961 while (offset < fw->size) {
9962 temp_offset = offset;
9963 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
079b5c91 9964 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
52d52440
JS
9965 memcpy(dmabuf->virt,
9966 fw->data + temp_offset,
079b5c91
JS
9967 fw->size - temp_offset);
9968 temp_offset = fw->size;
52d52440
JS
9969 break;
9970 }
52d52440
JS
9971 memcpy(dmabuf->virt, fw->data + temp_offset,
9972 SLI4_PAGE_SIZE);
88a2cfbb 9973 temp_offset += SLI4_PAGE_SIZE;
52d52440
JS
9974 }
9975 rc = lpfc_wr_object(phba, &dma_buffer_list,
9976 (fw->size - offset), &offset);
ce396282
JS
9977 if (rc)
9978 goto release_out;
52d52440
JS
9979 }
9980 rc = offset;
9981 }
ce396282
JS
9982
9983release_out:
52d52440
JS
9984 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
9985 list_del(&dmabuf->list);
9986 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
9987 dmabuf->virt, dmabuf->phys);
9988 kfree(dmabuf);
9989 }
ce396282
JS
9990 release_firmware(fw);
9991out:
9992 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
c71ab861 9993 "3024 Firmware update done: %d.\n", rc);
ce396282 9994 return;
52d52440
JS
9995}
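/*
 * Flow of lpfc_write_firmware() above: validate the group-object header
 * (magic number, file type, file ID, and declared size against fw->size),
 * skip the write when the image revision already matches the running
 * firmware, then stream the image through LPFC_MBX_WR_CONFIG_MAX_BDE
 * DMA buffers of SLI4_PAGE_SIZE each, issuing lpfc_wr_object() per batch
 * until the write offset reaches fw->size.
 */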
9996
c71ab861
JS
9997/**
9998 * lpfc_sli4_request_firmware_update - Request Linux generic firmware upgrade
9999 * @phba: pointer to lpfc hba data structure.
10000 * @fw_upgrade: INT_FW_UPGRADE (asynchronous) or RUN_FW_UPGRADE (synchronous)
10001 * This routine is called to perform a Linux generic firmware upgrade on
10002 * devices that support this feature.
10003 **/
10004int
10005lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
10006{
10007 uint8_t file_name[ELX_MODEL_NAME_SIZE];
10008 int ret;
10009 const struct firmware *fw;
10010
10011 /* Only supported on SLI4 interface type 2 for now */
10012 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
10013 LPFC_SLI_INTF_IF_TYPE_2)
10014 return -EPERM;
10015
10016 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
10017
10018 if (fw_upgrade == INT_FW_UPGRADE) {
10019 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
10020 file_name, &phba->pcidev->dev,
10021 GFP_KERNEL, (void *)phba,
10022 lpfc_write_firmware);
10023 } else if (fw_upgrade == RUN_FW_UPGRADE) {
10024 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
10025 if (!ret)
10026 lpfc_write_firmware(fw, (void *)phba);
10027 } else {
10028 ret = -EINVAL;
10029 }
10030
10031 return ret;
10032}
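/*
 * A minimal caller sketch: the SLI-4 probe path further down requests the
 * asynchronous (no-wait) flavor when the module parameter asks for it:
 *
 *	if (phba->cfg_request_firmware_upgrade)
 *		ret = lpfc_sli4_request_firmware_update(phba,
 *							INT_FW_UPGRADE);
 *
 * RUN_FW_UPGRADE instead waits for request_firmware() and writes the
 * image synchronously.
 */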
10033
3772a991 10034/**
da0436e9 10035 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
3772a991
JS
10036 * @pdev: pointer to PCI device
10037 * @pid: pointer to PCI device identifier
10038 *
da0436e9
JS
10039 * This routine is called from the kernel's PCI subsystem for devices with
10040 * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface is
3772a991 10041 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
da0436e9
JS
10042 * device-specific information of the device and driver to see if the
10043 * driver can support this kind of device. If the match is successful, the
10044 * driver core invokes this routine. If this routine determines it can claim
10045 * the HBA, it does all the initialization that it needs to do to handle the
10046 * HBA properly.
3772a991
JS
10047 *
10048 * Return code
10049 * 0 - driver can claim the device
10050 * negative value - driver can not claim the device
10051 **/
6f039790 10052static int
da0436e9 10053lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
3772a991
JS
10054{
10055 struct lpfc_hba *phba;
10056 struct lpfc_vport *vport = NULL;
6669f9bb 10057 struct Scsi_Host *shost = NULL;
ce396282 10058 int error, ret;
3772a991 10059 uint32_t cfg_mode, intr_mode;
67d12733 10060 int adjusted_fcp_io_channel;
3772a991
JS
10061
10062 /* Allocate memory for HBA structure */
10063 phba = lpfc_hba_alloc(pdev);
10064 if (!phba)
10065 return -ENOMEM;
10066
10067 /* Perform generic PCI device enabling operation */
10068 error = lpfc_enable_pci_dev(phba);
079b5c91 10069 if (error)
3772a991 10070 goto out_free_phba;
3772a991 10071
da0436e9
JS
10072 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
10073 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
3772a991
JS
10074 if (error)
10075 goto out_disable_pci_dev;
10076
da0436e9
JS
10077 /* Set up SLI-4 specific device PCI memory space */
10078 error = lpfc_sli4_pci_mem_setup(phba);
3772a991
JS
10079 if (error) {
10080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 10081 "1410 Failed to set up pci memory space.\n");
3772a991
JS
10082 goto out_disable_pci_dev;
10083 }
10084
10085 /* Set up phase-1 common device driver resources */
10086 error = lpfc_setup_driver_resource_phase1(phba);
10087 if (error) {
10088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9
JS
10089 "1411 Failed to set up driver resource.\n");
10090 goto out_unset_pci_mem_s4;
3772a991
JS
10091 }
10092
da0436e9
JS
10093 /* Set up SLI-4 Specific device driver resources */
10094 error = lpfc_sli4_driver_resource_setup(phba);
3772a991
JS
10095 if (error) {
10096 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9
JS
10097 "1412 Failed to set up driver resource.\n");
10098 goto out_unset_pci_mem_s4;
3772a991
JS
10099 }
10100
10101 /* Initialize and populate the iocb list per host */
2a9bf3d0
JS
10102
10103 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10104 "2821 initialize iocb list %d.\n",
10105 phba->cfg_iocb_cnt*1024);
10106 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
10107
3772a991
JS
10108 if (error) {
10109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9
JS
10110 "1413 Failed to initialize iocb list.\n");
10111 goto out_unset_driver_resource_s4;
3772a991
JS
10112 }
10113
19ca7609 10114 INIT_LIST_HEAD(&phba->active_rrq_list);
7d791df7 10115 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
19ca7609 10116
3772a991
JS
10117 /* Set up common device driver resources */
10118 error = lpfc_setup_driver_resource_phase2(phba);
10119 if (error) {
10120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 10121 "1414 Failed to set up driver resource.\n");
3772a991
JS
10122 goto out_free_iocb_list;
10123 }
10124
079b5c91
JS
10125 /* Get the default values for Model Name and Description */
10126 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
10127
3772a991
JS
10128 /* Create SCSI host to the physical port */
10129 error = lpfc_create_shost(phba);
10130 if (error) {
10131 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 10132 "1415 Failed to create scsi host.\n");
3772a991
JS
10133 goto out_unset_driver_resource;
10134 }
9399627f 10135
5b75da2f 10136 /* Configure sysfs attributes */
3772a991
JS
10137 vport = phba->pport;
10138 error = lpfc_alloc_sysfs_attr(vport);
10139 if (error) {
9399627f 10140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 10141 "1416 Failed to allocate sysfs attr\n");
3772a991 10142 goto out_destroy_shost;
98c9ea5c 10143 }
875fbdfe 10144
6669f9bb 10145 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
3772a991 10146 /* Now, trying to enable interrupt and bring up the device */
5b75da2f 10147 cfg_mode = phba->cfg_use_msi;
5b75da2f 10148
7b15db32
JS
10149 /* Put device to a known state before enabling interrupt */
10150 lpfc_stop_port(phba);
10151 /* Configure and enable interrupt */
10152 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
10153 if (intr_mode == LPFC_INTR_ERROR) {
10154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10155 "0426 Failed to enable interrupt.\n");
10156 error = -ENODEV;
10157 goto out_free_sysfs_attr;
10158 }
10159 /* Default to single EQ for non-MSI-X */
10160 if (phba->intr_type != MSIX)
10161 adjusted_fcp_io_channel = 1;
10162 else
10163 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
10164 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
10165 /* Set up SLI-4 HBA */
10166 if (lpfc_sli4_hba_setup(phba)) {
10167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10168 "1421 Failed to set up hba\n");
10169 error = -ENODEV;
10170 goto out_disable_intr;
98c9ea5c 10171 }
858c9f6c 10172
7b15db32
JS
10173 /* Log the current active interrupt mode */
10174 phba->intr_mode = intr_mode;
10175 lpfc_log_intr_mode(phba, intr_mode);
10176
3772a991
JS
10177 /* Perform post initialization setup */
10178 lpfc_post_init_setup(phba);
dea3101e 10179
c71ab861
JS
10180 /* check for firmware upgrade or downgrade */
10181 if (phba->cfg_request_firmware_upgrade)
10182 ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
52d52440 10183
1c6834a7
JS
10184 /* Check if there are static vports to be created. */
10185 lpfc_create_static_vport(phba);
dea3101e 10186 return 0;
10187
da0436e9
JS
10188out_disable_intr:
10189 lpfc_sli4_disable_intr(phba);
5b75da2f
JS
10190out_free_sysfs_attr:
10191 lpfc_free_sysfs_attr(vport);
3772a991
JS
10192out_destroy_shost:
10193 lpfc_destroy_shost(phba);
10194out_unset_driver_resource:
10195 lpfc_unset_driver_resource_phase2(phba);
10196out_free_iocb_list:
10197 lpfc_free_iocb_list(phba);
da0436e9
JS
10198out_unset_driver_resource_s4:
10199 lpfc_sli4_driver_resource_unset(phba);
10200out_unset_pci_mem_s4:
10201 lpfc_sli4_pci_mem_unset(phba);
3772a991
JS
10202out_disable_pci_dev:
10203 lpfc_disable_pci_dev(phba);
6669f9bb
JS
10204 if (shost)
10205 scsi_host_put(shost);
2e0fef85 10206out_free_phba:
3772a991 10207 lpfc_hba_free(phba);
dea3101e 10208 return error;
10209}
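/*
 * Note the error-unwind ladder above: each goto label releases exactly
 * what was set up before the failure point, in reverse order of the
 * setup calls (interrupt, sysfs attributes, shost, phase-2 resources,
 * iocb list, SLI-4 resources, PCI memory map, PCI device, and finally
 * the hba structure itself).
 */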
10210
e59058c4 10211/**
da0436e9 10212 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
e59058c4
JS
10213 * @pdev: pointer to PCI device
10214 *
da0436e9
JS
10215 * This routine is called from the kernel's PCI subsystem for devices with
10216 * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
3772a991
JS
10217 * removed from PCI bus, it performs all the necessary cleanup for the HBA
10218 * device to be removed from the PCI subsystem properly.
e59058c4 10219 **/
6f039790 10220static void
da0436e9 10221lpfc_pci_remove_one_s4(struct pci_dev *pdev)
dea3101e 10222{
da0436e9 10223 struct Scsi_Host *shost = pci_get_drvdata(pdev);
2e0fef85 10224 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
eada272d 10225 struct lpfc_vport **vports;
da0436e9 10226 struct lpfc_hba *phba = vport->phba;
eada272d 10227 int i;
8a4df120 10228
da0436e9 10229 /* Mark the device unloading flag */
549e55cd 10230 spin_lock_irq(&phba->hbalock);
51ef4c26 10231 vport->load_flag |= FC_UNLOADING;
549e55cd 10232 spin_unlock_irq(&phba->hbalock);
2e0fef85 10233
da0436e9 10234 /* Free the HBA sysfs attributes */
858c9f6c
JS
10235 lpfc_free_sysfs_attr(vport);
10236
eada272d
JS
10237 /* Release all the vports against this physical port */
10238 vports = lpfc_create_vport_work_array(phba);
10239 if (vports != NULL)
587a37f6
JS
10240 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
10241 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
10242 continue;
eada272d 10243 fc_vport_terminate(vports[i]->fc_vport);
587a37f6 10244 }
eada272d
JS
10245 lpfc_destroy_vport_work_array(phba, vports);
10246
10247 /* Remove FC host and then SCSI host with the physical port */
858c9f6c
JS
10248 fc_remove_host(shost);
10249 scsi_remove_host(shost);
da0436e9
JS
10250
10251 /* Perform cleanup on the physical port */
87af33fe
JS
10252 lpfc_cleanup(vport);
10253
2e0fef85 10254 /*
da0436e9 10255 * Bring down the SLI Layer. This step disables all interrupts,
2e0fef85 10256 * clears the rings, discards all mailbox commands, and resets
da0436e9 10257 * the HBA FCoE function.
2e0fef85 10258 */
da0436e9
JS
10259 lpfc_debugfs_terminate(vport);
10260 lpfc_sli4_hba_unset(phba);
a257bf90 10261
858c9f6c
JS
10262 spin_lock_irq(&phba->hbalock);
10263 list_del_init(&vport->listentry);
10264 spin_unlock_irq(&phba->hbalock);
10265
3677a3a7 10266 /* Perform scsi free before driver resource_unset since scsi
da0436e9 10267 * buffers are released to their corresponding pools here.
2e0fef85
JS
10268 */
10269 lpfc_scsi_free(phba);
67d12733 10270
da0436e9 10271 lpfc_sli4_driver_resource_unset(phba);
ed957684 10272
da0436e9
JS
10273 /* Unmap adapter Control and Doorbell registers */
10274 lpfc_sli4_pci_mem_unset(phba);
2e0fef85 10275
da0436e9
JS
10276 /* Release PCI resources and disable device's PCI function */
10277 scsi_host_put(shost);
10278 lpfc_disable_pci_dev(phba);
2e0fef85 10279
da0436e9 10280 /* Finally, free the driver's device data structure */
3772a991 10281 lpfc_hba_free(phba);
2e0fef85 10282
da0436e9 10283 return;
dea3101e 10284}
10285
3a55b532 10286/**
da0436e9 10287 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
3a55b532
JS
10288 * @pdev: pointer to PCI device
10289 * @msg: power management message
10290 *
da0436e9
JS
10291 * This routine is called from the kernel's PCI subsystem to support system
10292 * Power Management (PM) for devices with the SLI-4 interface spec. When PM
10293 * invokes this method, it quiesces the device by stopping the driver's
10294 * worker thread for the device, turning off the device's interrupt and DMA,
10295 * and bringing the device offline. Note that the driver implements only the
10296 * minimum PM requirements for a power-aware driver: all possible PM messages
10297 * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated
10298 * as SUSPEND, and the driver fully reinitializes its device during the
10299 * resume() call. The driver therefore sets the device to the PCI_D3hot
10300 * state in PCI config space instead of setting it
3772a991 10301 * according to the @msg provided by the PM.
3a55b532
JS
10302 *
10303 * Return code
3772a991
JS
10304 * 0 - driver suspended the device
10305 * Error otherwise
3a55b532
JS
10306 **/
10307static int
da0436e9 10308lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
3a55b532
JS
10309{
10310 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10311 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10312
10313 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
75baf696 10314 "2843 PCI device Power Management suspend.\n");
3a55b532
JS
10315
10316 /* Bring down the device */
618a5230 10317 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
3a55b532
JS
10318 lpfc_offline(phba);
10319 kthread_stop(phba->worker_thread);
10320
10321 /* Disable interrupt from device */
da0436e9 10322 lpfc_sli4_disable_intr(phba);
5350d872 10323 lpfc_sli4_queue_destroy(phba);
3a55b532
JS
10324
10325 /* Save device state to PCI config space */
10326 pci_save_state(pdev);
10327 pci_set_power_state(pdev, PCI_D3hot);
10328
10329 return 0;
10330}
10331
10332/**
da0436e9 10333 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
3a55b532
JS
10334 * @pdev: pointer to PCI device
10335 *
da0436e9
JS
10336 * This routine is called from the kernel's PCI subsystem to support system
10337 * Power Management (PM) for devices with the SLI-4 interface spec. When PM
10338 * invokes this method, it restores the device's PCI config space state,
10339 * fully reinitializes the device, and brings it online. Note that the
10340 * driver implements only the minimum PM requirements for a power-aware
10341 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
10342 * the suspend() method are treated as SUSPEND, and the driver fully
10343 * reinitializes its device during the resume() call. The device is
10344 * therefore set to PCI_D0 directly in PCI config space before restoring
10345 * the saved state.
3a55b532
JS
10346 *
10347 * Return code
3772a991
JS
10348 * 0 - driver resumed the device
10349 * Error otherwise
3a55b532
JS
10350 **/
10351static int
da0436e9 10352lpfc_pci_resume_one_s4(struct pci_dev *pdev)
3a55b532
JS
10353{
10354 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10355 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
5b75da2f 10356 uint32_t intr_mode;
3a55b532
JS
10357 int error;
10358
10359 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
da0436e9 10360 "0292 PCI device Power Management resume.\n");
3a55b532
JS
10361
10362 /* Restore device state from PCI config space */
10363 pci_set_power_state(pdev, PCI_D0);
10364 pci_restore_state(pdev);
1dfb5a47
JS
10365
10366 /*
10367 * The newer kernel behavior of pci_restore_state() clears the device's
10368 * saved_state flag, so the restored state must be saved again.
10369 */
10370 pci_save_state(pdev);
10371
3a55b532
JS
10372 if (pdev->is_busmaster)
10373 pci_set_master(pdev);
10374
da0436e9 10375 /* Startup the kernel thread for this host adapter. */
3a55b532
JS
10376 phba->worker_thread = kthread_run(lpfc_do_work, phba,
10377 "lpfc_worker_%d", phba->brd_no);
10378 if (IS_ERR(phba->worker_thread)) {
10379 error = PTR_ERR(phba->worker_thread);
10380 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 10381 "0293 PM resume failed to start worker "
3a55b532
JS
10382 "thread: error=x%x.\n", error);
10383 return error;
10384 }
10385
5b75da2f 10386 /* Configure and enable interrupt */
da0436e9 10387 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
5b75da2f 10388 if (intr_mode == LPFC_INTR_ERROR) {
3a55b532 10389 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 10390 "0294 PM resume Failed to enable interrupt\n");
5b75da2f
JS
10391 return -EIO;
10392 } else
10393 phba->intr_mode = intr_mode;
3a55b532
JS
10394
10395 /* Restart HBA and bring it online */
10396 lpfc_sli_brdrestart(phba);
10397 lpfc_online(phba);
10398
5b75da2f
JS
10399 /* Log the current active interrupt mode */
10400 lpfc_log_intr_mode(phba, phba->intr_mode);
10401
3a55b532
JS
10402 return 0;
10403}
10404
75baf696
JS
10405/**
10406 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
10407 * @phba: pointer to lpfc hba data structure.
10408 *
10409 * This routine is called to prepare the SLI4 device for PCI slot recover. It
10410 * aborts all the outstanding SCSI I/Os to the pci device.
10411 **/
10412static void
10413lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
10414{
10415 struct lpfc_sli *psli = &phba->sli;
10416 struct lpfc_sli_ring *pring;
10417
10418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10419 "2828 PCI channel I/O abort preparing for recovery\n");
10420 /*
10421 * There may be errored I/Os through the HBA; abort all I/Os on the
10422 * txcmplq and let the SCSI mid-layer retry them to recover.
10423 */
10424 pring = &psli->ring[psli->fcp_ring];
10425 lpfc_sli_abort_iocb_ring(phba, pring);
10426}
10427
10428/**
10429 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
10430 * @phba: pointer to lpfc hba data structure.
10431 *
10432 * This routine is called to prepare the SLI4 device for PCI slot reset. It
10433 * blocks management and SCSI I/O, flushes the driver's pending FCP I/Os,
10434 * stops the HBA timers, and disables the device interrupt and PCI device.
10435 **/
10436static void
10437lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
10438{
10439 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10440 "2826 PCI channel disable preparing for reset\n");
10441
10442 /* Block any management I/Os to the device */
618a5230 10443 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
75baf696
JS
10444
10445 /* Block all SCSI devices' I/Os on the host */
10446 lpfc_scsi_dev_block(phba);
10447
ea714f3d
JS
10448 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
10449 lpfc_sli_flush_fcp_rings(phba);
10450
75baf696
JS
10451 /* stop all timers */
10452 lpfc_stop_hba_timers(phba);
10453
10454 /* Disable interrupt and pci device */
10455 lpfc_sli4_disable_intr(phba);
5350d872 10456 lpfc_sli4_queue_destroy(phba);
75baf696 10457 pci_disable_device(phba->pcidev);
75baf696
JS
10458}
10459
10460/**
10461 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
10462 * @phba: pointer to lpfc hba data structure.
10463 *
10464 * This routine is called to prepare the SLI4 device for permanent PCI slot
10465 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
10466 * pending I/Os.
10467 **/
10468static void
10469lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
10470{
10471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10472 "2827 PCI channel permanent disable for failure\n");
10473
10474 /* Block all SCSI devices' I/Os on the host */
10475 lpfc_scsi_dev_block(phba);
10476
10477 /* stop all timers */
10478 lpfc_stop_hba_timers(phba);
10479
10480 /* Clean up all driver's outstanding SCSI I/Os */
10481 lpfc_sli_flush_fcp_rings(phba);
10482}
10483
8d63f375 10484/**
da0436e9 10485 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
e59058c4
JS
10486 * @pdev: pointer to PCI device.
10487 * @state: the current PCI connection state.
8d63f375 10488 *
da0436e9
JS
10489 * This routine is called from the PCI subsystem for error handling on
10490 * devices with the SLI-4 interface spec. It is called by the PCI subsystem
10491 * after a PCI bus error affecting this device has been detected. When this
10492 * function is invoked, it will need to stop all the I/Os and interrupt(s)
10493 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
10494 * for the PCI subsystem to perform proper recovery as desired.
e59058c4
JS
10495 *
10496 * Return codes
3772a991
JS
10497 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
10498 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
e59058c4 10499 **/
3772a991 10500static pci_ers_result_t
da0436e9 10501lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8d63f375 10502{
75baf696
JS
10503 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10504 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10505
10506 switch (state) {
10507 case pci_channel_io_normal:
10508 /* Non-fatal error, prepare for recovery */
10509 lpfc_sli4_prep_dev_for_recover(phba);
10510 return PCI_ERS_RESULT_CAN_RECOVER;
10511 case pci_channel_io_frozen:
10512 /* Fatal error, prepare for slot reset */
10513 lpfc_sli4_prep_dev_for_reset(phba);
10514 return PCI_ERS_RESULT_NEED_RESET;
10515 case pci_channel_io_perm_failure:
10516 /* Permanent failure, prepare for device down */
10517 lpfc_sli4_prep_dev_for_perm_failure(phba);
10518 return PCI_ERS_RESULT_DISCONNECT;
10519 default:
10520 /* Unknown state, prepare and request slot reset */
10521 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10522 "2825 Unknown PCI error state: x%x\n", state);
10523 lpfc_sli4_prep_dev_for_reset(phba);
10524 return PCI_ERS_RESULT_NEED_RESET;
10525 }
8d63f375
LV
10526}
10527
10528/**
da0436e9 10529 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
e59058c4
JS
10530 * @pdev: pointer to PCI device.
10531 *
da0436e9
JS
10532 * This routine is called from the PCI subsystem for error handling on
10533 * devices with the SLI-4 interface spec. It is called after the PCI bus has
10534 * been reset to restart the PCI card from scratch, as if cold-booted. During the
10535 * PCI subsystem error recovery, after the driver returns
3772a991 10536 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
da0436e9
JS
10537 * recovery and then call this routine before calling the .resume method to
10538 * recover the device. This function will initialize the HBA device, enable
10539 * the interrupt, but it will just put the HBA to offline state without
10540 * passing any I/O traffic.
8d63f375 10541 *
e59058c4 10542 * Return codes
3772a991
JS
10543 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
10544 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8d63f375 10545 */
3772a991 10546static pci_ers_result_t
da0436e9 10547lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8d63f375 10548{
75baf696
JS
10549 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10550 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10551 struct lpfc_sli *psli = &phba->sli;
10552 uint32_t intr_mode;
10553
10554 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
10555 if (pci_enable_device_mem(pdev)) {
10556 printk(KERN_ERR "lpfc: Cannot re-enable "
10557 "PCI device after reset.\n");
10558 return PCI_ERS_RESULT_DISCONNECT;
10559 }
10560
10561 pci_restore_state(pdev);
0a96e975
JS
10562
10563 /*
10564 * The newer kernel behavior of pci_restore_state() clears the device's
10565 * saved_state flag, so the restored state must be saved again.
10566 */
10567 pci_save_state(pdev);
10568
75baf696
JS
10569 if (pdev->is_busmaster)
10570 pci_set_master(pdev);
10571
10572 spin_lock_irq(&phba->hbalock);
10573 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
10574 spin_unlock_irq(&phba->hbalock);
10575
10576 /* Configure and enable interrupt */
10577 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
10578 if (intr_mode == LPFC_INTR_ERROR) {
10579 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10580 "2824 Cannot re-enable interrupt after "
10581 "slot reset.\n");
10582 return PCI_ERS_RESULT_DISCONNECT;
10583 } else
10584 phba->intr_mode = intr_mode;
10585
10586 /* Log the current active interrupt mode */
10587 lpfc_log_intr_mode(phba, phba->intr_mode);
10588
8d63f375
LV
10589 return PCI_ERS_RESULT_RECOVERED;
10590}
10591
10592/**
da0436e9 10593 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
e59058c4 10594 * @pdev: pointer to PCI device
8d63f375 10595 *
3772a991 10596 * This routine is called from the PCI subsystem for error handling on devices
da0436e9 10597 * with the SLI-4 interface spec. It is called when kernel error recovery tells
3772a991
JS
10598 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
10599 * error recovery. After this call, traffic can start to flow from this device
10600 * again.
da0436e9 10601 **/
3772a991 10602static void
da0436e9 10603lpfc_io_resume_s4(struct pci_dev *pdev)
8d63f375 10604{
75baf696
JS
10605 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10606 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10607
10608 /*
10609 * In case of slot reset, as function reset is performed through
10610 * mailbox command which needs DMA to be enabled, this operation
10611 * has to be moved to the io resume phase. Taking device offline
10612 * will perform the necessary cleanup.
10613 */
10614 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
10615 /* Perform device reset */
618a5230 10616 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
75baf696
JS
10617 lpfc_offline(phba);
10618 lpfc_sli_brdrestart(phba);
10619 /* Bring the device back online */
10620 lpfc_online(phba);
10621 }
10622
10623 /* Clean up Advanced Error Reporting (AER) if needed */
10624 if (phba->hba_flag & HBA_AER_ENABLED)
10625 pci_cleanup_aer_uncorrect_error_status(pdev);
8d63f375
LV
10626}
10627
3772a991
JS
10628/**
10629 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
10630 * @pdev: pointer to PCI device
10631 * @pid: pointer to PCI device identifier
10632 *
10633 * This routine is to be registered to the kernel's PCI subsystem. When an
10634 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
10635 * at PCI device-specific information of the device and driver to see if the
10636 * driver can support this kind of device. If the match is
10637 * successful, the driver core invokes this routine. This routine dispatches
10638 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
10639 * do all the initialization that it needs to do to handle the HBA device
10640 * properly.
10641 *
10642 * Return code
10643 * 0 - driver can claim the device
10644 * negative value - driver can not claim the device
10645 **/
6f039790 10646static int
3772a991
JS
10647lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
10648{
10649 int rc;
8fa38513 10650 struct lpfc_sli_intf intf;
3772a991 10651
28baac74 10652 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
3772a991
JS
10653 return -ENODEV;
10654
8fa38513 10655 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
28baac74 10656 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
da0436e9 10657 rc = lpfc_pci_probe_one_s4(pdev, pid);
8fa38513 10658 else
3772a991 10659 rc = lpfc_pci_probe_one_s3(pdev, pid);
8fa38513 10660
3772a991
JS
10661 return rc;
10662}
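/*
 * Dispatch rule used above: read the LPFC_SLI_INTF dword from PCI config
 * space; when the valid bit is set and the SLI revision field reports
 * SLI-4, probe through the SLI-4 (PCI device group OC) path, otherwise
 * fall back to the SLI-3 path.
 */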
10663
10664/**
10665 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
10666 * @pdev: pointer to PCI device
10667 *
10668 * This routine is to be registered to the kernel's PCI subsystem. When an
10669 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
10670 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
10671 * remove routine, which will perform all the necessary cleanup for the
10672 * device to be removed from the PCI subsystem properly.
10673 **/
6f039790 10674static void
3772a991
JS
10675lpfc_pci_remove_one(struct pci_dev *pdev)
10676{
10677 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10678 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10679
10680 switch (phba->pci_dev_grp) {
10681 case LPFC_PCI_DEV_LP:
10682 lpfc_pci_remove_one_s3(pdev);
10683 break;
da0436e9
JS
10684 case LPFC_PCI_DEV_OC:
10685 lpfc_pci_remove_one_s4(pdev);
10686 break;
3772a991
JS
10687 default:
10688 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10689 "1424 Invalid PCI device group: 0x%x\n",
10690 phba->pci_dev_grp);
10691 break;
10692 }
10693 return;
10694}
10695
10696/**
10697 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
10698 * @pdev: pointer to PCI device
10699 * @msg: power management message
10700 *
10701 * This routine is to be registered to the kernel's PCI subsystem to support
10702 * system Power Management (PM). When PM invokes this method, it dispatches
10703 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
10704 * suspend the device.
10705 *
10706 * Return code
10707 * 0 - driver suspended the device
10708 * Error otherwise
10709 **/
10710static int
10711lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
10712{
10713 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10714 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10715 int rc = -ENODEV;
10716
10717 switch (phba->pci_dev_grp) {
10718 case LPFC_PCI_DEV_LP:
10719 rc = lpfc_pci_suspend_one_s3(pdev, msg);
10720 break;
da0436e9
JS
10721 case LPFC_PCI_DEV_OC:
10722 rc = lpfc_pci_suspend_one_s4(pdev, msg);
10723 break;
3772a991
JS
10724 default:
10725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10726 "1425 Invalid PCI device group: 0x%x\n",
10727 phba->pci_dev_grp);
10728 break;
10729 }
10730 return rc;
10731}
10732
10733/**
10734 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
10735 * @pdev: pointer to PCI device
10736 *
10737 * This routine is to be registered to the kernel's PCI subsystem to support
10738 * system Power Management (PM). When PM invokes this method, it dispatches
10739 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
10740 * resume the device.
10741 *
10742 * Return code
10743 * 0 - driver resumed the device
10744 * Error otherwise
10745 **/
10746static int
10747lpfc_pci_resume_one(struct pci_dev *pdev)
10748{
10749 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10750 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10751 int rc = -ENODEV;
10752
10753 switch (phba->pci_dev_grp) {
10754 case LPFC_PCI_DEV_LP:
10755 rc = lpfc_pci_resume_one_s3(pdev);
10756 break;
da0436e9
JS
10757 case LPFC_PCI_DEV_OC:
10758 rc = lpfc_pci_resume_one_s4(pdev);
10759 break;
3772a991
JS
10760 default:
10761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10762 "1426 Invalid PCI device group: 0x%x\n",
10763 phba->pci_dev_grp);
10764 break;
10765 }
10766 return rc;
10767}
10768
10769/**
10770 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
10771 * @pdev: pointer to PCI device.
10772 * @state: the current PCI connection state.
10773 *
10774 * This routine is registered to the PCI subsystem for error handling. This
10775 * function is called by the PCI subsystem after a PCI bus error affecting
10776 * this device has been detected. When this routine is invoked, it dispatches
10777 * the action to the proper SLI-3 or SLI-4 device error detected handling
10778 * routine, which will perform the proper error detected operation.
10779 *
10780 * Return codes
10781 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
10782 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10783 **/
10784static pci_ers_result_t
10785lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10786{
10787 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10788 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10789 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
10790
10791 switch (phba->pci_dev_grp) {
10792 case LPFC_PCI_DEV_LP:
10793 rc = lpfc_io_error_detected_s3(pdev, state);
10794 break;
da0436e9
JS
10795 case LPFC_PCI_DEV_OC:
10796 rc = lpfc_io_error_detected_s4(pdev, state);
10797 break;
3772a991
JS
10798 default:
10799 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10800 "1427 Invalid PCI device group: 0x%x\n",
10801 phba->pci_dev_grp);
10802 break;
10803 }
10804 return rc;
10805}
10806
10807/**
10808 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
10809 * @pdev: pointer to PCI device.
10810 *
10811 * This routine is registered to the PCI subsystem for error handling. This
10812 * function is called after PCI bus has been reset to restart the PCI card
10813 * from scratch, as if from a cold-boot. When this routine is invoked, it
10814 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
10815 * routine, which will perform the proper device reset.
10816 *
10817 * Return codes
10818 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
10819 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10820 **/
10821static pci_ers_result_t
10822lpfc_io_slot_reset(struct pci_dev *pdev)
10823{
10824 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10825 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10826 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
10827
10828 switch (phba->pci_dev_grp) {
10829 case LPFC_PCI_DEV_LP:
10830 rc = lpfc_io_slot_reset_s3(pdev);
10831 break;
da0436e9
JS
10832 case LPFC_PCI_DEV_OC:
10833 rc = lpfc_io_slot_reset_s4(pdev);
10834 break;
3772a991
JS
10835 default:
10836 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10837 "1428 Invalid PCI device group: 0x%x\n",
10838 phba->pci_dev_grp);
10839 break;
10840 }
10841 return rc;
10842}
10843
10844/**
10845 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
10846 * @pdev: pointer to PCI device
10847 *
10848 * This routine is registered to the PCI subsystem for error handling. It
10849 * is called when kernel error recovery tells the lpfc driver that it is
10850 * OK to resume normal PCI operation after PCI bus error recovery. When
10851 * this routine is invoked, it dispatches the action to the proper SLI-3
10852 * or SLI-4 device io_resume routine, which will resume the device operation.
10853 **/
10854static void
10855lpfc_io_resume(struct pci_dev *pdev)
10856{
10857 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10858 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10859
10860 switch (phba->pci_dev_grp) {
10861 case LPFC_PCI_DEV_LP:
10862 lpfc_io_resume_s3(pdev);
10863 break;
da0436e9
JS
10864 case LPFC_PCI_DEV_OC:
10865 lpfc_io_resume_s4(pdev);
10866 break;
3772a991
JS
10867 default:
10868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10869 "1429 Invalid PCI device group: 0x%x\n",
10870 phba->pci_dev_grp);
10871 break;
10872 }
10873 return;
10874}
10875
1ba981fd
JS
10876/**
10877 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
10878 * @phba: pointer to lpfc hba data structure.
10879 *
10880 * This routine checks to see if OAS is supported for this adapter. If
10881 * supported, the Flash Optimized Fabric (fof) config flag is set. Otherwise,
10882 * the enable OAS flag is cleared and the pool created for OAS device data
10883 * is destroyed.
10884 *
10885 **/
10886void
10887lpfc_sli4_oas_verify(struct lpfc_hba *phba)
10888{
10889
10890 if (!phba->cfg_EnableXLane)
10891 return;
10892
10893 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
10894 phba->cfg_fof = 1;
10895 } else {
10896 phba->cfg_EnableXLane = 0;
10897 if (phba->device_data_mem_pool)
10898 mempool_destroy(phba->device_data_mem_pool);
10899 phba->device_data_mem_pool = NULL;
10900 }
10901
10902 return;
10903}
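/*
 * Decision implemented above: with cfg_EnableXLane off this is a no-op;
 * with it on, adapters reporting oas_supported get cfg_fof set, while
 * all others have cfg_EnableXLane cleared and the OAS device-data
 * mempool torn down.
 */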
10904
10905/**
10906 * lpfc_fof_queue_setup - Set up all the fof queues
10907 * @phba: pointer to lpfc hba data structure.
10908 *
10909 * This routine is invoked to set up all the fof queues for the FC HBA
10910 * operation.
10911 *
10912 * Return codes
10913 * 0 - successful
10914 * -ENOMEM - No available memory
10915 **/
10916int
10917lpfc_fof_queue_setup(struct lpfc_hba *phba)
10918{
10919 struct lpfc_sli *psli = &phba->sli;
10920 int rc;
10921
10922 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
10923 if (rc)
10924 return -ENOMEM;
10925
10926 if (phba->cfg_EnableXLane) {
10927
10928 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
10929 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
10930 if (rc)
10931 goto out_oas_cq;
10932
10933 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
10934 phba->sli4_hba.oas_cq, LPFC_FCP);
10935 if (rc)
10936 goto out_oas_wq;
10937
10938 phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
10939 phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
10940 }
10941
10942 return 0;
10943
10944out_oas_wq:
10945 if (phba->cfg_EnableXLane)
10946 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
10947out_oas_cq:
10948 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
10949 return rc;
10950
10951}
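/*
 * Setup order above follows the SLI-4 queue dependency chain: the fof
 * EQ is created first, then (only when XLane/OAS is enabled) the OAS CQ
 * bound to that EQ and the OAS WQ bound to the CQ. The error path
 * destroys them in the reverse order.
 */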
10952
10953/**
10954 * lpfc_fof_queue_create - Create all the fof queues
10955 * @phba: pointer to lpfc hba data structure.
10956 *
10957 * This routine is invoked to allocate all the fof queues for the FC HBA
10958 * operation. For each SLI4 queue type, the parameters such as queue entry
10959 * count (queue depth) shall be taken from the module parameter. For now,
10960 * we just use some constant number as a placeholder.
10961 *
10962 * Return codes
10963 * 0 - successful
10964 * -ENOMEM - No available memory
10965 * -EIO - The mailbox failed to complete successfully.
10966 **/
10967int
10968lpfc_fof_queue_create(struct lpfc_hba *phba)
10969{
10970 struct lpfc_queue *qdesc;
10971
10972 /* Create FOF EQ */
10973 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
10974 phba->sli4_hba.eq_ecount);
10975 if (!qdesc)
10976 goto out_error;
10977
10978 phba->sli4_hba.fof_eq = qdesc;
10979
10980 if (phba->cfg_EnableXLane) {
10981
10982 /* Create OAS CQ */
10983 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
10984 phba->sli4_hba.cq_ecount);
10985 if (!qdesc)
10986 goto out_error;
10987
10988 phba->sli4_hba.oas_cq = qdesc;
10989
10990 /* Create OAS WQ */
10991 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
10992 phba->sli4_hba.wq_ecount);
10993 if (!qdesc)
10994 goto out_error;
10995
10996 phba->sli4_hba.oas_wq = qdesc;
10997
10998 }
10999 return 0;
11000
11001out_error:
11002 lpfc_fof_queue_destroy(phba);
11003 return -ENOMEM;
11004}
11005
11006/**
11007 * lpfc_fof_queue_destroy - Destroy all the fof queues
11008 * @phba: pointer to lpfc hba data structure.
11009 *
11010 * This routine is invoked to release all the SLI4 fof queues associated
11011 * with the FC HBA operation.
11012 *
11013 * Return codes
11014 * 0 - successful
11015 **/
11016int
11017lpfc_fof_queue_destroy(struct lpfc_hba *phba)
11018{
11019 /* Release FOF Event queue */
11020 if (phba->sli4_hba.fof_eq != NULL) {
11021 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
11022 phba->sli4_hba.fof_eq = NULL;
11023 }
11024
11025 /* Release OAS Completion queue */
11026 if (phba->sli4_hba.oas_cq != NULL) {
11027 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
11028 phba->sli4_hba.oas_cq = NULL;
11029 }
11030
11031 /* Release OAS Work queue */
11032 if (phba->sli4_hba.oas_wq != NULL) {
11033 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
11034 phba->sli4_hba.oas_wq = NULL;
11035 }
11036 return 0;
11037}
11038
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);
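
/*
 * MODULE_DEVICE_TABLE() exports lpfc_id_table in the module's alias
 * section so that udev/modprobe can autoload lpfc when a matching PCI
 * function appears.  Each entry above becomes a modalias pattern,
 * roughly of the form pci:v000010DFd0000XXXXsv*sd*bc*sc*i* (the exact
 * strings are generated at build time from the PCI_DEVICE_ID_* values;
 * the IDs shown here are illustrative).
 */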

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};
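
/*
 * These callbacks implement the standard PCI error-recovery sequence:
 * the PCI core reports a detected error through .error_detected, resets
 * the slot and lets the driver reinitialize the device in .slot_reset,
 * then restarts normal I/O through .resume once the link is usable.
 */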

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
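
/*
 * Registering lpfc_mgmt_dev creates /dev/lpfcmgmt with a dynamically
 * assigned minor number.  The file_operations table is intentionally
 * empty apart from .owner; the node appears intended to let management
 * applications discover the driver and, while the node is held open,
 * pin the module, rather than to carry any I/O itself.
 */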

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate
 * the role of this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 */
static int __init
lpfc_init(void)
{
	int cpu;
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = 0;
	for_each_present_cpu(cpu)
		lpfc_present_cpu++;

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}
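
/*
 * Note: lpfc_present_cpu computed above is consumed later, when/if the
 * driver builds its CPU-to-interrupt-vector affinity map; lpfc_used_cpu
 * stays NULL until that map is allocated, which is why lpfc_exit()
 * below can call kfree() on it unconditionally (kfree(NULL) is a no-op).
 */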

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the
 * kernel. The special kernel macro module_exit() is used to indicate
 * the role of this routine to the kernel as the lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);