[SCSI] qla2xxx: Resolve a performance issue in interrupt
[deliverable/linux.git] / drivers / scsi / lpfc / lpfc_init.c
CommitLineData
dea3101e 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
d8e93df1 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
c44ce173 5 * EMULEX and SLI are trademarks of Emulex. *
dea3101e 6 * www.emulex.com *
c44ce173 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea3101e 8 * *
9 * This program is free software; you can redistribute it and/or *
c44ce173
JSEC
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
dea3101e 20 *******************************************************************/
21
dea3101e 22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kthread.h>
28#include <linux/pci.h>
29#include <linux/spinlock.h>
92d7f7b0 30#include <linux/ctype.h>
dea3101e 31
91886523 32#include <scsi/scsi.h>
dea3101e 33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h>
36
da0436e9 37#include "lpfc_hw4.h"
dea3101e 38#include "lpfc_hw.h"
39#include "lpfc_sli.h"
da0436e9 40#include "lpfc_sli4.h"
ea2151b4 41#include "lpfc_nl.h"
dea3101e 42#include "lpfc_disc.h"
43#include "lpfc_scsi.h"
44#include "lpfc.h"
45#include "lpfc_logmsg.h"
46#include "lpfc_crtn.h"
92d7f7b0 47#include "lpfc_vport.h"
dea3101e 48#include "lpfc_version.h"
49
81301a9b
JS
50char *_dump_buf_data;
51unsigned long _dump_buf_data_order;
52char *_dump_buf_dif;
53unsigned long _dump_buf_dif_order;
54spinlock_t _dump_buf_lock;
55
dea3101e 56static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
57static int lpfc_post_rcv_buf(struct lpfc_hba *);
da0436e9
JS
58static int lpfc_sli4_queue_create(struct lpfc_hba *);
59static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61static int lpfc_setup_endian_order(struct lpfc_hba *);
62static int lpfc_sli4_read_config(struct lpfc_hba *);
63static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64static void lpfc_free_sgl_list(struct lpfc_hba *);
65static int lpfc_init_sgl_list(struct lpfc_hba *);
66static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67static void lpfc_free_active_sgl(struct lpfc_hba *);
68static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
dea3101e 73
74static struct scsi_transport_template *lpfc_transport_template = NULL;
92d7f7b0 75static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
dea3101e 76static DEFINE_IDR(lpfc_hba_index);
77
e59058c4 78/**
3621a710 79 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
e59058c4
JS
80 * @phba: pointer to lpfc hba data structure.
81 *
82 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
83 * mailbox command. It retrieves the revision information from the HBA and
84 * collects the Vital Product Data (VPD) about the HBA for preparing the
85 * configuration of the HBA.
86 *
87 * Return codes:
88 * 0 - success.
89 * -ERESTART - requests the SLI layer to reset the HBA and try again.
90 * Any other value - indicates an error.
91 **/
dea3101e 92int
2e0fef85 93lpfc_config_port_prep(struct lpfc_hba *phba)
dea3101e 94{
95 lpfc_vpd_t *vp = &phba->vpd;
96 int i = 0, rc;
97 LPFC_MBOXQ_t *pmb;
98 MAILBOX_t *mb;
99 char *lpfc_vpd_data = NULL;
100 uint16_t offset = 0;
101 static char licensed[56] =
102 "key unlock for use with gnu public licensed code only\0";
65a29c16 103 static int init_key = 1;
dea3101e 104
105 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
106 if (!pmb) {
2e0fef85 107 phba->link_state = LPFC_HBA_ERROR;
dea3101e 108 return -ENOMEM;
109 }
110
04c68496 111 mb = &pmb->u.mb;
2e0fef85 112 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 113
114 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
65a29c16
JS
115 if (init_key) {
116 uint32_t *ptext = (uint32_t *) licensed;
dea3101e 117
65a29c16
JS
118 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
119 *ptext = cpu_to_be32(*ptext);
120 init_key = 0;
121 }
dea3101e 122
123 lpfc_read_nv(phba, pmb);
124 memset((char*)mb->un.varRDnvp.rsvd3, 0,
125 sizeof (mb->un.varRDnvp.rsvd3));
126 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
127 sizeof (licensed));
128
129 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
130
131 if (rc != MBX_SUCCESS) {
ed957684 132 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
e8b62011 133 "0324 Config Port initialization "
dea3101e 134 "error, mbxCmd x%x READ_NVPARM, "
135 "mbxStatus x%x\n",
dea3101e 136 mb->mbxCommand, mb->mbxStatus);
137 mempool_free(pmb, phba->mbox_mem_pool);
138 return -ERESTART;
139 }
140 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
2e0fef85
JS
141 sizeof(phba->wwnn));
142 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
143 sizeof(phba->wwpn));
dea3101e 144 }
145
92d7f7b0
JS
146 phba->sli3_options = 0x0;
147
dea3101e 148 /* Setup and issue mailbox READ REV command */
149 lpfc_read_rev(phba, pmb);
150 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
151 if (rc != MBX_SUCCESS) {
ed957684 152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 153 "0439 Adapter failed to init, mbxCmd x%x "
dea3101e 154 "READ_REV, mbxStatus x%x\n",
dea3101e 155 mb->mbxCommand, mb->mbxStatus);
156 mempool_free( pmb, phba->mbox_mem_pool);
157 return -ERESTART;
158 }
159
92d7f7b0 160
1de933f3
JSEC
161 /*
162 * The value of rr must be 1 since the driver set the cv field to 1.
163 * This setting requires the FW to set all revision fields.
dea3101e 164 */
1de933f3 165 if (mb->un.varRdRev.rr == 0) {
dea3101e 166 vp->rev.rBit = 0;
1de933f3 167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011
JS
168 "0440 Adapter failed to init, READ_REV has "
169 "missing revision information.\n");
dea3101e 170 mempool_free(pmb, phba->mbox_mem_pool);
171 return -ERESTART;
dea3101e 172 }
173
495a714c
JS
174 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
175 mempool_free(pmb, phba->mbox_mem_pool);
ed957684 176 return -EINVAL;
495a714c 177 }
ed957684 178
dea3101e 179 /* Save information as VPD data */
1de933f3 180 vp->rev.rBit = 1;
92d7f7b0 181 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
1de933f3
JSEC
182 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
183 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
184 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
185 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
dea3101e 186 vp->rev.biuRev = mb->un.varRdRev.biuRev;
187 vp->rev.smRev = mb->un.varRdRev.smRev;
188 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
189 vp->rev.endecRev = mb->un.varRdRev.endecRev;
190 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
191 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
192 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
193 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
194 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
195 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
196
92d7f7b0
JS
197 /* If the sli feature level is less then 9, we must
198 * tear down all RPIs and VPIs on link down if NPIV
199 * is enabled.
200 */
201 if (vp->rev.feaLevelHigh < 9)
202 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
203
dea3101e 204 if (lpfc_is_LC_HBA(phba->pcidev->device))
205 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
206 sizeof (phba->RandomData));
207
dea3101e 208 /* Get adapter VPD information */
dea3101e 209 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
210 if (!lpfc_vpd_data)
d7c255b2 211 goto out_free_mbox;
dea3101e 212
213 do {
214 lpfc_dump_mem(phba, pmb, offset);
215 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
216
217 if (rc != MBX_SUCCESS) {
218 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011 219 "0441 VPD not present on adapter, "
dea3101e 220 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
dea3101e 221 mb->mbxCommand, mb->mbxStatus);
74b72a59 222 mb->un.varDmp.word_cnt = 0;
dea3101e 223 }
04c68496
JS
224 /* dump mem may return a zero when finished or we got a
225 * mailbox error, either way we are done.
226 */
227 if (mb->un.varDmp.word_cnt == 0)
228 break;
74b72a59
JW
229 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
230 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
d7c255b2
JS
231 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
232 lpfc_vpd_data + offset,
92d7f7b0 233 mb->un.varDmp.word_cnt);
dea3101e 234 offset += mb->un.varDmp.word_cnt;
74b72a59
JW
235 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
236 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
dea3101e 237
238 kfree(lpfc_vpd_data);
dea3101e 239out_free_mbox:
240 mempool_free(pmb, phba->mbox_mem_pool);
241 return 0;
242}
243
e59058c4 244/**
3621a710 245 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
e59058c4
JS
246 * @phba: pointer to lpfc hba data structure.
247 * @pmboxq: pointer to the driver internal queue element for mailbox command.
248 *
249 * This is the completion handler for driver's configuring asynchronous event
250 * mailbox command to the device. If the mailbox command returns successfully,
251 * it will set internal async event support flag to 1; otherwise, it will
252 * set internal async event support flag to 0.
253 **/
57127f15
JS
254static void
255lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
256{
04c68496 257 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
57127f15
JS
258 phba->temp_sensor_support = 1;
259 else
260 phba->temp_sensor_support = 0;
261 mempool_free(pmboxq, phba->mbox_mem_pool);
262 return;
263}
264
97207482 265/**
3621a710 266 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
97207482
JS
267 * @phba: pointer to lpfc hba data structure.
268 * @pmboxq: pointer to the driver internal queue element for mailbox command.
269 *
270 * This is the completion handler for dump mailbox command for getting
271 * wake up parameters. When this command complete, the response contain
272 * Option rom version of the HBA. This function translate the version number
273 * into a human readable string and store it in OptionROMVersion.
274 **/
275static void
276lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
277{
278 struct prog_id *prg;
279 uint32_t prog_id_word;
280 char dist = ' ';
281 /* character array used for decoding dist type. */
282 char dist_char[] = "nabx";
283
04c68496 284 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
9f1e1b50 285 mempool_free(pmboxq, phba->mbox_mem_pool);
97207482 286 return;
9f1e1b50 287 }
97207482
JS
288
289 prg = (struct prog_id *) &prog_id_word;
290
291 /* word 7 contain option rom version */
04c68496 292 prog_id_word = pmboxq->u.mb.un.varWords[7];
97207482
JS
293
294 /* Decode the Option rom version word to a readable string */
295 if (prg->dist < 4)
296 dist = dist_char[prg->dist];
297
298 if ((prg->dist == 3) && (prg->num == 0))
299 sprintf(phba->OptionROMVersion, "%d.%d%d",
300 prg->ver, prg->rev, prg->lev);
301 else
302 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
303 prg->ver, prg->rev, prg->lev,
304 dist, prg->num);
9f1e1b50 305 mempool_free(pmboxq, phba->mbox_mem_pool);
97207482
JS
306 return;
307}
308
e59058c4 309/**
3621a710 310 * lpfc_config_port_post - Perform lpfc initialization after config port
e59058c4
JS
311 * @phba: pointer to lpfc hba data structure.
312 *
313 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
314 * command call. It performs all internal resource and state setups on the
315 * port: post IOCB buffers, enable appropriate host interrupt attentions,
316 * ELS ring timers, etc.
317 *
318 * Return codes
319 * 0 - success.
320 * Any other value - error.
321 **/
dea3101e 322int
2e0fef85 323lpfc_config_port_post(struct lpfc_hba *phba)
dea3101e 324{
2e0fef85 325 struct lpfc_vport *vport = phba->pport;
a257bf90 326 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 327 LPFC_MBOXQ_t *pmb;
328 MAILBOX_t *mb;
329 struct lpfc_dmabuf *mp;
330 struct lpfc_sli *psli = &phba->sli;
331 uint32_t status, timeout;
2e0fef85
JS
332 int i, j;
333 int rc;
dea3101e 334
7af67051
JS
335 spin_lock_irq(&phba->hbalock);
336 /*
337 * If the Config port completed correctly the HBA is not
338 * over heated any more.
339 */
340 if (phba->over_temp_state == HBA_OVER_TEMP)
341 phba->over_temp_state = HBA_NORMAL_TEMP;
342 spin_unlock_irq(&phba->hbalock);
343
dea3101e 344 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
345 if (!pmb) {
2e0fef85 346 phba->link_state = LPFC_HBA_ERROR;
dea3101e 347 return -ENOMEM;
348 }
04c68496 349 mb = &pmb->u.mb;
dea3101e 350
dea3101e 351 /* Get login parameters for NID. */
92d7f7b0 352 lpfc_read_sparam(phba, pmb, 0);
ed957684 353 pmb->vport = vport;
dea3101e 354 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
ed957684 355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 356 "0448 Adapter failed init, mbxCmd x%x "
dea3101e 357 "READ_SPARM mbxStatus x%x\n",
dea3101e 358 mb->mbxCommand, mb->mbxStatus);
2e0fef85 359 phba->link_state = LPFC_HBA_ERROR;
dea3101e 360 mp = (struct lpfc_dmabuf *) pmb->context1;
361 mempool_free( pmb, phba->mbox_mem_pool);
362 lpfc_mbuf_free(phba, mp->virt, mp->phys);
363 kfree(mp);
364 return -EIO;
365 }
366
367 mp = (struct lpfc_dmabuf *) pmb->context1;
368
2e0fef85 369 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
dea3101e 370 lpfc_mbuf_free(phba, mp->virt, mp->phys);
371 kfree(mp);
372 pmb->context1 = NULL;
373
a12e07bc 374 if (phba->cfg_soft_wwnn)
2e0fef85
JS
375 u64_to_wwn(phba->cfg_soft_wwnn,
376 vport->fc_sparam.nodeName.u.wwn);
c3f28afa 377 if (phba->cfg_soft_wwpn)
2e0fef85
JS
378 u64_to_wwn(phba->cfg_soft_wwpn,
379 vport->fc_sparam.portName.u.wwn);
380 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
dea3101e 381 sizeof (struct lpfc_name));
2e0fef85 382 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
dea3101e 383 sizeof (struct lpfc_name));
a257bf90
JS
384
385 /* Update the fc_host data structures with new wwn. */
386 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
387 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
21e9a0a5 388 fc_host_max_npiv_vports(shost) = phba->max_vpi;
a257bf90 389
dea3101e 390 /* If no serial number in VPD data, use low 6 bytes of WWNN */
391 /* This should be consolidated into parse_vpd ? - mr */
392 if (phba->SerialNumber[0] == 0) {
393 uint8_t *outptr;
394
2e0fef85 395 outptr = &vport->fc_nodename.u.s.IEEE[0];
dea3101e 396 for (i = 0; i < 12; i++) {
397 status = *outptr++;
398 j = ((status & 0xf0) >> 4);
399 if (j <= 9)
400 phba->SerialNumber[i] =
401 (char)((uint8_t) 0x30 + (uint8_t) j);
402 else
403 phba->SerialNumber[i] =
404 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
405 i++;
406 j = (status & 0xf);
407 if (j <= 9)
408 phba->SerialNumber[i] =
409 (char)((uint8_t) 0x30 + (uint8_t) j);
410 else
411 phba->SerialNumber[i] =
412 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
413 }
414 }
415
dea3101e 416 lpfc_read_config(phba, pmb);
ed957684 417 pmb->vport = vport;
dea3101e 418 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
ed957684 419 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 420 "0453 Adapter failed to init, mbxCmd x%x "
dea3101e 421 "READ_CONFIG, mbxStatus x%x\n",
dea3101e 422 mb->mbxCommand, mb->mbxStatus);
2e0fef85 423 phba->link_state = LPFC_HBA_ERROR;
dea3101e 424 mempool_free( pmb, phba->mbox_mem_pool);
425 return -EIO;
426 }
427
428 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
429 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
430 phba->cfg_hba_queue_depth =
431 mb->un.varRdConfig.max_xri + 1;
432
433 phba->lmt = mb->un.varRdConfig.lmt;
74b72a59
JW
434
435 /* Get the default values for Model Name and Description */
436 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
437
438 if ((phba->cfg_link_speed > LINK_SPEED_10G)
439 || ((phba->cfg_link_speed == LINK_SPEED_1G)
440 && !(phba->lmt & LMT_1Gb))
441 || ((phba->cfg_link_speed == LINK_SPEED_2G)
442 && !(phba->lmt & LMT_2Gb))
443 || ((phba->cfg_link_speed == LINK_SPEED_4G)
444 && !(phba->lmt & LMT_4Gb))
445 || ((phba->cfg_link_speed == LINK_SPEED_8G)
446 && !(phba->lmt & LMT_8Gb))
447 || ((phba->cfg_link_speed == LINK_SPEED_10G)
448 && !(phba->lmt & LMT_10Gb))) {
449 /* Reset link speed to auto */
ed957684 450 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
e8b62011 451 "1302 Invalid speed for this board: "
dea3101e 452 "Reset link speed to auto: x%x\n",
dea3101e 453 phba->cfg_link_speed);
454 phba->cfg_link_speed = LINK_SPEED_AUTO;
455 }
456
2e0fef85 457 phba->link_state = LPFC_LINK_DOWN;
dea3101e 458
0b727fea 459 /* Only process IOCBs on ELS ring till hba_state is READY */
a4bc3379
JS
460 if (psli->ring[psli->extra_ring].cmdringaddr)
461 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
dea3101e 462 if (psli->ring[psli->fcp_ring].cmdringaddr)
463 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
464 if (psli->ring[psli->next_ring].cmdringaddr)
465 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
466
467 /* Post receive buffers for desired rings */
ed957684
JS
468 if (phba->sli_rev != 3)
469 lpfc_post_rcv_buf(phba);
dea3101e 470
9399627f
JS
471 /*
472 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
473 */
474 if (phba->intr_type == MSIX) {
475 rc = lpfc_config_msi(phba, pmb);
476 if (rc) {
477 mempool_free(pmb, phba->mbox_mem_pool);
478 return -EIO;
479 }
480 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
481 if (rc != MBX_SUCCESS) {
482 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
483 "0352 Config MSI mailbox command "
484 "failed, mbxCmd x%x, mbxStatus x%x\n",
04c68496
JS
485 pmb->u.mb.mbxCommand,
486 pmb->u.mb.mbxStatus);
9399627f
JS
487 mempool_free(pmb, phba->mbox_mem_pool);
488 return -EIO;
489 }
490 }
491
04c68496 492 spin_lock_irq(&phba->hbalock);
9399627f
JS
493 /* Initialize ERATT handling flag */
494 phba->hba_flag &= ~HBA_ERATT_HANDLED;
495
dea3101e 496 /* Enable appropriate host interrupts */
dea3101e 497 status = readl(phba->HCregaddr);
498 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
499 if (psli->num_rings > 0)
500 status |= HC_R0INT_ENA;
501 if (psli->num_rings > 1)
502 status |= HC_R1INT_ENA;
503 if (psli->num_rings > 2)
504 status |= HC_R2INT_ENA;
505 if (psli->num_rings > 3)
506 status |= HC_R3INT_ENA;
507
875fbdfe
JSEC
508 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
509 (phba->cfg_poll & DISABLE_FCP_RING_INT))
9399627f 510 status &= ~(HC_R0INT_ENA);
875fbdfe 511
dea3101e 512 writel(status, phba->HCregaddr);
513 readl(phba->HCregaddr); /* flush */
2e0fef85 514 spin_unlock_irq(&phba->hbalock);
dea3101e 515
9399627f
JS
516 /* Set up ring-0 (ELS) timer */
517 timeout = phba->fc_ratov * 2;
2e0fef85 518 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
9399627f 519 /* Set up heart beat (HB) timer */
858c9f6c
JS
520 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
521 phba->hb_outstanding = 0;
522 phba->last_completion_time = jiffies;
9399627f
JS
523 /* Set up error attention (ERATT) polling timer */
524 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
dea3101e 525
526 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
527 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5b8bd0c9 528 lpfc_set_loopback_flag(phba);
d7c255b2 529 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
8aee918a 530 if (rc != MBX_SUCCESS) {
ed957684 531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 532 "0454 Adapter failed to init, mbxCmd x%x "
dea3101e 533 "INIT_LINK, mbxStatus x%x\n",
dea3101e 534 mb->mbxCommand, mb->mbxStatus);
535
536 /* Clear all interrupt enable conditions */
537 writel(0, phba->HCregaddr);
538 readl(phba->HCregaddr); /* flush */
539 /* Clear all pending interrupts */
540 writel(0xffffffff, phba->HAregaddr);
541 readl(phba->HAregaddr); /* flush */
542
2e0fef85 543 phba->link_state = LPFC_HBA_ERROR;
8aee918a
JS
544 if (rc != MBX_BUSY)
545 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 546 return -EIO;
547 }
548 /* MBOX buffer will be freed in mbox compl */
57127f15
JS
549 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
550 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
551 pmb->mbox_cmpl = lpfc_config_async_cmpl;
552 pmb->vport = phba->pport;
553 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
dea3101e 554
57127f15
JS
555 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
556 lpfc_printf_log(phba,
557 KERN_ERR,
558 LOG_INIT,
559 "0456 Adapter failed to issue "
560 "ASYNCEVT_ENABLE mbox status x%x \n.",
561 rc);
562 mempool_free(pmb, phba->mbox_mem_pool);
563 }
97207482
JS
564
565 /* Get Option rom version */
566 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
567 lpfc_dump_wakeup_param(phba, pmb);
568 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
569 pmb->vport = phba->pport;
570 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
571
572 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
574 "to get Option ROM version status x%x\n.", rc);
575 mempool_free(pmb, phba->mbox_mem_pool);
576 }
577
d7c255b2 578 return 0;
ce8b3ce5
JS
579}
580
e59058c4 581/**
3621a710 582 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
e59058c4
JS
583 * @phba: pointer to lpfc HBA data structure.
584 *
585 * This routine will do LPFC uninitialization before the HBA is reset when
586 * bringing down the SLI Layer.
587 *
588 * Return codes
589 * 0 - success.
590 * Any other value - error.
591 **/
dea3101e 592int
2e0fef85 593lpfc_hba_down_prep(struct lpfc_hba *phba)
dea3101e 594{
1b32f6aa
JS
595 struct lpfc_vport **vports;
596 int i;
3772a991
JS
597
598 if (phba->sli_rev <= LPFC_SLI_REV3) {
599 /* Disable interrupts */
600 writel(0, phba->HCregaddr);
601 readl(phba->HCregaddr); /* flush */
602 }
dea3101e 603
1b32f6aa
JS
604 if (phba->pport->load_flag & FC_UNLOADING)
605 lpfc_cleanup_discovery_resources(phba->pport);
606 else {
607 vports = lpfc_create_vport_work_array(phba);
608 if (vports != NULL)
3772a991
JS
609 for (i = 0; i <= phba->max_vports &&
610 vports[i] != NULL; i++)
1b32f6aa
JS
611 lpfc_cleanup_discovery_resources(vports[i]);
612 lpfc_destroy_vport_work_array(phba, vports);
7f5f3d0d
JS
613 }
614 return 0;
dea3101e 615}
616
e59058c4 617/**
3772a991 618 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
e59058c4
JS
619 * @phba: pointer to lpfc HBA data structure.
620 *
621 * This routine will do uninitialization after the HBA is reset when bring
622 * down the SLI Layer.
623 *
624 * Return codes
625 * 0 - sucess.
626 * Any other value - error.
627 **/
3772a991
JS
628static int
629lpfc_hba_down_post_s3(struct lpfc_hba *phba)
41415862
JW
630{
631 struct lpfc_sli *psli = &phba->sli;
632 struct lpfc_sli_ring *pring;
633 struct lpfc_dmabuf *mp, *next_mp;
09372820 634 LIST_HEAD(completions);
41415862
JW
635 int i;
636
92d7f7b0
JS
637 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
638 lpfc_sli_hbqbuf_free_all(phba);
639 else {
640 /* Cleanup preposted buffers on the ELS ring */
641 pring = &psli->ring[LPFC_ELS_RING];
642 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
643 list_del(&mp->list);
644 pring->postbufq_cnt--;
645 lpfc_mbuf_free(phba, mp->virt, mp->phys);
646 kfree(mp);
647 }
41415862
JW
648 }
649
09372820 650 spin_lock_irq(&phba->hbalock);
41415862
JW
651 for (i = 0; i < psli->num_rings; i++) {
652 pring = &psli->ring[i];
09372820
JS
653
654 /* At this point in time the HBA is either reset or DOA. Either
655 * way, nothing should be on txcmplq as it will NEVER complete.
656 */
657 list_splice_init(&pring->txcmplq, &completions);
658 pring->txcmplq_cnt = 0;
659 spin_unlock_irq(&phba->hbalock);
660
a257bf90
JS
661 /* Cancel all the IOCBs from the completions list */
662 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
663 IOERR_SLI_ABORTED);
09372820 664
41415862 665 lpfc_sli_abort_iocb_ring(phba, pring);
09372820 666 spin_lock_irq(&phba->hbalock);
41415862 667 }
09372820 668 spin_unlock_irq(&phba->hbalock);
41415862
JW
669
670 return 0;
671}
da0436e9
JS
672/**
673 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
674 * @phba: pointer to lpfc HBA data structure.
675 *
676 * This routine will do uninitialization after the HBA is reset when bring
677 * down the SLI Layer.
678 *
679 * Return codes
680 * 0 - sucess.
681 * Any other value - error.
682 **/
683static int
684lpfc_hba_down_post_s4(struct lpfc_hba *phba)
685{
686 struct lpfc_scsi_buf *psb, *psb_next;
687 LIST_HEAD(aborts);
688 int ret;
689 unsigned long iflag = 0;
690 ret = lpfc_hba_down_post_s3(phba);
691 if (ret)
692 return ret;
693 /* At this point in time the HBA is either reset or DOA. Either
694 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
695 * on the lpfc_sgl_list so that it can either be freed if the
696 * driver is unloading or reposted if the driver is restarting
697 * the port.
698 */
699 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
700 /* scsl_buf_list */
701 /* abts_sgl_list_lock required because worker thread uses this
702 * list.
703 */
704 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
705 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
706 &phba->sli4_hba.lpfc_sgl_list);
707 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
708 /* abts_scsi_buf_list_lock required because worker thread uses this
709 * list.
710 */
711 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
712 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
713 &aborts);
714 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
715 spin_unlock_irq(&phba->hbalock);
716
717 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
718 psb->pCmd = NULL;
719 psb->status = IOSTAT_SUCCESS;
720 }
721 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
722 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
723 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
724 return 0;
725}
726
727/**
728 * lpfc_hba_down_post - Wrapper func for hba down post routine
729 * @phba: pointer to lpfc HBA data structure.
730 *
731 * This routine wraps the actual SLI3 or SLI4 routine for performing
732 * uninitialization after the HBA is reset when bring down the SLI Layer.
733 *
734 * Return codes
735 * 0 - sucess.
736 * Any other value - error.
737 **/
738int
739lpfc_hba_down_post(struct lpfc_hba *phba)
740{
741 return (*phba->lpfc_hba_down_post)(phba);
742}
41415862 743
e59058c4 744/**
3621a710 745 * lpfc_hb_timeout - The HBA-timer timeout handler
e59058c4
JS
746 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
747 *
748 * This is the HBA-timer timeout handler registered to the lpfc driver. When
749 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
750 * work-port-events bitmap and the worker thread is notified. This timeout
751 * event will be used by the worker thread to invoke the actual timeout
752 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
753 * be performed in the timeout handler and the HBA timeout event bit shall
754 * be cleared by the worker thread after it has taken the event bitmap out.
755 **/
a6ababd2 756static void
858c9f6c
JS
757lpfc_hb_timeout(unsigned long ptr)
758{
759 struct lpfc_hba *phba;
5e9d9b82 760 uint32_t tmo_posted;
858c9f6c
JS
761 unsigned long iflag;
762
763 phba = (struct lpfc_hba *)ptr;
9399627f
JS
764
765 /* Check for heart beat timeout conditions */
858c9f6c 766 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
5e9d9b82
JS
767 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
768 if (!tmo_posted)
858c9f6c
JS
769 phba->pport->work_port_events |= WORKER_HB_TMO;
770 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
771
9399627f 772 /* Tell the worker thread there is work to do */
5e9d9b82
JS
773 if (!tmo_posted)
774 lpfc_worker_wake_up(phba);
858c9f6c
JS
775 return;
776}
777
e59058c4 778/**
3621a710 779 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
e59058c4
JS
780 * @phba: pointer to lpfc hba data structure.
781 * @pmboxq: pointer to the driver internal queue element for mailbox command.
782 *
783 * This is the callback function to the lpfc heart-beat mailbox command.
784 * If configured, the lpfc driver issues the heart-beat mailbox command to
785 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
786 * heart-beat mailbox command is issued, the driver shall set up heart-beat
787 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
788 * heart-beat outstanding state. Once the mailbox command comes back and
789 * no error conditions detected, the heart-beat mailbox command timer is
790 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
791 * state is cleared for the next heart-beat. If the timer expired with the
792 * heart-beat outstanding state set, the driver will put the HBA offline.
793 **/
858c9f6c
JS
794static void
795lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
796{
797 unsigned long drvr_flag;
798
799 spin_lock_irqsave(&phba->hbalock, drvr_flag);
800 phba->hb_outstanding = 0;
801 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
802
9399627f 803 /* Check and reset heart-beat timer is necessary */
858c9f6c
JS
804 mempool_free(pmboxq, phba->mbox_mem_pool);
805 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
806 !(phba->link_state == LPFC_HBA_ERROR) &&
51ef4c26 807 !(phba->pport->load_flag & FC_UNLOADING))
858c9f6c
JS
808 mod_timer(&phba->hb_tmofunc,
809 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
810 return;
811}
812
e59058c4 813/**
3621a710 814 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
e59058c4
JS
815 * @phba: pointer to lpfc hba data structure.
816 *
817 * This is the actual HBA-timer timeout handler to be invoked by the worker
818 * thread whenever the HBA timer fired and HBA-timeout event posted. This
819 * handler performs any periodic operations needed for the device. If such
820 * periodic event has already been attended to either in the interrupt handler
821 * or by processing slow-ring or fast-ring events within the HBA-timer
822 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
823 * the timer for the next timeout period. If lpfc heart-beat mailbox command
824 * is configured and there is no heart-beat mailbox command outstanding, a
825 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
826 * has been a heart-beat mailbox command outstanding, the HBA shall be put
827 * to offline.
828 **/
858c9f6c
JS
829void
830lpfc_hb_timeout_handler(struct lpfc_hba *phba)
831{
832 LPFC_MBOXQ_t *pmboxq;
0ff10d46 833 struct lpfc_dmabuf *buf_ptr;
858c9f6c
JS
834 int retval;
835 struct lpfc_sli *psli = &phba->sli;
0ff10d46 836 LIST_HEAD(completions);
858c9f6c
JS
837
838 if ((phba->link_state == LPFC_HBA_ERROR) ||
51ef4c26 839 (phba->pport->load_flag & FC_UNLOADING) ||
858c9f6c
JS
840 (phba->pport->fc_flag & FC_OFFLINE_MODE))
841 return;
842
843 spin_lock_irq(&phba->pport->work_port_lock);
858c9f6c
JS
844
845 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
846 jiffies)) {
847 spin_unlock_irq(&phba->pport->work_port_lock);
848 if (!phba->hb_outstanding)
849 mod_timer(&phba->hb_tmofunc,
850 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
851 else
852 mod_timer(&phba->hb_tmofunc,
853 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
854 return;
855 }
856 spin_unlock_irq(&phba->pport->work_port_lock);
857
0ff10d46
JS
858 if (phba->elsbuf_cnt &&
859 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
860 spin_lock_irq(&phba->hbalock);
861 list_splice_init(&phba->elsbuf, &completions);
862 phba->elsbuf_cnt = 0;
863 phba->elsbuf_prev_cnt = 0;
864 spin_unlock_irq(&phba->hbalock);
865
866 while (!list_empty(&completions)) {
867 list_remove_head(&completions, buf_ptr,
868 struct lpfc_dmabuf, list);
869 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
870 kfree(buf_ptr);
871 }
872 }
873 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
874
858c9f6c 875 /* If there is no heart beat outstanding, issue a heartbeat command */
13815c83
JS
876 if (phba->cfg_enable_hba_heartbeat) {
877 if (!phba->hb_outstanding) {
878 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
879 if (!pmboxq) {
880 mod_timer(&phba->hb_tmofunc,
881 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
882 return;
883 }
858c9f6c 884
13815c83
JS
885 lpfc_heart_beat(phba, pmboxq);
886 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
887 pmboxq->vport = phba->pport;
888 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
858c9f6c 889
13815c83
JS
890 if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
891 mempool_free(pmboxq, phba->mbox_mem_pool);
892 mod_timer(&phba->hb_tmofunc,
893 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
894 return;
895 }
858c9f6c 896 mod_timer(&phba->hb_tmofunc,
13815c83
JS
897 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
898 phba->hb_outstanding = 1;
858c9f6c 899 return;
13815c83
JS
900 } else {
901 /*
902 * If heart beat timeout called with hb_outstanding set
903 * we need to take the HBA offline.
904 */
905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
906 "0459 Adapter heartbeat failure, "
907 "taking this port offline.\n");
908
909 spin_lock_irq(&phba->hbalock);
f4b4c68f 910 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
13815c83
JS
911 spin_unlock_irq(&phba->hbalock);
912
913 lpfc_offline_prep(phba);
914 lpfc_offline(phba);
915 lpfc_unblock_mgmt_io(phba);
916 phba->link_state = LPFC_HBA_ERROR;
917 lpfc_hba_down_post(phba);
858c9f6c 918 }
858c9f6c
JS
919 }
920}
921
e59058c4 922/**
3621a710 923 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
e59058c4
JS
924 * @phba: pointer to lpfc hba data structure.
925 *
926 * This routine is called to bring the HBA offline when HBA hardware error
927 * other than Port Error 6 has been detected.
928 **/
09372820
JS
929static void
930lpfc_offline_eratt(struct lpfc_hba *phba)
931{
932 struct lpfc_sli *psli = &phba->sli;
933
934 spin_lock_irq(&phba->hbalock);
f4b4c68f 935 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
09372820
JS
936 spin_unlock_irq(&phba->hbalock);
937 lpfc_offline_prep(phba);
938
939 lpfc_offline(phba);
940 lpfc_reset_barrier(phba);
f4b4c68f 941 spin_lock_irq(&phba->hbalock);
09372820 942 lpfc_sli_brdreset(phba);
f4b4c68f 943 spin_unlock_irq(&phba->hbalock);
09372820
JS
944 lpfc_hba_down_post(phba);
945 lpfc_sli_brdready(phba, HS_MBRDY);
946 lpfc_unblock_mgmt_io(phba);
947 phba->link_state = LPFC_HBA_ERROR;
948 return;
949}
950
da0436e9
JS
951/**
952 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
953 * @phba: pointer to lpfc hba data structure.
954 *
955 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
956 * other than Port Error 6 has been detected.
957 **/
958static void
959lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
960{
961 lpfc_offline_prep(phba);
962 lpfc_offline(phba);
963 lpfc_sli4_brdreset(phba);
964 lpfc_hba_down_post(phba);
965 lpfc_sli4_post_status_check(phba);
966 lpfc_unblock_mgmt_io(phba);
967 phba->link_state = LPFC_HBA_ERROR;
968}
969
a257bf90
JS
970/**
971 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
972 * @phba: pointer to lpfc hba data structure.
973 *
974 * This routine is invoked to handle the deferred HBA hardware error
975 * conditions. This type of error is indicated by HBA by setting ER1
976 * and another ER bit in the host status register. The driver will
977 * wait until the ER1 bit clears before handling the error condition.
978 **/
979static void
980lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
981{
982 uint32_t old_host_status = phba->work_hs;
983 struct lpfc_sli_ring *pring;
984 struct lpfc_sli *psli = &phba->sli;
985
f4b4c68f
JS
986 /* If the pci channel is offline, ignore possible errors,
987 * since we cannot communicate with the pci card anyway.
988 */
989 if (pci_channel_offline(phba->pcidev)) {
990 spin_lock_irq(&phba->hbalock);
991 phba->hba_flag &= ~DEFER_ERATT;
992 spin_unlock_irq(&phba->hbalock);
993 return;
994 }
995
a257bf90
JS
996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
997 "0479 Deferred Adapter Hardware Error "
998 "Data: x%x x%x x%x\n",
999 phba->work_hs,
1000 phba->work_status[0], phba->work_status[1]);
1001
1002 spin_lock_irq(&phba->hbalock);
f4b4c68f 1003 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
a257bf90
JS
1004 spin_unlock_irq(&phba->hbalock);
1005
1006
1007 /*
1008 * Firmware stops when it triggred erratt. That could cause the I/Os
1009 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1010 * SCSI layer retry it after re-establishing link.
1011 */
1012 pring = &psli->ring[psli->fcp_ring];
1013 lpfc_sli_abort_iocb_ring(phba, pring);
1014
1015 /*
1016 * There was a firmware error. Take the hba offline and then
1017 * attempt to restart it.
1018 */
1019 lpfc_offline_prep(phba);
1020 lpfc_offline(phba);
1021
1022 /* Wait for the ER1 bit to clear.*/
1023 while (phba->work_hs & HS_FFER1) {
1024 msleep(100);
1025 phba->work_hs = readl(phba->HSregaddr);
1026 /* If driver is unloading let the worker thread continue */
1027 if (phba->pport->load_flag & FC_UNLOADING) {
1028 phba->work_hs = 0;
1029 break;
1030 }
1031 }
1032
1033 /*
1034 * This is to ptrotect against a race condition in which
1035 * first write to the host attention register clear the
1036 * host status register.
1037 */
1038 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1039 phba->work_hs = old_host_status & ~HS_FFER1;
1040
3772a991 1041 spin_lock_irq(&phba->hbalock);
a257bf90 1042 phba->hba_flag &= ~DEFER_ERATT;
3772a991 1043 spin_unlock_irq(&phba->hbalock);
a257bf90
JS
1044 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1045 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1046}
1047
3772a991
JS
1048static void
1049lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1050{
1051 struct lpfc_board_event_header board_event;
1052 struct Scsi_Host *shost;
1053
1054 board_event.event_type = FC_REG_BOARD_EVENT;
1055 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1056 shost = lpfc_shost_from_vport(phba->pport);
1057 fc_host_post_vendor_event(shost, fc_get_event_number(),
1058 sizeof(board_event),
1059 (char *) &board_event,
1060 LPFC_NL_VENDOR_ID);
1061}
1062
e59058c4 1063/**
3772a991 1064 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
e59058c4
JS
1065 * @phba: pointer to lpfc hba data structure.
1066 *
1067 * This routine is invoked to handle the following HBA hardware error
1068 * conditions:
1069 * 1 - HBA error attention interrupt
1070 * 2 - DMA ring index out of range
1071 * 3 - Mailbox command came back as unknown
1072 **/
3772a991
JS
1073static void
1074lpfc_handle_eratt_s3(struct lpfc_hba *phba)
dea3101e 1075{
2e0fef85 1076 struct lpfc_vport *vport = phba->pport;
2e0fef85 1077 struct lpfc_sli *psli = &phba->sli;
dea3101e 1078 struct lpfc_sli_ring *pring;
d2873e4c 1079 uint32_t event_data;
57127f15
JS
1080 unsigned long temperature;
1081 struct temp_event temp_event_data;
92d7f7b0 1082 struct Scsi_Host *shost;
2e0fef85 1083
8d63f375 1084 /* If the pci channel is offline, ignore possible errors,
3772a991
JS
1085 * since we cannot communicate with the pci card anyway.
1086 */
1087 if (pci_channel_offline(phba->pcidev)) {
1088 spin_lock_irq(&phba->hbalock);
1089 phba->hba_flag &= ~DEFER_ERATT;
1090 spin_unlock_irq(&phba->hbalock);
8d63f375 1091 return;
3772a991
JS
1092 }
1093
13815c83
JS
1094 /* If resets are disabled then leave the HBA alone and return */
1095 if (!phba->cfg_enable_hba_reset)
1096 return;
dea3101e 1097
ea2151b4 1098 /* Send an internal error event to mgmt application */
3772a991 1099 lpfc_board_errevt_to_mgmt(phba);
ea2151b4 1100
a257bf90
JS
1101 if (phba->hba_flag & DEFER_ERATT)
1102 lpfc_handle_deferred_eratt(phba);
1103
97eab634 1104 if (phba->work_hs & HS_FFER6) {
dea3101e 1105 /* Re-establishing Link */
1106 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
e8b62011 1107 "1301 Re-establishing Link "
dea3101e 1108 "Data: x%x x%x x%x\n",
e8b62011 1109 phba->work_hs,
dea3101e 1110 phba->work_status[0], phba->work_status[1]);
58da1ffb 1111
92d7f7b0 1112 spin_lock_irq(&phba->hbalock);
f4b4c68f 1113 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
92d7f7b0 1114 spin_unlock_irq(&phba->hbalock);
dea3101e 1115
1116 /*
1117 * Firmware stops when it triggled erratt with HS_FFER6.
1118 * That could cause the I/Os dropped by the firmware.
1119 * Error iocb (I/O) on txcmplq and let the SCSI layer
1120 * retry it after re-establishing link.
1121 */
1122 pring = &psli->ring[psli->fcp_ring];
1123 lpfc_sli_abort_iocb_ring(phba, pring);
1124
dea3101e 1125 /*
1126 * There was a firmware error. Take the hba offline and then
1127 * attempt to restart it.
1128 */
46fa311e 1129 lpfc_offline_prep(phba);
dea3101e 1130 lpfc_offline(phba);
41415862 1131 lpfc_sli_brdrestart(phba);
dea3101e 1132 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
46fa311e 1133 lpfc_unblock_mgmt_io(phba);
dea3101e 1134 return;
1135 }
46fa311e 1136 lpfc_unblock_mgmt_io(phba);
57127f15
JS
1137 } else if (phba->work_hs & HS_CRIT_TEMP) {
1138 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1139 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1140 temp_event_data.event_code = LPFC_CRIT_TEMP;
1141 temp_event_data.data = (uint32_t)temperature;
1142
1143 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 1144 "0406 Adapter maximum temperature exceeded "
57127f15
JS
1145 "(%ld), taking this port offline "
1146 "Data: x%x x%x x%x\n",
1147 temperature, phba->work_hs,
1148 phba->work_status[0], phba->work_status[1]);
1149
1150 shost = lpfc_shost_from_vport(phba->pport);
1151 fc_host_post_vendor_event(shost, fc_get_event_number(),
1152 sizeof(temp_event_data),
1153 (char *) &temp_event_data,
1154 SCSI_NL_VID_TYPE_PCI
1155 | PCI_VENDOR_ID_EMULEX);
1156
7af67051 1157 spin_lock_irq(&phba->hbalock);
7af67051
JS
1158 phba->over_temp_state = HBA_OVER_TEMP;
1159 spin_unlock_irq(&phba->hbalock);
09372820 1160 lpfc_offline_eratt(phba);
57127f15 1161
dea3101e 1162 } else {
1163 /* The if clause above forces this code path when the status
9399627f
JS
1164 * failure is a value other than FFER6. Do not call the offline
1165 * twice. This is the adapter hardware error path.
dea3101e 1166 */
1167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 1168 "0457 Adapter Hardware Error "
dea3101e 1169 "Data: x%x x%x x%x\n",
e8b62011 1170 phba->work_hs,
dea3101e 1171 phba->work_status[0], phba->work_status[1]);
1172
d2873e4c 1173 event_data = FC_REG_DUMP_EVENT;
92d7f7b0 1174 shost = lpfc_shost_from_vport(vport);
2e0fef85 1175 fc_host_post_vendor_event(shost, fc_get_event_number(),
d2873e4c
JS
1176 sizeof(event_data), (char *) &event_data,
1177 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1178
09372820 1179 lpfc_offline_eratt(phba);
dea3101e 1180 }
9399627f 1181 return;
dea3101e 1182}
1183
da0436e9
JS
1184/**
1185 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1186 * @phba: pointer to lpfc hba data structure.
1187 *
1188 * This routine is invoked to handle the SLI4 HBA hardware error attention
1189 * conditions.
1190 **/
1191static void
1192lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1193{
1194 struct lpfc_vport *vport = phba->pport;
1195 uint32_t event_data;
1196 struct Scsi_Host *shost;
1197
1198 /* If the pci channel is offline, ignore possible errors, since
1199 * we cannot communicate with the pci card anyway.
1200 */
1201 if (pci_channel_offline(phba->pcidev))
1202 return;
1203 /* If resets are disabled then leave the HBA alone and return */
1204 if (!phba->cfg_enable_hba_reset)
1205 return;
1206
1207 /* Send an internal error event to mgmt application */
1208 lpfc_board_errevt_to_mgmt(phba);
1209
1210 /* For now, the actual action for SLI4 device handling is not
1211 * specified yet, just treated it as adaptor hardware failure
1212 */
1213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1214 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1215 phba->work_status[0], phba->work_status[1]);
1216
1217 event_data = FC_REG_DUMP_EVENT;
1218 shost = lpfc_shost_from_vport(vport);
1219 fc_host_post_vendor_event(shost, fc_get_event_number(),
1220 sizeof(event_data), (char *) &event_data,
1221 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1222
1223 lpfc_sli4_offline_eratt(phba);
1224}
1225
1226/**
1227 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1228 * @phba: pointer to lpfc HBA data structure.
1229 *
1230 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1231 * routine from the API jump table function pointer from the lpfc_hba struct.
1232 *
1233 * Return codes
1234 * 0 - sucess.
1235 * Any other value - error.
1236 **/
1237void
1238lpfc_handle_eratt(struct lpfc_hba *phba)
1239{
1240 (*phba->lpfc_handle_eratt)(phba);
1241}
1242
e59058c4 1243/**
3621a710 1244 * lpfc_handle_latt - The HBA link event handler
e59058c4
JS
1245 * @phba: pointer to lpfc hba data structure.
1246 *
1247 * This routine is invoked from the worker thread to handle a HBA host
1248 * attention link event.
1249 **/
dea3101e 1250void
2e0fef85 1251lpfc_handle_latt(struct lpfc_hba *phba)
dea3101e 1252{
2e0fef85
JS
1253 struct lpfc_vport *vport = phba->pport;
1254 struct lpfc_sli *psli = &phba->sli;
dea3101e 1255 LPFC_MBOXQ_t *pmb;
1256 volatile uint32_t control;
1257 struct lpfc_dmabuf *mp;
09372820 1258 int rc = 0;
dea3101e 1259
1260 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
09372820
JS
1261 if (!pmb) {
1262 rc = 1;
dea3101e 1263 goto lpfc_handle_latt_err_exit;
09372820 1264 }
dea3101e 1265
1266 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
09372820
JS
1267 if (!mp) {
1268 rc = 2;
dea3101e 1269 goto lpfc_handle_latt_free_pmb;
09372820 1270 }
dea3101e 1271
1272 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
09372820
JS
1273 if (!mp->virt) {
1274 rc = 3;
dea3101e 1275 goto lpfc_handle_latt_free_mp;
09372820 1276 }
dea3101e 1277
6281bfe0 1278 /* Cleanup any outstanding ELS commands */
549e55cd 1279 lpfc_els_flush_all_cmd(phba);
dea3101e 1280
1281 psli->slistat.link_event++;
1282 lpfc_read_la(phba, pmb, mp);
1283 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
2e0fef85 1284 pmb->vport = vport;
0d2b6b83
JS
1285 /* Block ELS IOCBs until we have processed this mbox command */
1286 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
0b727fea 1287 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
09372820
JS
1288 if (rc == MBX_NOT_FINISHED) {
1289 rc = 4;
14691150 1290 goto lpfc_handle_latt_free_mbuf;
09372820 1291 }
dea3101e 1292
1293 /* Clear Link Attention in HA REG */
2e0fef85 1294 spin_lock_irq(&phba->hbalock);
dea3101e 1295 writel(HA_LATT, phba->HAregaddr);
1296 readl(phba->HAregaddr); /* flush */
2e0fef85 1297 spin_unlock_irq(&phba->hbalock);
dea3101e 1298
1299 return;
1300
14691150 1301lpfc_handle_latt_free_mbuf:
0d2b6b83 1302 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
14691150 1303 lpfc_mbuf_free(phba, mp->virt, mp->phys);
dea3101e 1304lpfc_handle_latt_free_mp:
1305 kfree(mp);
1306lpfc_handle_latt_free_pmb:
1dcb58e5 1307 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 1308lpfc_handle_latt_err_exit:
1309 /* Enable Link attention interrupts */
2e0fef85 1310 spin_lock_irq(&phba->hbalock);
dea3101e 1311 psli->sli_flag |= LPFC_PROCESS_LA;
1312 control = readl(phba->HCregaddr);
1313 control |= HC_LAINT_ENA;
1314 writel(control, phba->HCregaddr);
1315 readl(phba->HCregaddr); /* flush */
1316
1317 /* Clear Link Attention in HA REG */
1318 writel(HA_LATT, phba->HAregaddr);
1319 readl(phba->HAregaddr); /* flush */
2e0fef85 1320 spin_unlock_irq(&phba->hbalock);
dea3101e 1321 lpfc_linkdown(phba);
2e0fef85 1322 phba->link_state = LPFC_HBA_ERROR;
dea3101e 1323
09372820
JS
1324 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1325 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
dea3101e 1326
1327 return;
1328}
1329
e59058c4 1330/**
3621a710 1331 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
e59058c4
JS
1332 * @phba: pointer to lpfc hba data structure.
1333 * @vpd: pointer to the vital product data.
1334 * @len: length of the vital product data in bytes.
1335 *
1336 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1337 * an array of characters. In this routine, the ModelName, ProgramType, and
1338 * ModelDesc, etc. fields of the phba data structure will be populated.
1339 *
1340 * Return codes
1341 * 0 - pointer to the VPD passed in is NULL
1342 * 1 - success
1343 **/
3772a991 1344int
2e0fef85 1345lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
dea3101e 1346{
1347 uint8_t lenlo, lenhi;
07da60c1 1348 int Length;
dea3101e 1349 int i, j;
1350 int finished = 0;
1351 int index = 0;
1352
1353 if (!vpd)
1354 return 0;
1355
1356 /* Vital Product */
ed957684 1357 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011 1358 "0455 Vital Product Data: x%x x%x x%x x%x\n",
dea3101e 1359 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1360 (uint32_t) vpd[3]);
74b72a59 1361 while (!finished && (index < (len - 4))) {
dea3101e 1362 switch (vpd[index]) {
1363 case 0x82:
74b72a59 1364 case 0x91:
dea3101e 1365 index += 1;
1366 lenlo = vpd[index];
1367 index += 1;
1368 lenhi = vpd[index];
1369 index += 1;
1370 i = ((((unsigned short)lenhi) << 8) + lenlo);
1371 index += i;
1372 break;
1373 case 0x90:
1374 index += 1;
1375 lenlo = vpd[index];
1376 index += 1;
1377 lenhi = vpd[index];
1378 index += 1;
1379 Length = ((((unsigned short)lenhi) << 8) + lenlo);
74b72a59
JW
1380 if (Length > len - index)
1381 Length = len - index;
dea3101e 1382 while (Length > 0) {
1383 /* Look for Serial Number */
1384 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1385 index += 2;
1386 i = vpd[index];
1387 index += 1;
1388 j = 0;
1389 Length -= (3+i);
1390 while(i--) {
1391 phba->SerialNumber[j++] = vpd[index++];
1392 if (j == 31)
1393 break;
1394 }
1395 phba->SerialNumber[j] = 0;
1396 continue;
1397 }
1398 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1399 phba->vpd_flag |= VPD_MODEL_DESC;
1400 index += 2;
1401 i = vpd[index];
1402 index += 1;
1403 j = 0;
1404 Length -= (3+i);
1405 while(i--) {
1406 phba->ModelDesc[j++] = vpd[index++];
1407 if (j == 255)
1408 break;
1409 }
1410 phba->ModelDesc[j] = 0;
1411 continue;
1412 }
1413 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1414 phba->vpd_flag |= VPD_MODEL_NAME;
1415 index += 2;
1416 i = vpd[index];
1417 index += 1;
1418 j = 0;
1419 Length -= (3+i);
1420 while(i--) {
1421 phba->ModelName[j++] = vpd[index++];
1422 if (j == 79)
1423 break;
1424 }
1425 phba->ModelName[j] = 0;
1426 continue;
1427 }
1428 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1429 phba->vpd_flag |= VPD_PROGRAM_TYPE;
1430 index += 2;
1431 i = vpd[index];
1432 index += 1;
1433 j = 0;
1434 Length -= (3+i);
1435 while(i--) {
1436 phba->ProgramType[j++] = vpd[index++];
1437 if (j == 255)
1438 break;
1439 }
1440 phba->ProgramType[j] = 0;
1441 continue;
1442 }
1443 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1444 phba->vpd_flag |= VPD_PORT;
1445 index += 2;
1446 i = vpd[index];
1447 index += 1;
1448 j = 0;
1449 Length -= (3+i);
1450 while(i--) {
1451 phba->Port[j++] = vpd[index++];
1452 if (j == 19)
1453 break;
1454 }
1455 phba->Port[j] = 0;
1456 continue;
1457 }
1458 else {
1459 index += 2;
1460 i = vpd[index];
1461 index += 1;
1462 index += i;
1463 Length -= (3 + i);
1464 }
1465 }
1466 finished = 0;
1467 break;
1468 case 0x78:
1469 finished = 1;
1470 break;
1471 default:
1472 index ++;
1473 break;
1474 }
74b72a59 1475 }
dea3101e 1476
1477 return(1);
1478}
1479
e59058c4 1480/**
3621a710 1481 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
e59058c4
JS
1482 * @phba: pointer to lpfc hba data structure.
1483 * @mdp: pointer to the data structure to hold the derived model name.
1484 * @descp: pointer to the data structure to hold the derived description.
1485 *
1486 * This routine retrieves HBA's description based on its registered PCI device
1487 * ID. The @descp passed into this function points to an array of 256 chars. It
1488 * shall be returned with the model name, maximum speed, and the host bus type.
1489 * The @mdp passed into this function points to an array of 80 chars. When the
1490 * function returns, the @mdp will be filled with the model name.
1491 **/
dea3101e 1492static void
2e0fef85 1493lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
dea3101e 1494{
1495 lpfc_vpd_t *vp;
fefcb2b6 1496 uint16_t dev_id = phba->pcidev->device;
74b72a59 1497 int max_speed;
84774a4d 1498 int GE = 0;
da0436e9 1499 int oneConnect = 0; /* default is not a oneConnect */
74b72a59
JW
1500 struct {
1501 char * name;
1502 int max_speed;
74b72a59 1503 char * bus;
18a3b596 1504 } m = {"<Unknown>", 0, ""};
74b72a59
JW
1505
1506 if (mdp && mdp[0] != '\0'
1507 && descp && descp[0] != '\0')
1508 return;
1509
1510 if (phba->lmt & LMT_10Gb)
1511 max_speed = 10;
1512 else if (phba->lmt & LMT_8Gb)
1513 max_speed = 8;
1514 else if (phba->lmt & LMT_4Gb)
1515 max_speed = 4;
1516 else if (phba->lmt & LMT_2Gb)
1517 max_speed = 2;
1518 else
1519 max_speed = 1;
dea3101e 1520
1521 vp = &phba->vpd;
dea3101e 1522
e4adb204 1523 switch (dev_id) {
06325e74 1524 case PCI_DEVICE_ID_FIREFLY:
18a3b596 1525 m = (typeof(m)){"LP6000", max_speed, "PCI"};
06325e74 1526 break;
dea3101e 1527 case PCI_DEVICE_ID_SUPERFLY:
1528 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
18a3b596 1529 m = (typeof(m)){"LP7000", max_speed, "PCI"};
dea3101e 1530 else
18a3b596 1531 m = (typeof(m)){"LP7000E", max_speed, "PCI"};
dea3101e 1532 break;
1533 case PCI_DEVICE_ID_DRAGONFLY:
18a3b596 1534 m = (typeof(m)){"LP8000", max_speed, "PCI"};
dea3101e 1535 break;
1536 case PCI_DEVICE_ID_CENTAUR:
1537 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
18a3b596 1538 m = (typeof(m)){"LP9002", max_speed, "PCI"};
dea3101e 1539 else
18a3b596 1540 m = (typeof(m)){"LP9000", max_speed, "PCI"};
dea3101e 1541 break;
1542 case PCI_DEVICE_ID_RFLY:
18a3b596 1543 m = (typeof(m)){"LP952", max_speed, "PCI"};
dea3101e 1544 break;
1545 case PCI_DEVICE_ID_PEGASUS:
18a3b596 1546 m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
dea3101e 1547 break;
1548 case PCI_DEVICE_ID_THOR:
18a3b596 1549 m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
dea3101e 1550 break;
1551 case PCI_DEVICE_ID_VIPER:
18a3b596 1552 m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
dea3101e 1553 break;
1554 case PCI_DEVICE_ID_PFLY:
18a3b596 1555 m = (typeof(m)){"LP982", max_speed, "PCI-X"};
dea3101e 1556 break;
1557 case PCI_DEVICE_ID_TFLY:
18a3b596 1558 m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
dea3101e 1559 break;
1560 case PCI_DEVICE_ID_HELIOS:
18a3b596 1561 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
dea3101e 1562 break;
e4adb204 1563 case PCI_DEVICE_ID_HELIOS_SCSP:
18a3b596 1564 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
e4adb204
JSEC
1565 break;
1566 case PCI_DEVICE_ID_HELIOS_DCSP:
18a3b596 1567 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
e4adb204
JSEC
1568 break;
1569 case PCI_DEVICE_ID_NEPTUNE:
18a3b596 1570 m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
e4adb204
JSEC
1571 break;
1572 case PCI_DEVICE_ID_NEPTUNE_SCSP:
18a3b596 1573 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
e4adb204
JSEC
1574 break;
1575 case PCI_DEVICE_ID_NEPTUNE_DCSP:
18a3b596 1576 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
e4adb204 1577 break;
dea3101e 1578 case PCI_DEVICE_ID_BMID:
18a3b596 1579 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
dea3101e 1580 break;
1581 case PCI_DEVICE_ID_BSMB:
18a3b596 1582 m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
dea3101e 1583 break;
1584 case PCI_DEVICE_ID_ZEPHYR:
18a3b596 1585 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
dea3101e 1586 break;
e4adb204 1587 case PCI_DEVICE_ID_ZEPHYR_SCSP:
18a3b596 1588 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
e4adb204
JSEC
1589 break;
1590 case PCI_DEVICE_ID_ZEPHYR_DCSP:
a257bf90
JS
1591 m = (typeof(m)){"LP2105", max_speed, "PCIe"};
1592 GE = 1;
e4adb204 1593 break;
dea3101e 1594 case PCI_DEVICE_ID_ZMID:
18a3b596 1595 m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
dea3101e 1596 break;
1597 case PCI_DEVICE_ID_ZSMB:
18a3b596 1598 m = (typeof(m)){"LPe111", max_speed, "PCIe"};
dea3101e 1599 break;
1600 case PCI_DEVICE_ID_LP101:
18a3b596 1601 m = (typeof(m)){"LP101", max_speed, "PCI-X"};
dea3101e 1602 break;
1603 case PCI_DEVICE_ID_LP10000S:
18a3b596 1604 m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
06325e74 1605 break;
e4adb204 1606 case PCI_DEVICE_ID_LP11000S:
18a3b596
JS
1607 m = (typeof(m)){"LP11000-S", max_speed,
1608 "PCI-X2"};
1609 break;
e4adb204 1610 case PCI_DEVICE_ID_LPE11000S:
18a3b596
JS
1611 m = (typeof(m)){"LPe11000-S", max_speed,
1612 "PCIe"};
5cc36b3c 1613 break;
b87eab38
JS
1614 case PCI_DEVICE_ID_SAT:
1615 m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
1616 break;
1617 case PCI_DEVICE_ID_SAT_MID:
1618 m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
1619 break;
1620 case PCI_DEVICE_ID_SAT_SMB:
1621 m = (typeof(m)){"LPe121", max_speed, "PCIe"};
1622 break;
1623 case PCI_DEVICE_ID_SAT_DCSP:
1624 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
1625 break;
1626 case PCI_DEVICE_ID_SAT_SCSP:
1627 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
1628 break;
1629 case PCI_DEVICE_ID_SAT_S:
1630 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
1631 break;
84774a4d
JS
1632 case PCI_DEVICE_ID_HORNET:
1633 m = (typeof(m)){"LP21000", max_speed, "PCIe"};
1634 GE = 1;
1635 break;
1636 case PCI_DEVICE_ID_PROTEUS_VF:
1637 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1638 break;
1639 case PCI_DEVICE_ID_PROTEUS_PF:
1640 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1641 break;
1642 case PCI_DEVICE_ID_PROTEUS_S:
1643 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1644 break;
da0436e9
JS
1645 case PCI_DEVICE_ID_TIGERSHARK:
1646 oneConnect = 1;
1647 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1648 break;
1649 case PCI_DEVICE_ID_TIGERSHARK_S:
1650 oneConnect = 1;
1651 m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
1652 break;
5cc36b3c 1653 default:
041976fb 1654 m = (typeof(m)){ NULL };
e4adb204 1655 break;
dea3101e 1656 }
74b72a59
JW
1657
1658 if (mdp && mdp[0] == '\0')
1659 snprintf(mdp, 79,"%s", m.name);
da0436e9
JS
1660 /* oneConnect hba requires special processing, they are all initiators
1661 * and we put the port number on the end
1662 */
1663 if (descp && descp[0] == '\0') {
1664 if (oneConnect)
1665 snprintf(descp, 255,
1666 "Emulex OneConnect %s, FCoE Initiator, Port %s",
1667 m.name,
1668 phba->Port);
1669 else
1670 snprintf(descp, 255,
1671 "Emulex %s %d%s %s %s",
1672 m.name, m.max_speed,
1673 (GE) ? "GE" : "Gb",
1674 m.bus,
1675 (GE) ? "FCoE Adapter" :
1676 "Fibre Channel Adapter");
1677 }
dea3101e 1678}
1679
e59058c4 1680/**
3621a710 1681 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
e59058c4
JS
1682 * @phba: pointer to lpfc hba data structure.
1683 * @pring: pointer to a IOCB ring.
1684 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1685 *
1686 * This routine posts a given number of IOCBs with the associated DMA buffer
1687 * descriptors specified by the cnt argument to the given IOCB ring.
1688 *
1689 * Return codes
1690 * The number of IOCBs NOT able to be posted to the IOCB ring.
1691 **/
dea3101e 1692int
495a714c 1693lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
dea3101e 1694{
1695 IOCB_t *icmd;
0bd4ca25 1696 struct lpfc_iocbq *iocb;
dea3101e 1697 struct lpfc_dmabuf *mp1, *mp2;
1698
1699 cnt += pring->missbufcnt;
1700
1701 /* While there are buffers to post */
1702 while (cnt > 0) {
1703 /* Allocate buffer for command iocb */
0bd4ca25 1704 iocb = lpfc_sli_get_iocbq(phba);
dea3101e 1705 if (iocb == NULL) {
1706 pring->missbufcnt = cnt;
1707 return cnt;
1708 }
dea3101e 1709 icmd = &iocb->iocb;
1710
1711 /* 2 buffers can be posted per command */
1712 /* Allocate buffer to post */
1713 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1714 if (mp1)
98c9ea5c
JS
1715 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1716 if (!mp1 || !mp1->virt) {
c9475cb0 1717 kfree(mp1);
604a3e30 1718 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 1719 pring->missbufcnt = cnt;
1720 return cnt;
1721 }
1722
1723 INIT_LIST_HEAD(&mp1->list);
1724 /* Allocate buffer to post */
1725 if (cnt > 1) {
1726 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1727 if (mp2)
1728 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1729 &mp2->phys);
98c9ea5c 1730 if (!mp2 || !mp2->virt) {
c9475cb0 1731 kfree(mp2);
dea3101e 1732 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1733 kfree(mp1);
604a3e30 1734 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 1735 pring->missbufcnt = cnt;
1736 return cnt;
1737 }
1738
1739 INIT_LIST_HEAD(&mp2->list);
1740 } else {
1741 mp2 = NULL;
1742 }
1743
1744 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1745 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1746 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1747 icmd->ulpBdeCount = 1;
1748 cnt--;
1749 if (mp2) {
1750 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1751 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1752 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1753 cnt--;
1754 icmd->ulpBdeCount = 2;
1755 }
1756
1757 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1758 icmd->ulpLe = 1;
1759
3772a991
JS
1760 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1761 IOCB_ERROR) {
dea3101e 1762 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1763 kfree(mp1);
1764 cnt++;
1765 if (mp2) {
1766 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1767 kfree(mp2);
1768 cnt++;
1769 }
604a3e30 1770 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 1771 pring->missbufcnt = cnt;
dea3101e 1772 return cnt;
1773 }
dea3101e 1774 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
92d7f7b0 1775 if (mp2)
dea3101e 1776 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
dea3101e 1777 }
1778 pring->missbufcnt = 0;
1779 return 0;
1780}
1781
e59058c4 1782/**
3621a710 1783 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
e59058c4
JS
1784 * @phba: pointer to lpfc hba data structure.
1785 *
1786 * This routine posts initial receive IOCB buffers to the ELS ring. The
1787 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
1788 * set to 64 IOCBs.
1789 *
1790 * Return codes
1791 * 0 - success (currently always success)
1792 **/
dea3101e 1793static int
2e0fef85 1794lpfc_post_rcv_buf(struct lpfc_hba *phba)
dea3101e 1795{
1796 struct lpfc_sli *psli = &phba->sli;
1797
1798 /* Ring 0, ELS / CT buffers */
495a714c 1799 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
dea3101e 1800 /* Ring 2 - FCP no buffers needed */
1801
1802 return 0;
1803}
1804
1805#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1806
e59058c4 1807/**
3621a710 1808 * lpfc_sha_init - Set up initial array of hash table entries
e59058c4
JS
1809 * @HashResultPointer: pointer to an array as hash table.
1810 *
1811 * This routine sets up the initial values to the array of hash table entries
1812 * for the LC HBAs.
1813 **/
dea3101e 1814static void
1815lpfc_sha_init(uint32_t * HashResultPointer)
1816{
1817 HashResultPointer[0] = 0x67452301;
1818 HashResultPointer[1] = 0xEFCDAB89;
1819 HashResultPointer[2] = 0x98BADCFE;
1820 HashResultPointer[3] = 0x10325476;
1821 HashResultPointer[4] = 0xC3D2E1F0;
1822}
1823
e59058c4 1824/**
3621a710 1825 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
e59058c4
JS
1826 * @HashResultPointer: pointer to an initial/result hash table.
1827 * @HashWorkingPointer: pointer to an working hash table.
1828 *
1829 * This routine iterates an initial hash table pointed by @HashResultPointer
1830 * with the values from the working hash table pointeed by @HashWorkingPointer.
1831 * The results are putting back to the initial hash table, returned through
1832 * the @HashResultPointer as the result hash table.
1833 **/
dea3101e 1834static void
1835lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1836{
1837 int t;
1838 uint32_t TEMP;
1839 uint32_t A, B, C, D, E;
1840 t = 16;
1841 do {
1842 HashWorkingPointer[t] =
1843 S(1,
1844 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
1845 8] ^
1846 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1847 } while (++t <= 79);
1848 t = 0;
1849 A = HashResultPointer[0];
1850 B = HashResultPointer[1];
1851 C = HashResultPointer[2];
1852 D = HashResultPointer[3];
1853 E = HashResultPointer[4];
1854
1855 do {
1856 if (t < 20) {
1857 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1858 } else if (t < 40) {
1859 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1860 } else if (t < 60) {
1861 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1862 } else {
1863 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1864 }
1865 TEMP += S(5, A) + E + HashWorkingPointer[t];
1866 E = D;
1867 D = C;
1868 C = S(30, B);
1869 B = A;
1870 A = TEMP;
1871 } while (++t <= 79);
1872
1873 HashResultPointer[0] += A;
1874 HashResultPointer[1] += B;
1875 HashResultPointer[2] += C;
1876 HashResultPointer[3] += D;
1877 HashResultPointer[4] += E;
1878
1879}
1880
e59058c4 1881/**
3621a710 1882 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
e59058c4
JS
1883 * @RandomChallenge: pointer to the entry of host challenge random number array.
1884 * @HashWorking: pointer to the entry of the working hash array.
1885 *
1886 * This routine calculates the working hash array referred by @HashWorking
1887 * from the challenge random numbers associated with the host, referred by
1888 * @RandomChallenge. The result is put into the entry of the working hash
1889 * array and returned by reference through @HashWorking.
1890 **/
dea3101e 1891static void
1892lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1893{
1894 *HashWorking = (*RandomChallenge ^ *HashWorking);
1895}
1896
e59058c4 1897/**
3621a710 1898 * lpfc_hba_init - Perform special handling for LC HBA initialization
e59058c4
JS
1899 * @phba: pointer to lpfc hba data structure.
1900 * @hbainit: pointer to an array of unsigned 32-bit integers.
1901 *
1902 * This routine performs the special handling for LC HBA initialization.
1903 **/
dea3101e 1904void
1905lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1906{
1907 int t;
1908 uint32_t *HashWorking;
2e0fef85 1909 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
dea3101e 1910
bbfbbbc1 1911 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
dea3101e 1912 if (!HashWorking)
1913 return;
1914
dea3101e 1915 HashWorking[0] = HashWorking[78] = *pwwnn++;
1916 HashWorking[1] = HashWorking[79] = *pwwnn;
1917
1918 for (t = 0; t < 7; t++)
1919 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1920
1921 lpfc_sha_init(hbainit);
1922 lpfc_sha_iterate(hbainit, HashWorking);
1923 kfree(HashWorking);
1924}
1925
e59058c4 1926/**
3621a710 1927 * lpfc_cleanup - Performs vport cleanups before deleting a vport
e59058c4
JS
1928 * @vport: pointer to a virtual N_Port data structure.
1929 *
1930 * This routine performs the necessary cleanups before deleting the @vport.
1931 * It invokes the discovery state machine to perform necessary state
1932 * transitions and to release the ndlps associated with the @vport. Note,
1933 * the physical port is treated as @vport 0.
1934 **/
87af33fe 1935void
2e0fef85 1936lpfc_cleanup(struct lpfc_vport *vport)
dea3101e 1937{
87af33fe 1938 struct lpfc_hba *phba = vport->phba;
dea3101e 1939 struct lpfc_nodelist *ndlp, *next_ndlp;
a8adb832 1940 int i = 0;
dea3101e 1941
87af33fe
JS
1942 if (phba->link_state > LPFC_LINK_DOWN)
1943 lpfc_port_link_failure(vport);
1944
1945 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
1946 if (!NLP_CHK_NODE_ACT(ndlp)) {
1947 ndlp = lpfc_enable_node(vport, ndlp,
1948 NLP_STE_UNUSED_NODE);
1949 if (!ndlp)
1950 continue;
1951 spin_lock_irq(&phba->ndlp_lock);
1952 NLP_SET_FREE_REQ(ndlp);
1953 spin_unlock_irq(&phba->ndlp_lock);
1954 /* Trigger the release of the ndlp memory */
1955 lpfc_nlp_put(ndlp);
1956 continue;
1957 }
1958 spin_lock_irq(&phba->ndlp_lock);
1959 if (NLP_CHK_FREE_REQ(ndlp)) {
1960 /* The ndlp should not be in memory free mode already */
1961 spin_unlock_irq(&phba->ndlp_lock);
1962 continue;
1963 } else
1964 /* Indicate request for freeing ndlp memory */
1965 NLP_SET_FREE_REQ(ndlp);
1966 spin_unlock_irq(&phba->ndlp_lock);
1967
58da1ffb
JS
1968 if (vport->port_type != LPFC_PHYSICAL_PORT &&
1969 ndlp->nlp_DID == Fabric_DID) {
1970 /* Just free up ndlp with Fabric_DID for vports */
1971 lpfc_nlp_put(ndlp);
1972 continue;
1973 }
1974
87af33fe
JS
1975 if (ndlp->nlp_type & NLP_FABRIC)
1976 lpfc_disc_state_machine(vport, ndlp, NULL,
1977 NLP_EVT_DEVICE_RECOVERY);
e47c9093 1978
87af33fe
JS
1979 lpfc_disc_state_machine(vport, ndlp, NULL,
1980 NLP_EVT_DEVICE_RM);
495a714c 1981
87af33fe
JS
1982 }
1983
a8adb832
JS
1984 /* At this point, ALL ndlp's should be gone
1985 * because of the previous NLP_EVT_DEVICE_RM.
1986 * Lets wait for this to happen, if needed.
1987 */
87af33fe 1988 while (!list_empty(&vport->fc_nodes)) {
a8adb832 1989 if (i++ > 3000) {
87af33fe 1990 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
a8adb832 1991 "0233 Nodelist not empty\n");
e47c9093
JS
1992 list_for_each_entry_safe(ndlp, next_ndlp,
1993 &vport->fc_nodes, nlp_listp) {
1994 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
1995 LOG_NODE,
d7c255b2 1996 "0282 did:x%x ndlp:x%p "
e47c9093
JS
1997 "usgmap:x%x refcnt:%d\n",
1998 ndlp->nlp_DID, (void *)ndlp,
1999 ndlp->nlp_usg_map,
2000 atomic_read(
2001 &ndlp->kref.refcount));
2002 }
a8adb832 2003 break;
87af33fe 2004 }
a8adb832
JS
2005
2006 /* Wait for any activity on ndlps to settle */
2007 msleep(10);
87af33fe 2008 }
dea3101e 2009}
2010
e59058c4 2011/**
3621a710 2012 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
e59058c4
JS
2013 * @vport: pointer to a virtual N_Port data structure.
2014 *
2015 * This routine stops all the timers associated with a @vport. This function
2016 * is invoked before disabling or deleting a @vport. Note that the physical
2017 * port is treated as @vport 0.
2018 **/
92d7f7b0
JS
2019void
2020lpfc_stop_vport_timers(struct lpfc_vport *vport)
dea3101e 2021{
92d7f7b0
JS
2022 del_timer_sync(&vport->els_tmofunc);
2023 del_timer_sync(&vport->fc_fdmitmo);
2024 lpfc_can_disctmo(vport);
2025 return;
dea3101e 2026}
2027
e59058c4 2028/**
3772a991 2029 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
e59058c4
JS
2030 * @phba: pointer to lpfc hba data structure.
2031 *
2032 * This routine stops all the timers associated with a HBA. This function is
2033 * invoked before either putting a HBA offline or unloading the driver.
2034 **/
3772a991
JS
2035void
2036lpfc_stop_hba_timers(struct lpfc_hba *phba)
dea3101e 2037{
51ef4c26 2038 lpfc_stop_vport_timers(phba->pport);
2e0fef85 2039 del_timer_sync(&phba->sli.mbox_tmo);
92d7f7b0 2040 del_timer_sync(&phba->fabric_block_timer);
9399627f 2041 del_timer_sync(&phba->eratt_poll);
3772a991
JS
2042 del_timer_sync(&phba->hb_tmofunc);
2043 phba->hb_outstanding = 0;
2044
2045 switch (phba->pci_dev_grp) {
2046 case LPFC_PCI_DEV_LP:
2047 /* Stop any LightPulse device specific driver timers */
2048 del_timer_sync(&phba->fcp_poll_timer);
2049 break;
2050 case LPFC_PCI_DEV_OC:
2051 /* Stop any OneConnect device sepcific driver timers */
2052 break;
2053 default:
2054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2055 "0297 Invalid device group (x%x)\n",
2056 phba->pci_dev_grp);
2057 break;
2058 }
2e0fef85 2059 return;
dea3101e 2060}
2061
e59058c4 2062/**
3621a710 2063 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
e59058c4
JS
2064 * @phba: pointer to lpfc hba data structure.
2065 *
2066 * This routine marks a HBA's management interface as blocked. Once the HBA's
2067 * management interface is marked as blocked, all the user space access to
2068 * the HBA, whether they are from sysfs interface or libdfc interface will
2069 * all be blocked. The HBA is set to block the management interface when the
2070 * driver prepares the HBA interface for online or offline.
2071 **/
a6ababd2
AB
2072static void
2073lpfc_block_mgmt_io(struct lpfc_hba * phba)
2074{
2075 unsigned long iflag;
2076
2077 spin_lock_irqsave(&phba->hbalock, iflag);
2078 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2079 spin_unlock_irqrestore(&phba->hbalock, iflag);
2080}
2081
e59058c4 2082/**
3621a710 2083 * lpfc_online - Initialize and bring a HBA online
e59058c4
JS
2084 * @phba: pointer to lpfc hba data structure.
2085 *
2086 * This routine initializes the HBA and brings a HBA online. During this
2087 * process, the management interface is blocked to prevent user space access
2088 * to the HBA interfering with the driver initialization.
2089 *
2090 * Return codes
2091 * 0 - successful
2092 * 1 - failed
2093 **/
dea3101e 2094int
2e0fef85 2095lpfc_online(struct lpfc_hba *phba)
dea3101e 2096{
372bd282 2097 struct lpfc_vport *vport;
549e55cd
JS
2098 struct lpfc_vport **vports;
2099 int i;
2e0fef85 2100
dea3101e 2101 if (!phba)
2102 return 0;
372bd282 2103 vport = phba->pport;
dea3101e 2104
2e0fef85 2105 if (!(vport->fc_flag & FC_OFFLINE_MODE))
dea3101e 2106 return 0;
2107
ed957684 2108 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 2109 "0458 Bring Adapter online\n");
dea3101e 2110
46fa311e
JS
2111 lpfc_block_mgmt_io(phba);
2112
2113 if (!lpfc_sli_queue_setup(phba)) {
2114 lpfc_unblock_mgmt_io(phba);
dea3101e 2115 return 1;
46fa311e 2116 }
dea3101e 2117
da0436e9
JS
2118 if (phba->sli_rev == LPFC_SLI_REV4) {
2119 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2120 lpfc_unblock_mgmt_io(phba);
2121 return 1;
2122 }
2123 } else {
2124 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2125 lpfc_unblock_mgmt_io(phba);
2126 return 1;
2127 }
46fa311e 2128 }
dea3101e 2129
549e55cd
JS
2130 vports = lpfc_create_vport_work_array(phba);
2131 if (vports != NULL)
da0436e9 2132 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd
JS
2133 struct Scsi_Host *shost;
2134 shost = lpfc_shost_from_vport(vports[i]);
2135 spin_lock_irq(shost->host_lock);
2136 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2137 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2138 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2139 spin_unlock_irq(shost->host_lock);
2140 }
09372820 2141 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 2142
46fa311e 2143 lpfc_unblock_mgmt_io(phba);
dea3101e 2144 return 0;
2145}
2146
e59058c4 2147/**
3621a710 2148 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
e59058c4
JS
2149 * @phba: pointer to lpfc hba data structure.
2150 *
2151 * This routine marks a HBA's management interface as not blocked. Once the
2152 * HBA's management interface is marked as not blocked, all the user space
2153 * access to the HBA, whether they are from sysfs interface or libdfc
2154 * interface will be allowed. The HBA is set to block the management interface
2155 * when the driver prepares the HBA interface for online or offline and then
2156 * set to unblock the management interface afterwards.
2157 **/
46fa311e
JS
2158void
2159lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2160{
2161 unsigned long iflag;
2162
2e0fef85
JS
2163 spin_lock_irqsave(&phba->hbalock, iflag);
2164 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2165 spin_unlock_irqrestore(&phba->hbalock, iflag);
46fa311e
JS
2166}
2167
e59058c4 2168/**
3621a710 2169 * lpfc_offline_prep - Prepare a HBA to be brought offline
e59058c4
JS
2170 * @phba: pointer to lpfc hba data structure.
2171 *
2172 * This routine is invoked to prepare a HBA to be brought offline. It performs
2173 * unregistration login to all the nodes on all vports and flushes the mailbox
2174 * queue to make it ready to be brought offline.
2175 **/
46fa311e
JS
2176void
2177lpfc_offline_prep(struct lpfc_hba * phba)
2178{
2e0fef85 2179 struct lpfc_vport *vport = phba->pport;
46fa311e 2180 struct lpfc_nodelist *ndlp, *next_ndlp;
87af33fe
JS
2181 struct lpfc_vport **vports;
2182 int i;
dea3101e 2183
2e0fef85 2184 if (vport->fc_flag & FC_OFFLINE_MODE)
46fa311e 2185 return;
dea3101e 2186
46fa311e 2187 lpfc_block_mgmt_io(phba);
dea3101e 2188
2189 lpfc_linkdown(phba);
2190
87af33fe
JS
2191 /* Issue an unreg_login to all nodes on all vports */
2192 vports = lpfc_create_vport_work_array(phba);
2193 if (vports != NULL) {
da0436e9 2194 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
87af33fe
JS
2195 struct Scsi_Host *shost;
2196
a8adb832
JS
2197 if (vports[i]->load_flag & FC_UNLOADING)
2198 continue;
da0436e9 2199 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
87af33fe
JS
2200 shost = lpfc_shost_from_vport(vports[i]);
2201 list_for_each_entry_safe(ndlp, next_ndlp,
2202 &vports[i]->fc_nodes,
2203 nlp_listp) {
e47c9093
JS
2204 if (!NLP_CHK_NODE_ACT(ndlp))
2205 continue;
87af33fe
JS
2206 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2207 continue;
2208 if (ndlp->nlp_type & NLP_FABRIC) {
2209 lpfc_disc_state_machine(vports[i], ndlp,
2210 NULL, NLP_EVT_DEVICE_RECOVERY);
2211 lpfc_disc_state_machine(vports[i], ndlp,
2212 NULL, NLP_EVT_DEVICE_RM);
2213 }
2214 spin_lock_irq(shost->host_lock);
2215 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2216 spin_unlock_irq(shost->host_lock);
2217 lpfc_unreg_rpi(vports[i], ndlp);
2218 }
2219 }
2220 }
09372820 2221 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 2222
04c68496 2223 lpfc_sli_mbox_sys_shutdown(phba);
46fa311e
JS
2224}
2225
e59058c4 2226/**
3621a710 2227 * lpfc_offline - Bring a HBA offline
e59058c4
JS
2228 * @phba: pointer to lpfc hba data structure.
2229 *
2230 * This routine actually brings a HBA offline. It stops all the timers
2231 * associated with the HBA, brings down the SLI layer, and eventually
2232 * marks the HBA as in offline state for the upper layer protocol.
2233 **/
46fa311e 2234void
2e0fef85 2235lpfc_offline(struct lpfc_hba *phba)
46fa311e 2236{
549e55cd
JS
2237 struct Scsi_Host *shost;
2238 struct lpfc_vport **vports;
2239 int i;
46fa311e 2240
549e55cd 2241 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
46fa311e 2242 return;
688a8863 2243
da0436e9
JS
2244 /* stop port and all timers associated with this hba */
2245 lpfc_stop_port(phba);
51ef4c26
JS
2246 vports = lpfc_create_vport_work_array(phba);
2247 if (vports != NULL)
da0436e9 2248 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
51ef4c26 2249 lpfc_stop_vport_timers(vports[i]);
09372820 2250 lpfc_destroy_vport_work_array(phba, vports);
92d7f7b0 2251 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 2252 "0460 Bring Adapter offline\n");
dea3101e 2253 /* Bring down the SLI Layer and cleanup. The HBA is offline
2254 now. */
2255 lpfc_sli_hba_down(phba);
92d7f7b0 2256 spin_lock_irq(&phba->hbalock);
7054a606 2257 phba->work_ha = 0;
92d7f7b0 2258 spin_unlock_irq(&phba->hbalock);
549e55cd
JS
2259 vports = lpfc_create_vport_work_array(phba);
2260 if (vports != NULL)
da0436e9 2261 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd 2262 shost = lpfc_shost_from_vport(vports[i]);
549e55cd
JS
2263 spin_lock_irq(shost->host_lock);
2264 vports[i]->work_port_events = 0;
2265 vports[i]->fc_flag |= FC_OFFLINE_MODE;
2266 spin_unlock_irq(shost->host_lock);
2267 }
09372820 2268 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 2269}
2270
e59058c4 2271/**
3621a710 2272 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
e59058c4
JS
2273 * @phba: pointer to lpfc hba data structure.
2274 *
2275 * This routine is to free all the SCSI buffers and IOCBs from the driver
2276 * list back to kernel. It is called from lpfc_pci_remove_one to free
2277 * the internal resources before the device is removed from the system.
2278 *
2279 * Return codes
2280 * 0 - successful (for now, it always returns 0)
2281 **/
dea3101e 2282static int
2e0fef85 2283lpfc_scsi_free(struct lpfc_hba *phba)
dea3101e 2284{
2285 struct lpfc_scsi_buf *sb, *sb_next;
2286 struct lpfc_iocbq *io, *io_next;
2287
2e0fef85 2288 spin_lock_irq(&phba->hbalock);
dea3101e 2289 /* Release all the lpfc_scsi_bufs maintained by this host. */
2290 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2291 list_del(&sb->list);
2292 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
92d7f7b0 2293 sb->dma_handle);
dea3101e 2294 kfree(sb);
2295 phba->total_scsi_bufs--;
2296 }
2297
2298 /* Release all the lpfc_iocbq entries maintained by this host. */
2299 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2300 list_del(&io->list);
2301 kfree(io);
2302 phba->total_iocbq_bufs--;
2303 }
2304
2e0fef85 2305 spin_unlock_irq(&phba->hbalock);
dea3101e 2306
2307 return 0;
2308}
2309
e59058c4 2310/**
3621a710 2311 * lpfc_create_port - Create an FC port
e59058c4
JS
2312 * @phba: pointer to lpfc hba data structure.
2313 * @instance: a unique integer ID to this FC port.
2314 * @dev: pointer to the device data structure.
2315 *
2316 * This routine creates a FC port for the upper layer protocol. The FC port
2317 * can be created on top of either a physical port or a virtual port provided
2318 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2319 * and associates the FC port created before adding the shost into the SCSI
2320 * layer.
2321 *
2322 * Return codes
2323 * @vport - pointer to the virtual N_Port data structure.
2324 * NULL - port create failed.
2325 **/
2e0fef85 2326struct lpfc_vport *
3de2a653 2327lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
47a8617c 2328{
2e0fef85
JS
2329 struct lpfc_vport *vport;
2330 struct Scsi_Host *shost;
2331 int error = 0;
47a8617c 2332
3de2a653
JS
2333 if (dev != &phba->pcidev->dev)
2334 shost = scsi_host_alloc(&lpfc_vport_template,
2335 sizeof(struct lpfc_vport));
2336 else
2337 shost = scsi_host_alloc(&lpfc_template,
2338 sizeof(struct lpfc_vport));
2e0fef85
JS
2339 if (!shost)
2340 goto out;
47a8617c 2341
2e0fef85
JS
2342 vport = (struct lpfc_vport *) shost->hostdata;
2343 vport->phba = phba;
2e0fef85 2344 vport->load_flag |= FC_LOADING;
92d7f7b0 2345 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7f5f3d0d 2346 vport->fc_rscn_flush = 0;
47a8617c 2347
3de2a653 2348 lpfc_get_vport_cfgparam(vport);
2e0fef85
JS
2349 shost->unique_id = instance;
2350 shost->max_id = LPFC_MAX_TARGET;
3de2a653 2351 shost->max_lun = vport->cfg_max_luns;
2e0fef85
JS
2352 shost->this_id = -1;
2353 shost->max_cmd_len = 16;
da0436e9
JS
2354 if (phba->sli_rev == LPFC_SLI_REV4) {
2355 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2356 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2357 }
81301a9b 2358
47a8617c 2359 /*
2e0fef85
JS
2360 * Set initial can_queue value since 0 is no longer supported and
2361 * scsi_add_host will fail. This will be adjusted later based on the
2362 * max xri value determined in hba setup.
47a8617c 2363 */
2e0fef85 2364 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3de2a653 2365 if (dev != &phba->pcidev->dev) {
92d7f7b0
JS
2366 shost->transportt = lpfc_vport_transport_template;
2367 vport->port_type = LPFC_NPIV_PORT;
2368 } else {
2369 shost->transportt = lpfc_transport_template;
2370 vport->port_type = LPFC_PHYSICAL_PORT;
2371 }
47a8617c 2372
2e0fef85
JS
2373 /* Initialize all internally managed lists. */
2374 INIT_LIST_HEAD(&vport->fc_nodes);
da0436e9 2375 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2e0fef85 2376 spin_lock_init(&vport->work_port_lock);
47a8617c 2377
2e0fef85
JS
2378 init_timer(&vport->fc_disctmo);
2379 vport->fc_disctmo.function = lpfc_disc_timeout;
92d7f7b0 2380 vport->fc_disctmo.data = (unsigned long)vport;
47a8617c 2381
2e0fef85
JS
2382 init_timer(&vport->fc_fdmitmo);
2383 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
92d7f7b0 2384 vport->fc_fdmitmo.data = (unsigned long)vport;
47a8617c 2385
2e0fef85
JS
2386 init_timer(&vport->els_tmofunc);
2387 vport->els_tmofunc.function = lpfc_els_timeout;
92d7f7b0 2388 vport->els_tmofunc.data = (unsigned long)vport;
47a8617c 2389
3de2a653 2390 error = scsi_add_host(shost, dev);
2e0fef85
JS
2391 if (error)
2392 goto out_put_shost;
47a8617c 2393
549e55cd 2394 spin_lock_irq(&phba->hbalock);
2e0fef85 2395 list_add_tail(&vport->listentry, &phba->port_list);
549e55cd 2396 spin_unlock_irq(&phba->hbalock);
2e0fef85 2397 return vport;
47a8617c 2398
2e0fef85
JS
2399out_put_shost:
2400 scsi_host_put(shost);
2401out:
2402 return NULL;
47a8617c
JS
2403}
2404
e59058c4 2405/**
3621a710 2406 * destroy_port - destroy an FC port
e59058c4
JS
2407 * @vport: pointer to an lpfc virtual N_Port data structure.
2408 *
2409 * This routine destroys a FC port from the upper layer protocol. All the
2410 * resources associated with the port are released.
2411 **/
2e0fef85
JS
2412void
2413destroy_port(struct lpfc_vport *vport)
47a8617c 2414{
92d7f7b0
JS
2415 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2416 struct lpfc_hba *phba = vport->phba;
47a8617c 2417
858c9f6c 2418 lpfc_debugfs_terminate(vport);
92d7f7b0
JS
2419 fc_remove_host(shost);
2420 scsi_remove_host(shost);
47a8617c 2421
92d7f7b0
JS
2422 spin_lock_irq(&phba->hbalock);
2423 list_del_init(&vport->listentry);
2424 spin_unlock_irq(&phba->hbalock);
47a8617c 2425
92d7f7b0 2426 lpfc_cleanup(vport);
47a8617c 2427 return;
47a8617c
JS
2428}
2429
e59058c4 2430/**
3621a710 2431 * lpfc_get_instance - Get a unique integer ID
e59058c4
JS
2432 *
2433 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2434 * uses the kernel idr facility to perform the task.
2435 *
2436 * Return codes:
2437 * instance - a unique integer ID allocated as the new instance.
2438 * -1 - lpfc get instance failed.
2439 **/
92d7f7b0
JS
2440int
2441lpfc_get_instance(void)
2442{
2443 int instance = 0;
47a8617c 2444
92d7f7b0
JS
2445 /* Assign an unused number */
2446 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2447 return -1;
2448 if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2449 return -1;
2450 return instance;
47a8617c
JS
2451}
2452
e59058c4 2453/**
3621a710 2454 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
e59058c4
JS
2455 * @shost: pointer to SCSI host data structure.
2456 * @time: elapsed time of the scan in jiffies.
2457 *
2458 * This routine is called by the SCSI layer with a SCSI host to determine
2459 * whether the scan host is finished.
2460 *
2461 * Note: there is no scan_start function as adapter initialization will have
2462 * asynchronously kicked off the link initialization.
2463 *
2464 * Return codes
2465 * 0 - SCSI host scan is not over yet.
2466 * 1 - SCSI host scan is over.
2467 **/
47a8617c
JS
2468int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2469{
2e0fef85
JS
2470 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2471 struct lpfc_hba *phba = vport->phba;
858c9f6c 2472 int stat = 0;
47a8617c 2473
858c9f6c
JS
2474 spin_lock_irq(shost->host_lock);
2475
51ef4c26 2476 if (vport->load_flag & FC_UNLOADING) {
858c9f6c
JS
2477 stat = 1;
2478 goto finished;
2479 }
2e0fef85
JS
2480 if (time >= 30 * HZ) {
2481 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
2482 "0461 Scanning longer than 30 "
2483 "seconds. Continuing initialization\n");
858c9f6c 2484 stat = 1;
47a8617c 2485 goto finished;
2e0fef85
JS
2486 }
2487 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2488 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
2489 "0465 Link down longer than 15 "
2490 "seconds. Continuing initialization\n");
858c9f6c 2491 stat = 1;
47a8617c 2492 goto finished;
2e0fef85 2493 }
47a8617c 2494
2e0fef85 2495 if (vport->port_state != LPFC_VPORT_READY)
858c9f6c 2496 goto finished;
2e0fef85 2497 if (vport->num_disc_nodes || vport->fc_prli_sent)
858c9f6c 2498 goto finished;
2e0fef85 2499 if (vport->fc_map_cnt == 0 && time < 2 * HZ)
858c9f6c 2500 goto finished;
2e0fef85 2501 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
858c9f6c
JS
2502 goto finished;
2503
2504 stat = 1;
47a8617c
JS
2505
2506finished:
858c9f6c
JS
2507 spin_unlock_irq(shost->host_lock);
2508 return stat;
92d7f7b0 2509}
47a8617c 2510
e59058c4 2511/**
3621a710 2512 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
e59058c4
JS
2513 * @shost: pointer to SCSI host data structure.
2514 *
2515 * This routine initializes a given SCSI host attributes on a FC port. The
2516 * SCSI host can be either on top of a physical port or a virtual port.
2517 **/
92d7f7b0
JS
2518void lpfc_host_attrib_init(struct Scsi_Host *shost)
2519{
2520 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2521 struct lpfc_hba *phba = vport->phba;
47a8617c 2522 /*
2e0fef85 2523 * Set fixed host attributes. Must done after lpfc_sli_hba_setup().
47a8617c
JS
2524 */
2525
2e0fef85
JS
2526 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2527 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
47a8617c
JS
2528 fc_host_supported_classes(shost) = FC_COS_CLASS3;
2529
2530 memset(fc_host_supported_fc4s(shost), 0,
2e0fef85 2531 sizeof(fc_host_supported_fc4s(shost)));
47a8617c
JS
2532 fc_host_supported_fc4s(shost)[2] = 1;
2533 fc_host_supported_fc4s(shost)[7] = 1;
2534
92d7f7b0
JS
2535 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2536 sizeof fc_host_symbolic_name(shost));
47a8617c
JS
2537
2538 fc_host_supported_speeds(shost) = 0;
2539 if (phba->lmt & LMT_10Gb)
2540 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
a8adb832
JS
2541 if (phba->lmt & LMT_8Gb)
2542 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
47a8617c
JS
2543 if (phba->lmt & LMT_4Gb)
2544 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2545 if (phba->lmt & LMT_2Gb)
2546 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2547 if (phba->lmt & LMT_1Gb)
2548 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2549
2550 fc_host_maxframe_size(shost) =
2e0fef85
JS
2551 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2552 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
47a8617c
JS
2553
2554 /* This value is also unchanging */
2555 memset(fc_host_active_fc4s(shost), 0,
2e0fef85 2556 sizeof(fc_host_active_fc4s(shost)));
47a8617c
JS
2557 fc_host_active_fc4s(shost)[2] = 1;
2558 fc_host_active_fc4s(shost)[7] = 1;
2559
92d7f7b0 2560 fc_host_max_npiv_vports(shost) = phba->max_vpi;
47a8617c 2561 spin_lock_irq(shost->host_lock);
51ef4c26 2562 vport->load_flag &= ~FC_LOADING;
47a8617c 2563 spin_unlock_irq(shost->host_lock);
47a8617c 2564}
dea3101e 2565
e59058c4 2566/**
da0436e9 2567 * lpfc_stop_port_s3 - Stop SLI3 device port
e59058c4
JS
2568 * @phba: pointer to lpfc hba data structure.
2569 *
da0436e9
JS
2570 * This routine is invoked to stop an SLI3 device port, it stops the device
2571 * from generating interrupts and stops the device driver's timers for the
2572 * device.
e59058c4 2573 **/
da0436e9
JS
2574static void
2575lpfc_stop_port_s3(struct lpfc_hba *phba)
db2378e0 2576{
da0436e9
JS
2577 /* Clear all interrupt enable conditions */
2578 writel(0, phba->HCregaddr);
2579 readl(phba->HCregaddr); /* flush */
2580 /* Clear all pending interrupts */
2581 writel(0xffffffff, phba->HAregaddr);
2582 readl(phba->HAregaddr); /* flush */
db2378e0 2583
da0436e9
JS
2584 /* Reset some HBA SLI setup states */
2585 lpfc_stop_hba_timers(phba);
2586 phba->pport->work_port_events = 0;
2587}
db2378e0 2588
da0436e9
JS
2589/**
2590 * lpfc_stop_port_s4 - Stop SLI4 device port
2591 * @phba: pointer to lpfc hba data structure.
2592 *
2593 * This routine is invoked to stop an SLI4 device port, it stops the device
2594 * from generating interrupts and stops the device driver's timers for the
2595 * device.
2596 **/
2597static void
2598lpfc_stop_port_s4(struct lpfc_hba *phba)
2599{
2600 /* Reset some HBA SLI4 setup states */
2601 lpfc_stop_hba_timers(phba);
2602 phba->pport->work_port_events = 0;
2603 phba->sli4_hba.intr_enable = 0;
2604 /* Hard clear it for now, shall have more graceful way to wait later */
2605 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2606}
9399627f 2607
da0436e9
JS
2608/**
2609 * lpfc_stop_port - Wrapper function for stopping hba port
2610 * @phba: Pointer to HBA context object.
2611 *
2612 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2613 * the API jump table function pointer from the lpfc_hba struct.
2614 **/
2615void
2616lpfc_stop_port(struct lpfc_hba *phba)
2617{
2618 phba->lpfc_stop_port(phba);
2619}
db2378e0 2620
da0436e9
JS
2621/**
2622 * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port.
2623 * @phba: pointer to lpfc hba data structure.
2624 *
2625 * This routine is invoked to remove the driver default fcf record from
2626 * the port. This routine currently acts on FCF Index 0.
2627 *
2628 **/
2629void
2630lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2631{
2632 int rc = 0;
2633 LPFC_MBOXQ_t *mboxq;
2634 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2635 uint32_t mbox_tmo, req_len;
2636 uint32_t shdr_status, shdr_add_status;
9399627f 2637
da0436e9
JS
2638 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2639 if (!mboxq) {
2640 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2641 "2020 Failed to allocate mbox for ADD_FCF cmd\n");
2642 return;
db2378e0 2643 }
9399627f 2644
da0436e9
JS
2645 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2646 sizeof(struct lpfc_sli4_cfg_mhdr);
2647 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2648 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2649 req_len, LPFC_SLI4_MBX_EMBED);
9399627f 2650 /*
da0436e9
JS
2651 * In phase 1, there is a single FCF index, 0. In phase2, the driver
2652 * supports multiple FCF indices.
9399627f 2653 */
da0436e9
JS
2654 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2655 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2656 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2657 phba->fcf.fcf_indx);
9399627f 2658
da0436e9
JS
2659 if (!phba->sli4_hba.intr_enable)
2660 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2661 else {
2662 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2663 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9399627f 2664 }
da0436e9
JS
2665 /* The IOCTL status is embedded in the mailbox subheader. */
2666 shdr_status = bf_get(lpfc_mbox_hdr_status,
2667 &del_fcf_record->header.cfg_shdr.response);
2668 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2669 &del_fcf_record->header.cfg_shdr.response);
2670 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2671 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2672 "2516 DEL FCF of default FCF Index failed "
2673 "mbx status x%x, status x%x add_status x%x\n",
2674 rc, shdr_status, shdr_add_status);
9399627f 2675 }
da0436e9
JS
2676 if (rc != MBX_TIMEOUT)
2677 mempool_free(mboxq, phba->mbox_mem_pool);
db2378e0
JS
2678}
2679
e59058c4 2680/**
da0436e9 2681 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
e59058c4 2682 * @phba: pointer to lpfc hba data structure.
da0436e9 2683 * @acqe_link: pointer to the async link completion queue entry.
e59058c4 2684 *
da0436e9
JS
2685 * This routine is to parse the SLI4 link-attention link fault code and
2686 * translate it into the base driver's read link attention mailbox command
2687 * status.
2688 *
2689 * Return: Link-attention status in terms of base driver's coding.
e59058c4 2690 **/
da0436e9
JS
2691static uint16_t
2692lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2693 struct lpfc_acqe_link *acqe_link)
db2378e0 2694{
da0436e9 2695 uint16_t latt_fault;
9399627f 2696
da0436e9
JS
2697 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2698 case LPFC_ASYNC_LINK_FAULT_NONE:
2699 case LPFC_ASYNC_LINK_FAULT_LOCAL:
2700 case LPFC_ASYNC_LINK_FAULT_REMOTE:
2701 latt_fault = 0;
2702 break;
2703 default:
2704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2705 "0398 Invalid link fault code: x%x\n",
2706 bf_get(lpfc_acqe_link_fault, acqe_link));
2707 latt_fault = MBXERR_ERROR;
2708 break;
2709 }
2710 return latt_fault;
db2378e0
JS
2711}
2712
5b75da2f 2713/**
da0436e9 2714 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5b75da2f 2715 * @phba: pointer to lpfc hba data structure.
da0436e9 2716 * @acqe_link: pointer to the async link completion queue entry.
5b75da2f 2717 *
da0436e9
JS
2718 * This routine is to parse the SLI4 link attention type and translate it
2719 * into the base driver's link attention type coding.
5b75da2f 2720 *
da0436e9
JS
2721 * Return: Link attention type in terms of base driver's coding.
2722 **/
2723static uint8_t
2724lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2725 struct lpfc_acqe_link *acqe_link)
5b75da2f 2726{
da0436e9 2727 uint8_t att_type;
5b75da2f 2728
da0436e9
JS
2729 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2730 case LPFC_ASYNC_LINK_STATUS_DOWN:
2731 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2732 att_type = AT_LINK_DOWN;
2733 break;
2734 case LPFC_ASYNC_LINK_STATUS_UP:
2735 /* Ignore physical link up events - wait for logical link up */
2736 att_type = AT_RESERVED;
2737 break;
2738 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2739 att_type = AT_LINK_UP;
2740 break;
2741 default:
2742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2743 "0399 Invalid link attention type: x%x\n",
2744 bf_get(lpfc_acqe_link_status, acqe_link));
2745 att_type = AT_RESERVED;
2746 break;
5b75da2f 2747 }
da0436e9 2748 return att_type;
5b75da2f
JS
2749}
2750
2751/**
da0436e9 2752 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
5b75da2f 2753 * @phba: pointer to lpfc hba data structure.
da0436e9 2754 * @acqe_link: pointer to the async link completion queue entry.
5b75da2f 2755 *
da0436e9
JS
2756 * This routine is to parse the SLI4 link-attention link speed and translate
2757 * it into the base driver's link-attention link speed coding.
2758 *
2759 * Return: Link-attention link speed in terms of base driver's coding.
2760 **/
2761static uint8_t
2762lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2763 struct lpfc_acqe_link *acqe_link)
5b75da2f 2764{
da0436e9
JS
2765 uint8_t link_speed;
2766
2767 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2768 case LPFC_ASYNC_LINK_SPEED_ZERO:
2769 link_speed = LA_UNKNW_LINK;
2770 break;
2771 case LPFC_ASYNC_LINK_SPEED_10MBPS:
2772 link_speed = LA_UNKNW_LINK;
2773 break;
2774 case LPFC_ASYNC_LINK_SPEED_100MBPS:
2775 link_speed = LA_UNKNW_LINK;
2776 break;
2777 case LPFC_ASYNC_LINK_SPEED_1GBPS:
2778 link_speed = LA_1GHZ_LINK;
2779 break;
2780 case LPFC_ASYNC_LINK_SPEED_10GBPS:
2781 link_speed = LA_10GHZ_LINK;
2782 break;
2783 default:
2784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2785 "0483 Invalid link-attention link speed: x%x\n",
2786 bf_get(lpfc_acqe_link_speed, acqe_link));
2787 link_speed = LA_UNKNW_LINK;
2788 break;
2789 }
2790 return link_speed;
2791}
2792
2793/**
2794 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2795 * @phba: pointer to lpfc hba data structure.
2796 * @acqe_link: pointer to the async link completion queue entry.
2797 *
2798 * This routine is to handle the SLI4 asynchronous link event.
2799 **/
2800static void
2801lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2802 struct lpfc_acqe_link *acqe_link)
2803{
2804 struct lpfc_dmabuf *mp;
2805 LPFC_MBOXQ_t *pmb;
2806 MAILBOX_t *mb;
2807 READ_LA_VAR *la;
2808 uint8_t att_type;
2809
2810 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2811 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2812 return;
2813 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2814 if (!pmb) {
2815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2816 "0395 The mboxq allocation failed\n");
2817 return;
2818 }
2819 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2820 if (!mp) {
2821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2822 "0396 The lpfc_dmabuf allocation failed\n");
2823 goto out_free_pmb;
2824 }
2825 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2826 if (!mp->virt) {
2827 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2828 "0397 The mbuf allocation failed\n");
2829 goto out_free_dmabuf;
2830 }
2831
2832 /* Cleanup any outstanding ELS commands */
2833 lpfc_els_flush_all_cmd(phba);
2834
2835 /* Block ELS IOCBs until we have done process link event */
2836 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2837
2838 /* Update link event statistics */
2839 phba->sli.slistat.link_event++;
2840
2841 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2842 lpfc_read_la(phba, pmb, mp);
2843 pmb->vport = phba->pport;
2844
2845 /* Parse and translate status field */
2846 mb = &pmb->u.mb;
2847 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2848
2849 /* Parse and translate link attention fields */
2850 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2851 la->eventTag = acqe_link->event_tag;
2852 la->attType = att_type;
2853 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2854
2855 /* Fake the the following irrelvant fields */
2856 la->topology = TOPOLOGY_PT_PT;
2857 la->granted_AL_PA = 0;
2858 la->il = 0;
2859 la->pb = 0;
2860 la->fa = 0;
2861 la->mm = 0;
2862
2863 /* Keep the link status for extra SLI4 state machine reference */
2864 phba->sli4_hba.link_state.speed =
2865 bf_get(lpfc_acqe_link_speed, acqe_link);
2866 phba->sli4_hba.link_state.duplex =
2867 bf_get(lpfc_acqe_link_duplex, acqe_link);
2868 phba->sli4_hba.link_state.status =
2869 bf_get(lpfc_acqe_link_status, acqe_link);
2870 phba->sli4_hba.link_state.physical =
2871 bf_get(lpfc_acqe_link_physical, acqe_link);
2872 phba->sli4_hba.link_state.fault =
2873 bf_get(lpfc_acqe_link_fault, acqe_link);
2874
2875 /* Invoke the lpfc_handle_latt mailbox command callback function */
2876 lpfc_mbx_cmpl_read_la(phba, pmb);
2877
5b75da2f 2878 return;
da0436e9
JS
2879
2880out_free_dmabuf:
2881 kfree(mp);
2882out_free_pmb:
2883 mempool_free(pmb, phba->mbox_mem_pool);
2884}
2885
2886/**
2887 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2888 * @phba: pointer to lpfc hba data structure.
2889 * @acqe_link: pointer to the async fcoe completion queue entry.
2890 *
2891 * This routine is to handle the SLI4 asynchronous fcoe event.
2892 **/
2893static void
2894lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2895 struct lpfc_acqe_fcoe *acqe_fcoe)
2896{
2897 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2898 int rc;
2899
2900 switch (event_type) {
2901 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2902 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2903 "2546 New FCF found index 0x%x tag 0x%x \n",
2904 acqe_fcoe->fcf_index,
2905 acqe_fcoe->event_tag);
2906 /*
2907 * If the current FCF is in discovered state,
2908 * do nothing.
2909 */
2910 spin_lock_irq(&phba->hbalock);
2911 if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2912 spin_unlock_irq(&phba->hbalock);
2913 break;
2914 }
2915 spin_unlock_irq(&phba->hbalock);
2916
2917 /* Read the FCF table and re-discover SAN. */
2918 rc = lpfc_sli4_read_fcf_record(phba,
2919 LPFC_FCOE_FCF_GET_FIRST);
2920 if (rc)
2921 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2922 "2547 Read FCF record failed 0x%x\n",
2923 rc);
2924 break;
2925
2926 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2928 "2548 FCF Table full count 0x%x tag 0x%x \n",
2929 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2930 acqe_fcoe->event_tag);
2931 break;
2932
2933 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2934 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2935 "2549 FCF disconnected fron network index 0x%x"
2936 " tag 0x%x \n", acqe_fcoe->fcf_index,
2937 acqe_fcoe->event_tag);
2938 /* If the event is not for currently used fcf do nothing */
2939 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2940 break;
2941 /*
2942 * Currently, driver support only one FCF - so treat this as
2943 * a link down.
2944 */
2945 lpfc_linkdown(phba);
2946 /* Unregister FCF if no devices connected to it */
2947 lpfc_unregister_unused_fcf(phba);
2948 break;
2949
2950 default:
2951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2952 "0288 Unknown FCoE event type 0x%x event tag "
2953 "0x%x\n", event_type, acqe_fcoe->event_tag);
2954 break;
2955 }
2956}
2957
2958/**
2959 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2960 * @phba: pointer to lpfc hba data structure.
2961 * @acqe_link: pointer to the async dcbx completion queue entry.
2962 *
2963 * This routine is to handle the SLI4 asynchronous dcbx event.
2964 **/
2965static void
2966lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2967 struct lpfc_acqe_dcbx *acqe_dcbx)
2968{
2969 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2970 "0290 The SLI4 DCBX asynchronous event is not "
2971 "handled yet\n");
2972}
2973
2974/**
2975 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
2976 * @phba: pointer to lpfc hba data structure.
2977 *
2978 * This routine is invoked by the worker thread to process all the pending
2979 * SLI4 asynchronous events.
2980 **/
2981void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2982{
2983 struct lpfc_cq_event *cq_event;
2984
2985 /* First, declare the async event has been handled */
2986 spin_lock_irq(&phba->hbalock);
2987 phba->hba_flag &= ~ASYNC_EVENT;
2988 spin_unlock_irq(&phba->hbalock);
2989 /* Now, handle all the async events */
2990 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2991 /* Get the first event from the head of the event queue */
2992 spin_lock_irq(&phba->hbalock);
2993 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2994 cq_event, struct lpfc_cq_event, list);
2995 spin_unlock_irq(&phba->hbalock);
2996 /* Process the asynchronous event */
2997 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2998 case LPFC_TRAILER_CODE_LINK:
2999 lpfc_sli4_async_link_evt(phba,
3000 &cq_event->cqe.acqe_link);
3001 break;
3002 case LPFC_TRAILER_CODE_FCOE:
3003 lpfc_sli4_async_fcoe_evt(phba,
3004 &cq_event->cqe.acqe_fcoe);
3005 break;
3006 case LPFC_TRAILER_CODE_DCBX:
3007 lpfc_sli4_async_dcbx_evt(phba,
3008 &cq_event->cqe.acqe_dcbx);
3009 break;
3010 default:
3011 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3012 "1804 Invalid asynchrous event code: "
3013 "x%x\n", bf_get(lpfc_trailer_code,
3014 &cq_event->cqe.mcqe_cmpl));
3015 break;
3016 }
3017 /* Free the completion event processed to the free pool */
3018 lpfc_sli4_cq_event_release(phba, cq_event);
3019 }
3020}
3021
3022/**
3023 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3024 * @phba: pointer to lpfc hba data structure.
3025 * @dev_grp: The HBA PCI-Device group number.
3026 *
3027 * This routine is invoked to set up the per HBA PCI-Device group function
3028 * API jump table entries.
3029 *
3030 * Return: 0 if success, otherwise -ENODEV
3031 **/
3032int
3033lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3034{
3035 int rc;
3036
3037 /* Set up lpfc PCI-device group */
3038 phba->pci_dev_grp = dev_grp;
3039
3040 /* The LPFC_PCI_DEV_OC uses SLI4 */
3041 if (dev_grp == LPFC_PCI_DEV_OC)
3042 phba->sli_rev = LPFC_SLI_REV4;
3043
3044 /* Set up device INIT API function jump table */
3045 rc = lpfc_init_api_table_setup(phba, dev_grp);
3046 if (rc)
3047 return -ENODEV;
3048 /* Set up SCSI API function jump table */
3049 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3050 if (rc)
3051 return -ENODEV;
3052 /* Set up SLI API function jump table */
3053 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3054 if (rc)
3055 return -ENODEV;
3056 /* Set up MBOX API function jump table */
3057 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3058 if (rc)
3059 return -ENODEV;
3060
3061 return 0;
5b75da2f
JS
3062}
3063
3064/**
3621a710 3065 * lpfc_log_intr_mode - Log the active interrupt mode
5b75da2f
JS
3066 * @phba: pointer to lpfc hba data structure.
3067 * @intr_mode: active interrupt mode adopted.
3068 *
3069 * This routine it invoked to log the currently used active interrupt mode
3070 * to the device.
3772a991
JS
3071 **/
3072static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
5b75da2f
JS
3073{
3074 switch (intr_mode) {
3075 case 0:
3076 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3077 "0470 Enable INTx interrupt mode.\n");
3078 break;
3079 case 1:
3080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3081 "0481 Enabled MSI interrupt mode.\n");
3082 break;
3083 case 2:
3084 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3085 "0480 Enabled MSI-X interrupt mode.\n");
3086 break;
3087 default:
3088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3089 "0482 Illegal interrupt mode.\n");
3090 break;
3091 }
3092 return;
3093}
3094
5b75da2f 3095/**
3772a991 3096 * lpfc_enable_pci_dev - Enable a generic PCI device.
5b75da2f
JS
3097 * @phba: pointer to lpfc hba data structure.
3098 *
3772a991
JS
3099 * This routine is invoked to enable the PCI device that is common to all
3100 * PCI devices.
5b75da2f
JS
3101 *
3102 * Return codes
3772a991
JS
3103 * 0 - sucessful
3104 * other values - error
5b75da2f 3105 **/
3772a991
JS
3106static int
3107lpfc_enable_pci_dev(struct lpfc_hba *phba)
5b75da2f 3108{
3772a991
JS
3109 struct pci_dev *pdev;
3110 int bars;
5b75da2f 3111
3772a991
JS
3112 /* Obtain PCI device reference */
3113 if (!phba->pcidev)
3114 goto out_error;
3115 else
3116 pdev = phba->pcidev;
3117 /* Select PCI BARs */
3118 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3119 /* Enable PCI device */
3120 if (pci_enable_device_mem(pdev))
3121 goto out_error;
3122 /* Request PCI resource for the device */
3123 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3124 goto out_disable_device;
3125 /* Set up device as PCI master and save state for EEH */
3126 pci_set_master(pdev);
3127 pci_try_set_mwi(pdev);
3128 pci_save_state(pdev);
5b75da2f 3129
3772a991 3130 return 0;
5b75da2f 3131
3772a991
JS
3132out_disable_device:
3133 pci_disable_device(pdev);
3134out_error:
3135 return -ENODEV;
5b75da2f
JS
3136}
3137
3138/**
3772a991 3139 * lpfc_disable_pci_dev - Disable a generic PCI device.
5b75da2f
JS
3140 * @phba: pointer to lpfc hba data structure.
3141 *
3772a991
JS
3142 * This routine is invoked to disable the PCI device that is common to all
3143 * PCI devices.
5b75da2f
JS
3144 **/
3145static void
3772a991 3146lpfc_disable_pci_dev(struct lpfc_hba *phba)
5b75da2f 3147{
3772a991
JS
3148 struct pci_dev *pdev;
3149 int bars;
5b75da2f 3150
3772a991
JS
3151 /* Obtain PCI device reference */
3152 if (!phba->pcidev)
3153 return;
3154 else
3155 pdev = phba->pcidev;
3156 /* Select PCI BARs */
3157 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3158 /* Release PCI resource and disable PCI device */
3159 pci_release_selected_regions(pdev, bars);
3160 pci_disable_device(pdev);
3161 /* Null out PCI private reference to driver */
3162 pci_set_drvdata(pdev, NULL);
5b75da2f
JS
3163
3164 return;
3165}
3166
e59058c4 3167/**
3772a991
JS
3168 * lpfc_reset_hba - Reset a hba
3169 * @phba: pointer to lpfc hba data structure.
e59058c4 3170 *
3772a991
JS
3171 * This routine is invoked to reset a hba device. It brings the HBA
3172 * offline, performs a board restart, and then brings the board back
3173 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3174 * on outstanding mailbox commands.
e59058c4 3175 **/
3772a991
JS
3176void
3177lpfc_reset_hba(struct lpfc_hba *phba)
dea3101e 3178{
3772a991
JS
3179 /* If resets are disabled then set error state and return. */
3180 if (!phba->cfg_enable_hba_reset) {
3181 phba->link_state = LPFC_HBA_ERROR;
3182 return;
3183 }
3184 lpfc_offline_prep(phba);
3185 lpfc_offline(phba);
3186 lpfc_sli_brdrestart(phba);
3187 lpfc_online(phba);
3188 lpfc_unblock_mgmt_io(phba);
3189}
dea3101e 3190
3772a991
JS
3191/**
3192 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3193 * @phba: pointer to lpfc hba data structure.
3194 *
3195 * This routine is invoked to set up the driver internal resources specific to
3196 * support the SLI-3 HBA device it attached to.
3197 *
3198 * Return codes
3199 * 0 - sucessful
3200 * other values - error
3201 **/
3202static int
3203lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3204{
3205 struct lpfc_sli *psli;
dea3101e 3206
2e0fef85 3207 /*
3772a991 3208 * Initialize timers used by driver
2e0fef85 3209 */
dea3101e 3210
3772a991 3211 /* Heartbeat timer */
858c9f6c
JS
3212 init_timer(&phba->hb_tmofunc);
3213 phba->hb_tmofunc.function = lpfc_hb_timeout;
3214 phba->hb_tmofunc.data = (unsigned long)phba;
3215
dea3101e 3216 psli = &phba->sli;
3772a991 3217 /* MBOX heartbeat timer */
dea3101e 3218 init_timer(&psli->mbox_tmo);
3219 psli->mbox_tmo.function = lpfc_mbox_timeout;
2e0fef85 3220 psli->mbox_tmo.data = (unsigned long) phba;
3772a991 3221 /* FCP polling mode timer */
875fbdfe
JSEC
3222 init_timer(&phba->fcp_poll_timer);
3223 phba->fcp_poll_timer.function = lpfc_poll_timeout;
2e0fef85 3224 phba->fcp_poll_timer.data = (unsigned long) phba;
3772a991 3225 /* Fabric block timer */
92d7f7b0
JS
3226 init_timer(&phba->fabric_block_timer);
3227 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3228 phba->fabric_block_timer.data = (unsigned long) phba;
3772a991 3229 /* EA polling mode timer */
9399627f
JS
3230 init_timer(&phba->eratt_poll);
3231 phba->eratt_poll.function = lpfc_poll_eratt;
3232 phba->eratt_poll.data = (unsigned long) phba;
dea3101e 3233
3772a991
JS
3234 /* Host attention work mask setup */
3235 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3236 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
dea3101e 3237
3772a991
JS
3238 /* Get all the module params for configuring this host */
3239 lpfc_get_cfgparam(phba);
dea3101e 3240 /*
3772a991
JS
3241 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3242 * used to create the sg_dma_buf_pool must be dynamically calculated.
3243 * 2 segments are added since the IOCB needs a command and response bde.
dea3101e 3244 */
3772a991
JS
3245 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3246 sizeof(struct fcp_rsp) +
3247 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3248
3249 if (phba->cfg_enable_bg) {
3250 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3251 phba->cfg_sg_dma_buf_size +=
3252 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
901a920f 3253 }
dea3101e 3254
3772a991
JS
3255 /* Also reinitialize the host templates with new values. */
3256 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3257 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
dea3101e 3258
3772a991
JS
3259 phba->max_vpi = LPFC_MAX_VPI;
3260 /* This will be set to correct value after config_port mbox */
3261 phba->max_vports = 0;
dea3101e 3262
3772a991
JS
3263 /*
3264 * Initialize the SLI Layer to run with lpfc HBAs.
3265 */
3266 lpfc_sli_setup(phba);
3267 lpfc_sli_queue_setup(phba);
ed957684 3268
3772a991
JS
3269 /* Allocate device driver memory */
3270 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3271 return -ENOMEM;
51ef4c26 3272
3772a991
JS
3273 return 0;
3274}
ed957684 3275
3772a991
JS
3276/**
3277 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3278 * @phba: pointer to lpfc hba data structure.
3279 *
3280 * This routine is invoked to unset the driver internal resources set up
3281 * specific for supporting the SLI-3 HBA device it attached to.
3282 **/
3283static void
3284lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3285{
3286 /* Free device driver memory allocated */
3287 lpfc_mem_free_all(phba);
3163f725 3288
3772a991
JS
3289 return;
3290}
dea3101e 3291
3772a991 3292/**
da0436e9 3293 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3772a991
JS
3294 * @phba: pointer to lpfc hba data structure.
3295 *
da0436e9
JS
3296 * This routine is invoked to set up the driver internal resources specific to
3297 * support the SLI-4 HBA device it attached to.
3772a991
JS
3298 *
3299 * Return codes
da0436e9
JS
3300 * 0 - sucessful
3301 * other values - error
3772a991
JS
3302 **/
3303static int
da0436e9 3304lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3772a991 3305{
da0436e9
JS
3306 struct lpfc_sli *psli;
3307 int rc;
3308 int i, hbq_count;
3309
3310 /* Before proceed, wait for POST done and device ready */
3311 rc = lpfc_sli4_post_status_check(phba);
3312 if (rc)
3313 return -ENODEV;
3314
3772a991 3315 /*
da0436e9 3316 * Initialize timers used by driver
3772a991 3317 */
3772a991 3318
da0436e9
JS
3319 /* Heartbeat timer */
3320 init_timer(&phba->hb_tmofunc);
3321 phba->hb_tmofunc.function = lpfc_hb_timeout;
3322 phba->hb_tmofunc.data = (unsigned long)phba;
3772a991 3323
da0436e9
JS
3324 psli = &phba->sli;
3325 /* MBOX heartbeat timer */
3326 init_timer(&psli->mbox_tmo);
3327 psli->mbox_tmo.function = lpfc_mbox_timeout;
3328 psli->mbox_tmo.data = (unsigned long) phba;
3329 /* Fabric block timer */
3330 init_timer(&phba->fabric_block_timer);
3331 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3332 phba->fabric_block_timer.data = (unsigned long) phba;
3333 /* EA polling mode timer */
3334 init_timer(&phba->eratt_poll);
3335 phba->eratt_poll.function = lpfc_poll_eratt;
3336 phba->eratt_poll.data = (unsigned long) phba;
3337 /*
3338 * We need to do a READ_CONFIG mailbox command here before
3339 * calling lpfc_get_cfgparam. For VFs this will report the
3340 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3341 * All of the resources allocated
3342 * for this Port are tied to these values.
3343 */
3344 /* Get all the module params for configuring this host */
3345 lpfc_get_cfgparam(phba);
3346 phba->max_vpi = LPFC_MAX_VPI;
3347 /* This will be set to correct value after the read_config mbox */
3348 phba->max_vports = 0;
3772a991 3349
da0436e9
JS
3350 /* Program the default value of vlan_id and fc_map */
3351 phba->valid_vlan = 0;
3352 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3353 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3354 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3772a991 3355
da0436e9
JS
3356 /*
3357 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3358 * used to create the sg_dma_buf_pool must be dynamically calculated.
3359 * 2 segments are added since the IOCB needs a command and response bde.
3360 * To insure that the scsi sgl does not cross a 4k page boundary only
3361 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3362 * Table of sgl sizes and seg_cnt:
3363 * sgl size, sg_seg_cnt total seg
3364 * 1k 50 52
3365 * 2k 114 116
3366 * 4k 242 244
3367 * 8k 498 500
3368 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3369 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3370 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3371 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3372 */
3373 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3374 phba->cfg_sg_seg_cnt = 50;
3375 else if (phba->cfg_sg_seg_cnt <= 114)
3376 phba->cfg_sg_seg_cnt = 114;
3377 else if (phba->cfg_sg_seg_cnt <= 242)
3378 phba->cfg_sg_seg_cnt = 242;
3379 else
3380 phba->cfg_sg_seg_cnt = 498;
3772a991 3381
da0436e9
JS
3382 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3383 + sizeof(struct fcp_rsp);
3384 phba->cfg_sg_dma_buf_size +=
3385 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3772a991 3386
da0436e9
JS
3387 /* Initialize buffer queue management fields */
3388 hbq_count = lpfc_sli_hbq_count();
3389 for (i = 0; i < hbq_count; ++i)
3390 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3391 INIT_LIST_HEAD(&phba->rb_pend_list);
3392 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3393 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3772a991 3394
da0436e9
JS
3395 /*
3396 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3397 */
3398 /* Initialize the Abort scsi buffer list used by driver */
3399 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3400 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3401 /* This abort list used by worker thread */
3402 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3772a991 3403
da0436e9
JS
3404 /*
3405 * Initialize dirver internal slow-path work queues
3406 */
3772a991 3407
da0436e9
JS
3408 /* Driver internel slow-path CQ Event pool */
3409 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3410 /* Response IOCB work queue list */
3411 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3412 /* Asynchronous event CQ Event work queue list */
3413 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3414 /* Fast-path XRI aborted CQ Event work queue list */
3415 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3416 /* Slow-path XRI aborted CQ Event work queue list */
3417 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3418 /* Receive queue CQ Event work queue list */
3419 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3420
3421 /* Initialize the driver internal SLI layer lists. */
3422 lpfc_sli_setup(phba);
3423 lpfc_sli_queue_setup(phba);
3772a991 3424
da0436e9
JS
3425 /* Allocate device driver memory */
3426 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3427 if (rc)
3428 return -ENOMEM;
3429
3430 /* Create the bootstrap mailbox command */
3431 rc = lpfc_create_bootstrap_mbox(phba);
3432 if (unlikely(rc))
3433 goto out_free_mem;
3434
3435 /* Set up the host's endian order with the device. */
3436 rc = lpfc_setup_endian_order(phba);
3437 if (unlikely(rc))
3438 goto out_free_bsmbx;
3439
3440 /* Set up the hba's configuration parameters. */
3441 rc = lpfc_sli4_read_config(phba);
3442 if (unlikely(rc))
3443 goto out_free_bsmbx;
3444
3445 /* Perform a function reset */
3446 rc = lpfc_pci_function_reset(phba);
3447 if (unlikely(rc))
3448 goto out_free_bsmbx;
3449
3450 /* Create all the SLI4 queues */
3451 rc = lpfc_sli4_queue_create(phba);
3452 if (rc)
3453 goto out_free_bsmbx;
3454
3455 /* Create driver internal CQE event pool */
3456 rc = lpfc_sli4_cq_event_pool_create(phba);
3457 if (rc)
3458 goto out_destroy_queue;
3459
3460 /* Initialize and populate the iocb list per host */
3461 rc = lpfc_init_sgl_list(phba);
3462 if (rc) {
3463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3464 "1400 Failed to initialize sgl list.\n");
3465 goto out_destroy_cq_event_pool;
3466 }
3467 rc = lpfc_init_active_sgl_array(phba);
3468 if (rc) {
3469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3470 "1430 Failed to initialize sgl list.\n");
3471 goto out_free_sgl_list;
3472 }
3473
3474 rc = lpfc_sli4_init_rpi_hdrs(phba);
3475 if (rc) {
3476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3477 "1432 Failed to initialize rpi headers.\n");
3478 goto out_free_active_sgl;
3479 }
3480
3481 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3482 phba->cfg_fcp_eq_count), GFP_KERNEL);
3483 if (!phba->sli4_hba.fcp_eq_hdl) {
3484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3485 "2572 Failed allocate memory for fast-path "
3486 "per-EQ handle array\n");
3487 goto out_remove_rpi_hdrs;
3488 }
3489
3490 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3491 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3492 if (!phba->sli4_hba.msix_entries) {
3493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3494 "2573 Failed allocate memory for msi-x "
3495 "interrupt vector entries\n");
3496 goto out_free_fcp_eq_hdl;
3497 }
3498
3499 return rc;
3500
3501out_free_fcp_eq_hdl:
3502 kfree(phba->sli4_hba.fcp_eq_hdl);
3503out_remove_rpi_hdrs:
3504 lpfc_sli4_remove_rpi_hdrs(phba);
3505out_free_active_sgl:
3506 lpfc_free_active_sgl(phba);
3507out_free_sgl_list:
3508 lpfc_free_sgl_list(phba);
3509out_destroy_cq_event_pool:
3510 lpfc_sli4_cq_event_pool_destroy(phba);
3511out_destroy_queue:
3512 lpfc_sli4_queue_destroy(phba);
3513out_free_bsmbx:
3514 lpfc_destroy_bootstrap_mbox(phba);
3515out_free_mem:
3516 lpfc_mem_free(phba);
3517 return rc;
3518}
3519
3520/**
3521 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3522 * @phba: pointer to lpfc hba data structure.
3523 *
3524 * This routine is invoked to unset the driver internal resources set up
3525 * specific for supporting the SLI-4 HBA device it attached to.
3526 **/
3527static void
3528lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3529{
3530 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3531
3532 /* unregister default FCFI from the HBA */
3533 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3534
3535 /* Free the default FCR table */
3536 lpfc_sli_remove_dflt_fcf(phba);
3537
3538 /* Free memory allocated for msi-x interrupt vector entries */
3539 kfree(phba->sli4_hba.msix_entries);
3540
3541 /* Free memory allocated for fast-path work queue handles */
3542 kfree(phba->sli4_hba.fcp_eq_hdl);
3543
3544 /* Free the allocated rpi headers. */
3545 lpfc_sli4_remove_rpi_hdrs(phba);
3546
3547 /* Free the ELS sgl list */
3548 lpfc_free_active_sgl(phba);
3549 lpfc_free_sgl_list(phba);
3550
3551 /* Free the SCSI sgl management array */
3552 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3553
3554 /* Free the SLI4 queues */
3555 lpfc_sli4_queue_destroy(phba);
3556
3557 /* Free the completion queue EQ event pool */
3558 lpfc_sli4_cq_event_release_all(phba);
3559 lpfc_sli4_cq_event_pool_destroy(phba);
3560
3561 /* Reset SLI4 HBA FCoE function */
3562 lpfc_pci_function_reset(phba);
3563
3564 /* Free the bsmbx region. */
3565 lpfc_destroy_bootstrap_mbox(phba);
3566
3567 /* Free the SLI Layer memory with SLI4 HBAs */
3568 lpfc_mem_free_all(phba);
3569
3570 /* Free the current connect table */
3571 list_for_each_entry_safe(conn_entry, next_conn_entry,
3572 &phba->fcf_conn_rec_list, list)
3573 kfree(conn_entry);
3574
3575 return;
3576}
3577
3578/**
3579 * lpfc_init_api_table_setup - Set up init api fucntion jump table
3580 * @phba: The hba struct for which this call is being executed.
3581 * @dev_grp: The HBA PCI-Device group number.
3582 *
3583 * This routine sets up the device INIT interface API function jump table
3584 * in @phba struct.
3585 *
3586 * Returns: 0 - success, -ENODEV - failure.
3587 **/
3588int
3589lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3590{
3591 switch (dev_grp) {
3592 case LPFC_PCI_DEV_LP:
3593 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3594 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3595 phba->lpfc_stop_port = lpfc_stop_port_s3;
3596 break;
3597 case LPFC_PCI_DEV_OC:
3598 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3599 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3600 phba->lpfc_stop_port = lpfc_stop_port_s4;
3601 break;
3602 default:
3603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3604 "1431 Invalid HBA PCI-device group: 0x%x\n",
3605 dev_grp);
3606 return -ENODEV;
3607 break;
3608 }
3609 return 0;
3610}
3611
3612/**
3613 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
3614 * @phba: pointer to lpfc hba data structure.
3615 *
3616 * This routine is invoked to set up the driver internal resources before the
3617 * device specific resource setup to support the HBA device it attached to.
3618 *
3619 * Return codes
3620 * 0 - sucessful
3621 * other values - error
3622 **/
3623static int
3624lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3625{
3626 /*
3627 * Driver resources common to all SLI revisions
3628 */
3629 atomic_set(&phba->fast_event_count, 0);
3630 spin_lock_init(&phba->hbalock);
3631
3632 /* Initialize ndlp management spinlock */
3633 spin_lock_init(&phba->ndlp_lock);
3634
3635 INIT_LIST_HEAD(&phba->port_list);
3636 INIT_LIST_HEAD(&phba->work_list);
3637 init_waitqueue_head(&phba->wait_4_mlo_m_q);
3638
3639 /* Initialize the wait queue head for the kernel thread */
3640 init_waitqueue_head(&phba->work_waitq);
3641
3642 /* Initialize the scsi buffer list used by driver for scsi IO */
3643 spin_lock_init(&phba->scsi_buf_list_lock);
3644 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3645
3646 /* Initialize the fabric iocb list */
3647 INIT_LIST_HEAD(&phba->fabric_iocb_list);
3648
3649 /* Initialize list to save ELS buffers */
3650 INIT_LIST_HEAD(&phba->elsbuf);
3651
3652 /* Initialize FCF connection rec list */
3653 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3654
3655 return 0;
3656}
3657
3658/**
3659 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3660 * @phba: pointer to lpfc hba data structure.
3661 *
3662 * This routine is invoked to set up the driver internal resources after the
3663 * device specific resource setup to support the HBA device it attached to.
3664 *
3665 * Return codes
3666 * 0 - sucessful
3667 * other values - error
3668 **/
3669static int
3670lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3671{
3672 int error;
3673
3674 /* Startup the kernel thread for this host adapter. */
3675 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3676 "lpfc_worker_%d", phba->brd_no);
3677 if (IS_ERR(phba->worker_thread)) {
3678 error = PTR_ERR(phba->worker_thread);
3679 return error;
3772a991
JS
3680 }
3681
3682 return 0;
3683}
3684
3685/**
3686 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3687 * @phba: pointer to lpfc hba data structure.
3688 *
3689 * This routine is invoked to unset the driver internal resources set up after
3690 * the device specific resource setup for supporting the HBA device it
3691 * attached to.
3692 **/
3693static void
3694lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3695{
3696 /* Stop kernel worker thread */
3697 kthread_stop(phba->worker_thread);
3698}
3699
3700/**
3701 * lpfc_free_iocb_list - Free iocb list.
3702 * @phba: pointer to lpfc hba data structure.
3703 *
3704 * This routine is invoked to free the driver's IOCB list and memory.
3705 **/
3706static void
3707lpfc_free_iocb_list(struct lpfc_hba *phba)
3708{
3709 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3710
3711 spin_lock_irq(&phba->hbalock);
3712 list_for_each_entry_safe(iocbq_entry, iocbq_next,
3713 &phba->lpfc_iocb_list, list) {
3714 list_del(&iocbq_entry->list);
3715 kfree(iocbq_entry);
3716 phba->total_iocbq_bufs--;
98c9ea5c 3717 }
3772a991
JS
3718 spin_unlock_irq(&phba->hbalock);
3719
3720 return;
3721}
3722
3723/**
3724 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3725 * @phba: pointer to lpfc hba data structure.
3726 *
3727 * This routine is invoked to allocate and initizlize the driver's IOCB
3728 * list and set up the IOCB tag array accordingly.
3729 *
3730 * Return codes
3731 * 0 - sucessful
3732 * other values - error
3733 **/
3734static int
3735lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3736{
3737 struct lpfc_iocbq *iocbq_entry = NULL;
3738 uint16_t iotag;
3739 int i;
dea3101e 3740
3741 /* Initialize and populate the iocb list per host. */
3742 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3772a991 3743 for (i = 0; i < iocb_count; i++) {
dd00cc48 3744 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
dea3101e 3745 if (iocbq_entry == NULL) {
3746 printk(KERN_ERR "%s: only allocated %d iocbs of "
3747 "expected %d count. Unloading driver.\n",
cadbd4a5 3748 __func__, i, LPFC_IOCB_LIST_CNT);
dea3101e 3749 goto out_free_iocbq;
3750 }
3751
604a3e30
JB
3752 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3753 if (iotag == 0) {
3772a991 3754 kfree(iocbq_entry);
604a3e30 3755 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3772a991 3756 "Unloading driver.\n", __func__);
604a3e30
JB
3757 goto out_free_iocbq;
3758 }
3772a991 3759 iocbq_entry->sli4_xritag = NO_XRI;
2e0fef85
JS
3760
3761 spin_lock_irq(&phba->hbalock);
dea3101e 3762 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3763 phba->total_iocbq_bufs++;
2e0fef85 3764 spin_unlock_irq(&phba->hbalock);
dea3101e 3765 }
3766
3772a991 3767 return 0;
dea3101e 3768
3772a991
JS
3769out_free_iocbq:
3770 lpfc_free_iocb_list(phba);
dea3101e 3771
3772a991
JS
3772 return -ENOMEM;
3773}
5e9d9b82 3774
3772a991 3775/**
da0436e9
JS
3776 * lpfc_free_sgl_list - Free sgl list.
3777 * @phba: pointer to lpfc hba data structure.
3772a991 3778 *
da0436e9 3779 * This routine is invoked to free the driver's sgl list and memory.
3772a991 3780 **/
da0436e9
JS
3781static void
3782lpfc_free_sgl_list(struct lpfc_hba *phba)
3772a991 3783{
da0436e9
JS
3784 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3785 LIST_HEAD(sglq_list);
3786 int rc = 0;
dea3101e 3787
da0436e9
JS
3788 spin_lock_irq(&phba->hbalock);
3789 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3790 spin_unlock_irq(&phba->hbalock);
dea3101e 3791
da0436e9
JS
3792 list_for_each_entry_safe(sglq_entry, sglq_next,
3793 &sglq_list, list) {
3794 list_del(&sglq_entry->list);
3795 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3796 kfree(sglq_entry);
3797 phba->sli4_hba.total_sglq_bufs--;
3798 }
3799 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3800 if (rc) {
3801 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3802 "2005 Unable to deregister pages from HBA: %x", rc);
3772a991 3803 }
da0436e9
JS
3804 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3805}
92d7f7b0 3806
da0436e9
JS
3807/**
3808 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3809 * @phba: pointer to lpfc hba data structure.
3810 *
3811 * This routine is invoked to allocate the driver's active sgl memory.
3812 * This array will hold the sglq_entry's for active IOs.
3813 **/
3814static int
3815lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3816{
3817 int size;
3818 size = sizeof(struct lpfc_sglq *);
3819 size *= phba->sli4_hba.max_cfg_param.max_xri;
3820
3821 phba->sli4_hba.lpfc_sglq_active_list =
3822 kzalloc(size, GFP_KERNEL);
3823 if (!phba->sli4_hba.lpfc_sglq_active_list)
3824 return -ENOMEM;
3825 return 0;
3772a991
JS
3826}
3827
3828/**
da0436e9 3829 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3772a991
JS
3830 * @phba: pointer to lpfc hba data structure.
3831 *
da0436e9
JS
3832 * This routine is invoked to walk through the array of active sglq entries
3833 * and free all of the resources.
3834 * This is just a place holder for now.
3772a991
JS
3835 **/
3836static void
da0436e9 3837lpfc_free_active_sgl(struct lpfc_hba *phba)
3772a991 3838{
da0436e9 3839 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3772a991
JS
3840}
3841
3842/**
da0436e9 3843 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3772a991
JS
3844 * @phba: pointer to lpfc hba data structure.
3845 *
da0436e9
JS
3846 * This routine is invoked to allocate and initizlize the driver's sgl
3847 * list and set up the sgl xritag tag array accordingly.
3772a991
JS
3848 *
3849 * Return codes
da0436e9
JS
3850 * 0 - sucessful
3851 * other values - error
3772a991
JS
3852 **/
3853static int
da0436e9 3854lpfc_init_sgl_list(struct lpfc_hba *phba)
3772a991 3855{
da0436e9
JS
3856 struct lpfc_sglq *sglq_entry = NULL;
3857 int i;
3858 int els_xri_cnt;
3859
3860 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3861 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3862 "2400 lpfc_init_sgl_list els %d.\n",
3863 els_xri_cnt);
3864 /* Initialize and populate the sglq list per host/VF. */
3865 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3866 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3867
3868 /* Sanity check on XRI management */
3869 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3870 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3871 "2562 No room left for SCSI XRI allocation: "
3872 "max_xri=%d, els_xri=%d\n",
3873 phba->sli4_hba.max_cfg_param.max_xri,
3874 els_xri_cnt);
3875 return -ENOMEM;
3876 }
3772a991 3877
da0436e9
JS
3878 /* Allocate memory for the ELS XRI management array */
3879 phba->sli4_hba.lpfc_els_sgl_array =
3880 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3881 GFP_KERNEL);
0ff10d46 3882
da0436e9
JS
3883 if (!phba->sli4_hba.lpfc_els_sgl_array) {
3884 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3885 "2401 Failed to allocate memory for ELS "
3886 "XRI management array of size %d.\n",
3887 els_xri_cnt);
3888 return -ENOMEM;
3889 }
2e0fef85 3890
da0436e9
JS
3891 /* Keep the SCSI XRI into the XRI management array */
3892 phba->sli4_hba.scsi_xri_max =
3893 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3894 phba->sli4_hba.scsi_xri_cnt = 0;
3895
3896 phba->sli4_hba.lpfc_scsi_psb_array =
3897 kzalloc((sizeof(struct lpfc_scsi_buf *) *
3898 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3899
3900 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3901 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3902 "2563 Failed to allocate memory for SCSI "
3903 "XRI management array of size %d.\n",
3904 phba->sli4_hba.scsi_xri_max);
3905 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3906 return -ENOMEM;
3907 }
3908
3909 for (i = 0; i < els_xri_cnt; i++) {
3910 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3911 if (sglq_entry == NULL) {
3912 printk(KERN_ERR "%s: only allocated %d sgls of "
3913 "expected %d count. Unloading driver.\n",
3914 __func__, i, els_xri_cnt);
3915 goto out_free_mem;
3916 }
3917
3918 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3919 if (sglq_entry->sli4_xritag == NO_XRI) {
3920 kfree(sglq_entry);
3921 printk(KERN_ERR "%s: failed to allocate XRI.\n"
3922 "Unloading driver.\n", __func__);
3923 goto out_free_mem;
3924 }
3925 sglq_entry->buff_type = GEN_BUFF_TYPE;
3926 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3927 if (sglq_entry->virt == NULL) {
3928 kfree(sglq_entry);
3929 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
3930 "Unloading driver.\n", __func__);
3931 goto out_free_mem;
3932 }
3933 sglq_entry->sgl = sglq_entry->virt;
3934 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3935
3936 /* The list order is used by later block SGL registraton */
3937 spin_lock_irq(&phba->hbalock);
3938 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3939 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3940 phba->sli4_hba.total_sglq_bufs++;
3941 spin_unlock_irq(&phba->hbalock);
3942 }
3943 return 0;
3944
3945out_free_mem:
3946 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3947 lpfc_free_sgl_list(phba);
3948 return -ENOMEM;
3949}
3950
3951/**
3952 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
3953 * @phba: pointer to lpfc hba data structure.
3954 *
3955 * This routine is invoked to post rpi header templates to the
3956 * HBA consistent with the SLI-4 interface spec. This routine
3957 * posts a PAGE_SIZE memory region to the port to hold up to
3958 * PAGE_SIZE modulo 64 rpi context headers.
3959 * No locks are held here because this is an initialization routine
3960 * called only from probe or lpfc_online when interrupts are not
3961 * enabled and the driver is reinitializing the device.
3962 *
3963 * Return codes
3964 * 0 - sucessful
3965 * ENOMEM - No availble memory
3966 * EIO - The mailbox failed to complete successfully.
3967 **/
3968int
3969lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3970{
3971 int rc = 0;
3972 int longs;
3973 uint16_t rpi_count;
3974 struct lpfc_rpi_hdr *rpi_hdr;
3975
3976 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
3977
3978 /*
3979 * Provision an rpi bitmask range for discovery. The total count
3980 * is the difference between max and base + 1.
3981 */
3982 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
3983 phba->sli4_hba.max_cfg_param.max_rpi - 1;
3984
3985 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
3986 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
3987 GFP_KERNEL);
3988 if (!phba->sli4_hba.rpi_bmask)
3989 return -ENOMEM;
3990
3991 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
3992 if (!rpi_hdr) {
3993 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3994 "0391 Error during rpi post operation\n");
3995 lpfc_sli4_remove_rpis(phba);
3996 rc = -ENODEV;
3997 }
3998
3999 return rc;
4000}
4001
4002/**
4003 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4004 * @phba: pointer to lpfc hba data structure.
4005 *
4006 * This routine is invoked to allocate a single 4KB memory region to
4007 * support rpis and stores them in the phba. This single region
4008 * provides support for up to 64 rpis. The region is used globally
4009 * by the device.
4010 *
4011 * Returns:
4012 * A valid rpi hdr on success.
4013 * A NULL pointer on any failure.
4014 **/
4015struct lpfc_rpi_hdr *
4016lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4017{
4018 uint16_t rpi_limit, curr_rpi_range;
4019 struct lpfc_dmabuf *dmabuf;
4020 struct lpfc_rpi_hdr *rpi_hdr;
4021
4022 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4023 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4024
4025 spin_lock_irq(&phba->hbalock);
4026 curr_rpi_range = phba->sli4_hba.next_rpi;
4027 spin_unlock_irq(&phba->hbalock);
4028
4029 /*
4030 * The port has a limited number of rpis. The increment here
4031 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4032 * and to allow the full max_rpi range per port.
4033 */
4034 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4035 return NULL;
4036
4037 /*
4038 * First allocate the protocol header region for the port. The
4039 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4040 */
4041 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4042 if (!dmabuf)
4043 return NULL;
4044
4045 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4046 LPFC_HDR_TEMPLATE_SIZE,
4047 &dmabuf->phys,
4048 GFP_KERNEL);
4049 if (!dmabuf->virt) {
4050 rpi_hdr = NULL;
4051 goto err_free_dmabuf;
4052 }
4053
4054 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4055 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4056 rpi_hdr = NULL;
4057 goto err_free_coherent;
4058 }
4059
4060 /* Save the rpi header data for cleanup later. */
4061 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4062 if (!rpi_hdr)
4063 goto err_free_coherent;
4064
4065 rpi_hdr->dmabuf = dmabuf;
4066 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4067 rpi_hdr->page_count = 1;
4068 spin_lock_irq(&phba->hbalock);
4069 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4070 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4071
4072 /*
4073 * The next_rpi stores the next module-64 rpi value to post
4074 * in any subsequent rpi memory region postings.
4075 */
4076 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4077 spin_unlock_irq(&phba->hbalock);
4078 return rpi_hdr;
4079
4080 err_free_coherent:
4081 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4082 dmabuf->virt, dmabuf->phys);
4083 err_free_dmabuf:
4084 kfree(dmabuf);
4085 return NULL;
4086}
4087
4088/**
4089 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4090 * @phba: pointer to lpfc hba data structure.
4091 *
4092 * This routine is invoked to remove all memory resources allocated
4093 * to support rpis. This routine presumes the caller has released all
4094 * rpis consumed by fabric or port logins and is prepared to have
4095 * the header pages removed.
4096 **/
4097void
4098lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4099{
4100 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4101
4102 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4103 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4104 list_del(&rpi_hdr->list);
4105 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4106 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4107 kfree(rpi_hdr->dmabuf);
4108 kfree(rpi_hdr);
4109 }
4110
4111 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4112 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4113}
4114
4115/**
4116 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4117 * @pdev: pointer to pci device data structure.
4118 *
4119 * This routine is invoked to allocate the driver hba data structure for an
4120 * HBA device. If the allocation is successful, the phba reference to the
4121 * PCI device data structure is set.
4122 *
4123 * Return codes
4124 * pointer to @phba - sucessful
4125 * NULL - error
4126 **/
4127static struct lpfc_hba *
4128lpfc_hba_alloc(struct pci_dev *pdev)
4129{
4130 struct lpfc_hba *phba;
4131
4132 /* Allocate memory for HBA structure */
4133 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4134 if (!phba) {
4135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4136 "1417 Failed to allocate hba struct.\n");
4137 return NULL;
4138 }
4139
4140 /* Set reference to PCI device in HBA structure */
4141 phba->pcidev = pdev;
4142
4143 /* Assign an unused board number */
4144 phba->brd_no = lpfc_get_instance();
4145 if (phba->brd_no < 0) {
4146 kfree(phba);
4147 return NULL;
4148 }
4149
4150 return phba;
4151}
4152
4153/**
4154 * lpfc_hba_free - Free driver hba data structure with a device.
4155 * @phba: pointer to lpfc hba data structure.
4156 *
4157 * This routine is invoked to free the driver hba data structure with an
4158 * HBA device.
4159 **/
4160static void
4161lpfc_hba_free(struct lpfc_hba *phba)
4162{
4163 /* Release the driver assigned board number */
4164 idr_remove(&lpfc_hba_index, phba->brd_no);
4165
4166 kfree(phba);
4167 return;
4168}
4169
4170/**
4171 * lpfc_create_shost - Create hba physical port with associated scsi host.
4172 * @phba: pointer to lpfc hba data structure.
4173 *
4174 * This routine is invoked to create HBA physical port and associate a SCSI
4175 * host with it.
4176 *
4177 * Return codes
4178 * 0 - sucessful
4179 * other values - error
4180 **/
4181static int
4182lpfc_create_shost(struct lpfc_hba *phba)
4183{
4184 struct lpfc_vport *vport;
4185 struct Scsi_Host *shost;
4186
4187 /* Initialize HBA FC structure */
4188 phba->fc_edtov = FF_DEF_EDTOV;
4189 phba->fc_ratov = FF_DEF_RATOV;
4190 phba->fc_altov = FF_DEF_ALTOV;
4191 phba->fc_arbtov = FF_DEF_ARBTOV;
4192
4193 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4194 if (!vport)
4195 return -ENODEV;
4196
4197 shost = lpfc_shost_from_vport(vport);
4198 phba->pport = vport;
4199 lpfc_debugfs_initialize(vport);
4200 /* Put reference to SCSI host to driver's device private data */
4201 pci_set_drvdata(phba->pcidev, shost);
2e0fef85 4202
3772a991
JS
4203 return 0;
4204}
db2378e0 4205
3772a991
JS
4206/**
4207 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4208 * @phba: pointer to lpfc hba data structure.
4209 *
4210 * This routine is invoked to destroy HBA physical port and the associated
4211 * SCSI host.
4212 **/
4213static void
4214lpfc_destroy_shost(struct lpfc_hba *phba)
4215{
4216 struct lpfc_vport *vport = phba->pport;
4217
4218 /* Destroy physical port that associated with the SCSI host */
4219 destroy_port(vport);
4220
4221 return;
4222}
4223
4224/**
4225 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4226 * @phba: pointer to lpfc hba data structure.
4227 * @shost: the shost to be used to detect Block guard settings.
4228 *
4229 * This routine sets up the local Block guard protocol settings for @shost.
4230 * This routine also allocates memory for debugging bg buffers.
4231 **/
4232static void
4233lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4234{
4235 int pagecnt = 10;
4236 if (lpfc_prot_mask && lpfc_prot_guard) {
4237 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4238 "1478 Registering BlockGuard with the "
4239 "SCSI layer\n");
4240 scsi_host_set_prot(shost, lpfc_prot_mask);
4241 scsi_host_set_guard(shost, lpfc_prot_guard);
4242 }
4243 if (!_dump_buf_data) {
4244 while (pagecnt) {
4245 spin_lock_init(&_dump_buf_lock);
4246 _dump_buf_data =
4247 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4248 if (_dump_buf_data) {
4249 printk(KERN_ERR "BLKGRD allocated %d pages for "
4250 "_dump_buf_data at 0x%p\n",
4251 (1 << pagecnt), _dump_buf_data);
4252 _dump_buf_data_order = pagecnt;
4253 memset(_dump_buf_data, 0,
4254 ((1 << PAGE_SHIFT) << pagecnt));
4255 break;
4256 } else
4257 --pagecnt;
4258 }
4259 if (!_dump_buf_data_order)
4260 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4261 "memory for hexdump\n");
4262 } else
4263 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4264 "\n", _dump_buf_data);
4265 if (!_dump_buf_dif) {
4266 while (pagecnt) {
4267 _dump_buf_dif =
4268 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4269 if (_dump_buf_dif) {
4270 printk(KERN_ERR "BLKGRD allocated %d pages for "
4271 "_dump_buf_dif at 0x%p\n",
4272 (1 << pagecnt), _dump_buf_dif);
4273 _dump_buf_dif_order = pagecnt;
4274 memset(_dump_buf_dif, 0,
4275 ((1 << PAGE_SHIFT) << pagecnt));
4276 break;
4277 } else
4278 --pagecnt;
4279 }
4280 if (!_dump_buf_dif_order)
4281 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4282 "memory for hexdump\n");
4283 } else
4284 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4285 _dump_buf_dif);
4286}
4287
4288/**
4289 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4290 * @phba: pointer to lpfc hba data structure.
4291 *
4292 * This routine is invoked to perform all the necessary post initialization
4293 * setup for the device.
4294 **/
4295static void
4296lpfc_post_init_setup(struct lpfc_hba *phba)
4297{
4298 struct Scsi_Host *shost;
4299 struct lpfc_adapter_event_header adapter_event;
4300
4301 /* Get the default values for Model Name and Description */
4302 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4303
4304 /*
4305 * hba setup may have changed the hba_queue_depth so we need to
4306 * adjust the value of can_queue.
4307 */
4308 shost = pci_get_drvdata(phba->pcidev);
4309 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4310 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4311 lpfc_setup_bg(phba, shost);
4312
4313 lpfc_host_attrib_init(shost);
4314
4315 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4316 spin_lock_irq(shost->host_lock);
4317 lpfc_poll_start_timer(phba);
4318 spin_unlock_irq(shost->host_lock);
4319 }
4320
4321 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4322 "0428 Perform SCSI scan\n");
4323 /* Send board arrival event to upper layer */
4324 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4325 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4326 fc_host_post_vendor_event(shost, fc_get_event_number(),
4327 sizeof(adapter_event),
4328 (char *) &adapter_event,
4329 LPFC_NL_VENDOR_ID);
4330 return;
4331}
4332
4333/**
4334 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4335 * @phba: pointer to lpfc hba data structure.
4336 *
4337 * This routine is invoked to set up the PCI device memory space for device
4338 * with SLI-3 interface spec.
4339 *
4340 * Return codes
4341 * 0 - sucessful
4342 * other values - error
4343 **/
4344static int
4345lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4346{
4347 struct pci_dev *pdev;
4348 unsigned long bar0map_len, bar2map_len;
4349 int i, hbq_count;
4350 void *ptr;
4351 int error = -ENODEV;
4352
4353 /* Obtain PCI device reference */
4354 if (!phba->pcidev)
4355 return error;
4356 else
4357 pdev = phba->pcidev;
4358
4359 /* Set the device DMA mask size */
4360 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4361 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4362 return error;
4363
4364 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4365 * required by each mapping.
4366 */
4367 phba->pci_bar0_map = pci_resource_start(pdev, 0);
4368 bar0map_len = pci_resource_len(pdev, 0);
4369
4370 phba->pci_bar2_map = pci_resource_start(pdev, 2);
4371 bar2map_len = pci_resource_len(pdev, 2);
4372
4373 /* Map HBA SLIM to a kernel virtual address. */
4374 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4375 if (!phba->slim_memmap_p) {
4376 dev_printk(KERN_ERR, &pdev->dev,
4377 "ioremap failed for SLIM memory.\n");
4378 goto out;
4379 }
4380
4381 /* Map HBA Control Registers to a kernel virtual address. */
4382 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4383 if (!phba->ctrl_regs_memmap_p) {
4384 dev_printk(KERN_ERR, &pdev->dev,
4385 "ioremap failed for HBA control registers.\n");
4386 goto out_iounmap_slim;
4387 }
4388
4389 /* Allocate memory for SLI-2 structures */
4390 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4391 SLI2_SLIM_SIZE,
4392 &phba->slim2p.phys,
4393 GFP_KERNEL);
4394 if (!phba->slim2p.virt)
4395 goto out_iounmap;
4396
4397 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4398 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4399 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4400 phba->IOCBs = (phba->slim2p.virt +
4401 offsetof(struct lpfc_sli2_slim, IOCBs));
4402
4403 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4404 lpfc_sli_hbq_size(),
4405 &phba->hbqslimp.phys,
4406 GFP_KERNEL);
4407 if (!phba->hbqslimp.virt)
4408 goto out_free_slim;
4409
4410 hbq_count = lpfc_sli_hbq_count();
4411 ptr = phba->hbqslimp.virt;
4412 for (i = 0; i < hbq_count; ++i) {
4413 phba->hbqs[i].hbq_virt = ptr;
4414 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4415 ptr += (lpfc_hbq_defs[i]->entry_count *
4416 sizeof(struct lpfc_hbq_entry));
4417 }
4418 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4419 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4420
4421 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4422
4423 INIT_LIST_HEAD(&phba->rb_pend_list);
4424
4425 phba->MBslimaddr = phba->slim_memmap_p;
4426 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4427 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4428 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4429 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4430
4431 return 0;
4432
4433out_free_slim:
4434 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4435 phba->slim2p.virt, phba->slim2p.phys);
4436out_iounmap:
4437 iounmap(phba->ctrl_regs_memmap_p);
4438out_iounmap_slim:
4439 iounmap(phba->slim_memmap_p);
4440out:
4441 return error;
4442}
4443
4444/**
4445 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4446 * @phba: pointer to lpfc hba data structure.
4447 *
4448 * This routine is invoked to unset the PCI device memory space for device
4449 * with SLI-3 interface spec.
4450 **/
4451static void
4452lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4453{
4454 struct pci_dev *pdev;
4455
4456 /* Obtain PCI device reference */
4457 if (!phba->pcidev)
4458 return;
4459 else
4460 pdev = phba->pcidev;
4461
4462 /* Free coherent DMA memory allocated */
4463 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4464 phba->hbqslimp.virt, phba->hbqslimp.phys);
4465 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4466 phba->slim2p.virt, phba->slim2p.phys);
4467
4468 /* I/O memory unmap */
4469 iounmap(phba->ctrl_regs_memmap_p);
4470 iounmap(phba->slim_memmap_p);
4471
4472 return;
4473}
4474
4475/**
da0436e9 4476 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
3772a991
JS
4477 * @phba: pointer to lpfc hba data structure.
4478 *
da0436e9
JS
4479 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4480 * done and check status.
3772a991 4481 *
da0436e9 4482 * Return 0 if successful, otherwise -ENODEV.
3772a991 4483 **/
da0436e9
JS
4484int
4485lpfc_sli4_post_status_check(struct lpfc_hba *phba)
3772a991 4486{
da0436e9
JS
4487 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4488 uint32_t onlnreg0, onlnreg1;
4489 int i, port_error = -ENODEV;
3772a991 4490
da0436e9
JS
4491 if (!phba->sli4_hba.STAregaddr)
4492 return -ENODEV;
3772a991 4493
da0436e9
JS
4494 /* With uncoverable error, log the error message and return error */
4495 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4496 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4497 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4498 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4499 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4500 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4502 "1422 HBA Unrecoverable error: "
4503 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4504 "online0_reg=0x%x, online1_reg=0x%x\n",
4505 uerrlo_reg.word0, uerrhi_reg.word0,
4506 onlnreg0, onlnreg1);
4507 }
4508 return -ENODEV;
3772a991 4509 }
3772a991 4510
da0436e9
JS
4511 /* Wait up to 30 seconds for the SLI Port POST done and ready */
4512 for (i = 0; i < 3000; i++) {
4513 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4514 /* Encounter fatal POST error, break out */
4515 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4516 port_error = -ENODEV;
4517 break;
4518 }
4519 if (LPFC_POST_STAGE_ARMFW_READY ==
4520 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4521 port_error = 0;
4522 break;
4523 }
4524 msleep(10);
3772a991
JS
4525 }
4526
da0436e9
JS
4527 if (port_error)
4528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4529 "1408 Failure HBA POST Status: sta_reg=0x%x, "
4530 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4531 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
4532 bf_get(lpfc_hst_state_perr, &sta_reg),
4533 bf_get(lpfc_hst_state_sfi, &sta_reg),
4534 bf_get(lpfc_hst_state_nip, &sta_reg),
4535 bf_get(lpfc_hst_state_ipc, &sta_reg),
4536 bf_get(lpfc_hst_state_xrom, &sta_reg),
4537 bf_get(lpfc_hst_state_dl, &sta_reg),
4538 bf_get(lpfc_hst_state_port_status, &sta_reg));
4539
4540 /* Log device information */
4541 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
4542 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4543 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4544 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4545 bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4546 bf_get(lpfc_scratchpad_slirev, &scratchpad),
4547 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4548 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4549
4550 return port_error;
4551}
3772a991 4552
da0436e9
JS
4553/**
4554 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4555 * @phba: pointer to lpfc hba data structure.
4556 *
4557 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4558 * memory map.
4559 **/
4560static void
4561lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4562{
4563 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4564 LPFC_UERR_STATUS_LO;
4565 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4566 LPFC_UERR_STATUS_HI;
4567 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4568 LPFC_ONLINE0;
4569 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4570 LPFC_ONLINE1;
4571 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4572 LPFC_SCRATCHPAD;
4573}
3772a991 4574
da0436e9
JS
4575/**
4576 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4577 * @phba: pointer to lpfc hba data structure.
4578 *
4579 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4580 * memory map.
4581 **/
4582static void
4583lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4584{
3772a991 4585
da0436e9
JS
4586 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4587 LPFC_HST_STATE;
4588 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4589 LPFC_HST_ISR0;
4590 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4591 LPFC_HST_IMR0;
4592 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4593 LPFC_HST_ISCR0;
4594 return;
3772a991
JS
4595}
4596
4597/**
da0436e9 4598 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
3772a991 4599 * @phba: pointer to lpfc hba data structure.
da0436e9 4600 * @vf: virtual function number
3772a991 4601 *
da0436e9
JS
4602 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4603 * based on the given viftual function number, @vf.
4604 *
4605 * Return 0 if successful, otherwise -ENODEV.
3772a991 4606 **/
da0436e9
JS
4607static int
4608lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
3772a991 4609{
da0436e9
JS
4610 if (vf > LPFC_VIR_FUNC_MAX)
4611 return -ENODEV;
3772a991 4612
da0436e9
JS
4613 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4614 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4615 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4616 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4617 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4618 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4619 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4620 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4621 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4622 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4623 return 0;
3772a991
JS
4624}
4625
4626/**
da0436e9 4627 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
3772a991
JS
4628 * @phba: pointer to lpfc hba data structure.
4629 *
da0436e9
JS
4630 * This routine is invoked to create the bootstrap mailbox
4631 * region consistent with the SLI-4 interface spec. This
4632 * routine allocates all memory necessary to communicate
4633 * mailbox commands to the port and sets up all alignment
4634 * needs. No locks are expected to be held when calling
4635 * this routine.
3772a991
JS
4636 *
4637 * Return codes
4638 * 0 - sucessful
da0436e9
JS
4639 * ENOMEM - could not allocated memory.
4640 **/
3772a991 4641static int
da0436e9 4642lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
3772a991 4643{
da0436e9
JS
4644 uint32_t bmbx_size;
4645 struct lpfc_dmabuf *dmabuf;
4646 struct dma_address *dma_address;
4647 uint32_t pa_addr;
4648 uint64_t phys_addr;
4649
4650 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4651 if (!dmabuf)
4652 return -ENOMEM;
3772a991 4653
da0436e9
JS
4654 /*
4655 * The bootstrap mailbox region is comprised of 2 parts
4656 * plus an alignment restriction of 16 bytes.
4657 */
4658 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4659 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4660 bmbx_size,
4661 &dmabuf->phys,
4662 GFP_KERNEL);
4663 if (!dmabuf->virt) {
4664 kfree(dmabuf);
4665 return -ENOMEM;
3772a991 4666 }
da0436e9 4667 memset(dmabuf->virt, 0, bmbx_size);
3772a991 4668
da0436e9
JS
4669 /*
4670 * Initialize the bootstrap mailbox pointers now so that the register
4671 * operations are simple later. The mailbox dma address is required
4672 * to be 16-byte aligned. Also align the virtual memory as each
4673 * maibox is copied into the bmbx mailbox region before issuing the
4674 * command to the port.
4675 */
4676 phba->sli4_hba.bmbx.dmabuf = dmabuf;
4677 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4678
4679 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4680 LPFC_ALIGN_16_BYTE);
4681 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4682 LPFC_ALIGN_16_BYTE);
4683
4684 /*
4685 * Set the high and low physical addresses now. The SLI4 alignment
4686 * requirement is 16 bytes and the mailbox is posted to the port
4687 * as two 30-bit addresses. The other data is a bit marking whether
4688 * the 30-bit address is the high or low address.
4689 * Upcast bmbx aphys to 64bits so shift instruction compiles
4690 * clean on 32 bit machines.
4691 */
4692 dma_address = &phba->sli4_hba.bmbx.dma_address;
4693 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4694 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4695 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4696 LPFC_BMBX_BIT1_ADDR_HI);
4697
4698 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4699 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4700 LPFC_BMBX_BIT1_ADDR_LO);
4701 return 0;
3772a991
JS
4702}
4703
4704/**
da0436e9 4705 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
3772a991
JS
4706 * @phba: pointer to lpfc hba data structure.
4707 *
da0436e9
JS
4708 * This routine is invoked to teardown the bootstrap mailbox
4709 * region and release all host resources. This routine requires
4710 * the caller to ensure all mailbox commands recovered, no
4711 * additional mailbox comands are sent, and interrupts are disabled
4712 * before calling this routine.
4713 *
4714 **/
3772a991 4715static void
da0436e9 4716lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
3772a991 4717{
da0436e9
JS
4718 dma_free_coherent(&phba->pcidev->dev,
4719 phba->sli4_hba.bmbx.bmbx_size,
4720 phba->sli4_hba.bmbx.dmabuf->virt,
4721 phba->sli4_hba.bmbx.dmabuf->phys);
4722
4723 kfree(phba->sli4_hba.bmbx.dmabuf);
4724 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
3772a991
JS
4725}
4726
4727/**
da0436e9 4728 * lpfc_sli4_read_config - Get the config parameters.
3772a991
JS
4729 * @phba: pointer to lpfc hba data structure.
4730 *
da0436e9
JS
4731 * This routine is invoked to read the configuration parameters from the HBA.
4732 * The configuration parameters are used to set the base and maximum values
4733 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
4734 * allocation for the port.
3772a991
JS
4735 *
4736 * Return codes
da0436e9
JS
4737 * 0 - sucessful
4738 * ENOMEM - No availble memory
4739 * EIO - The mailbox failed to complete successfully.
3772a991 4740 **/
da0436e9
JS
4741static int
4742lpfc_sli4_read_config(struct lpfc_hba *phba)
3772a991 4743{
da0436e9
JS
4744 LPFC_MBOXQ_t *pmb;
4745 struct lpfc_mbx_read_config *rd_config;
4746 uint32_t rc = 0;
3772a991 4747
da0436e9
JS
4748 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4749 if (!pmb) {
4750 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4751 "2011 Unable to allocate memory for issuing "
4752 "SLI_CONFIG_SPECIAL mailbox command\n");
4753 return -ENOMEM;
3772a991
JS
4754 }
4755
da0436e9 4756 lpfc_read_config(phba, pmb);
3772a991 4757
da0436e9
JS
4758 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4759 if (rc != MBX_SUCCESS) {
4760 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4761 "2012 Mailbox failed , mbxCmd x%x "
4762 "READ_CONFIG, mbxStatus x%x\n",
4763 bf_get(lpfc_mqe_command, &pmb->u.mqe),
4764 bf_get(lpfc_mqe_status, &pmb->u.mqe));
4765 rc = -EIO;
4766 } else {
4767 rd_config = &pmb->u.mqe.un.rd_config;
4768 phba->sli4_hba.max_cfg_param.max_xri =
4769 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4770 phba->sli4_hba.max_cfg_param.xri_base =
4771 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4772 phba->sli4_hba.max_cfg_param.max_vpi =
4773 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4774 phba->sli4_hba.max_cfg_param.vpi_base =
4775 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4776 phba->sli4_hba.max_cfg_param.max_rpi =
4777 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4778 phba->sli4_hba.max_cfg_param.rpi_base =
4779 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4780 phba->sli4_hba.max_cfg_param.max_vfi =
4781 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4782 phba->sli4_hba.max_cfg_param.vfi_base =
4783 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4784 phba->sli4_hba.max_cfg_param.max_fcfi =
4785 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4786 phba->sli4_hba.max_cfg_param.fcfi_base =
4787 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4788 phba->sli4_hba.max_cfg_param.max_eq =
4789 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4790 phba->sli4_hba.max_cfg_param.max_rq =
4791 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4792 phba->sli4_hba.max_cfg_param.max_wq =
4793 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4794 phba->sli4_hba.max_cfg_param.max_cq =
4795 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4796 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4797 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4798 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4799 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4800 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4801 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4802 phba->max_vports = phba->max_vpi;
4803 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4804 "2003 cfg params XRI(B:%d M:%d), "
4805 "VPI(B:%d M:%d) "
4806 "VFI(B:%d M:%d) "
4807 "RPI(B:%d M:%d) "
4808 "FCFI(B:%d M:%d)\n",
4809 phba->sli4_hba.max_cfg_param.xri_base,
4810 phba->sli4_hba.max_cfg_param.max_xri,
4811 phba->sli4_hba.max_cfg_param.vpi_base,
4812 phba->sli4_hba.max_cfg_param.max_vpi,
4813 phba->sli4_hba.max_cfg_param.vfi_base,
4814 phba->sli4_hba.max_cfg_param.max_vfi,
4815 phba->sli4_hba.max_cfg_param.rpi_base,
4816 phba->sli4_hba.max_cfg_param.max_rpi,
4817 phba->sli4_hba.max_cfg_param.fcfi_base,
4818 phba->sli4_hba.max_cfg_param.max_fcfi);
3772a991 4819 }
da0436e9
JS
4820 mempool_free(pmb, phba->mbox_mem_pool);
4821
4822 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
4823 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4824 phba->cfg_hba_queue_depth =
4825 phba->sli4_hba.max_cfg_param.max_xri;
4826 return rc;
3772a991
JS
4827}
4828
4829/**
da0436e9 4830 * lpfc_dev_endian_order_setup - Notify the port of the host's endian order.
3772a991
JS
4831 * @phba: pointer to lpfc hba data structure.
4832 *
da0436e9
JS
4833 * This routine is invoked to setup the host-side endian order to the
4834 * HBA consistent with the SLI-4 interface spec.
4835 *
4836 * Return codes
4837 * 0 - sucessful
4838 * ENOMEM - No availble memory
4839 * EIO - The mailbox failed to complete successfully.
3772a991 4840 **/
da0436e9
JS
4841static int
4842lpfc_setup_endian_order(struct lpfc_hba *phba)
3772a991 4843{
da0436e9
JS
4844 LPFC_MBOXQ_t *mboxq;
4845 uint32_t rc = 0;
4846 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4847 HOST_ENDIAN_HIGH_WORD1};
3772a991 4848
da0436e9
JS
4849 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4850 if (!mboxq) {
4851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4852 "0492 Unable to allocate memory for issuing "
4853 "SLI_CONFIG_SPECIAL mailbox command\n");
4854 return -ENOMEM;
4855 }
3772a991 4856
da0436e9
JS
4857 /*
4858 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4859 * words to contain special data values and no other data.
4860 */
4861 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4862 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4863 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4864 if (rc != MBX_SUCCESS) {
4865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4866 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
4867 "status x%x\n",
4868 rc);
4869 rc = -EIO;
4870 }
4871
4872 mempool_free(mboxq, phba->mbox_mem_pool);
4873 return rc;
3772a991
JS
4874}
4875
4876/**
da0436e9 4877 * lpfc_sli4_queue_create - Create all the SLI4 queues
3772a991
JS
4878 * @phba: pointer to lpfc hba data structure.
4879 *
da0436e9
JS
4880 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4881 * operation. For each SLI4 queue type, the parameters such as queue entry
4882 * count (queue depth) shall be taken from the module parameter. For now,
4883 * we just use some constant number as place holder.
4884 *
4885 * Return codes
4886 * 0 - sucessful
4887 * ENOMEM - No availble memory
4888 * EIO - The mailbox failed to complete successfully.
3772a991 4889 **/
da0436e9
JS
4890static int
4891lpfc_sli4_queue_create(struct lpfc_hba *phba)
3772a991 4892{
da0436e9
JS
4893 struct lpfc_queue *qdesc;
4894 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4895 int cfg_fcp_wq_count;
4896 int cfg_fcp_eq_count;
3772a991 4897
da0436e9
JS
4898 /*
4899 * Sanity check for confiugred queue parameters against the run-time
4900 * device parameters
4901 */
3772a991 4902
da0436e9
JS
4903 /* Sanity check on FCP fast-path WQ parameters */
4904 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4905 if (cfg_fcp_wq_count >
4906 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4907 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4908 LPFC_SP_WQN_DEF;
4909 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4911 "2581 Not enough WQs (%d) from "
4912 "the pci function for supporting "
4913 "FCP WQs (%d)\n",
4914 phba->sli4_hba.max_cfg_param.max_wq,
4915 phba->cfg_fcp_wq_count);
4916 goto out_error;
4917 }
4918 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4919 "2582 Not enough WQs (%d) from the pci "
4920 "function for supporting the requested "
4921 "FCP WQs (%d), the actual FCP WQs can "
4922 "be supported: %d\n",
4923 phba->sli4_hba.max_cfg_param.max_wq,
4924 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4925 }
4926 /* The actual number of FCP work queues adopted */
4927 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
4928
4929 /* Sanity check on FCP fast-path EQ parameters */
4930 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4931 if (cfg_fcp_eq_count >
4932 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4933 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4934 LPFC_SP_EQN_DEF;
4935 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4937 "2574 Not enough EQs (%d) from the "
4938 "pci function for supporting FCP "
4939 "EQs (%d)\n",
4940 phba->sli4_hba.max_cfg_param.max_eq,
4941 phba->cfg_fcp_eq_count);
4942 goto out_error;
4943 }
4944 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4945 "2575 Not enough EQs (%d) from the pci "
4946 "function for supporting the requested "
4947 "FCP EQs (%d), the actual FCP EQs can "
4948 "be supported: %d\n",
4949 phba->sli4_hba.max_cfg_param.max_eq,
4950 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4951 }
4952 /* It does not make sense to have more EQs than WQs */
4953 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4954 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4955 "2593 The number of FCP EQs (%d) is more "
4956 "than the number of FCP WQs (%d), take "
4957 "the number of FCP EQs same as than of "
4958 "WQs (%d)\n", cfg_fcp_eq_count,
4959 phba->cfg_fcp_wq_count,
4960 phba->cfg_fcp_wq_count);
4961 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4962 }
4963 /* The actual number of FCP event queues adopted */
4964 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4965 /* The overall number of event queues used */
4966 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
3772a991 4967
da0436e9
JS
4968 /*
4969 * Create Event Queues (EQs)
4970 */
3772a991 4971
da0436e9
JS
4972 /* Get EQ depth from module parameter, fake the default for now */
4973 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
4974 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
3772a991 4975
da0436e9
JS
4976 /* Create slow path event queue */
4977 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4978 phba->sli4_hba.eq_ecount);
4979 if (!qdesc) {
4980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4981 "0496 Failed allocate slow-path EQ\n");
4982 goto out_error;
4983 }
4984 phba->sli4_hba.sp_eq = qdesc;
4985
4986 /* Create fast-path FCP Event Queue(s) */
4987 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
4988 phba->cfg_fcp_eq_count), GFP_KERNEL);
4989 if (!phba->sli4_hba.fp_eq) {
4990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4991 "2576 Failed allocate memory for fast-path "
4992 "EQ record array\n");
4993 goto out_free_sp_eq;
4994 }
4995 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
4996 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4997 phba->sli4_hba.eq_ecount);
4998 if (!qdesc) {
4999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5000 "0497 Failed allocate fast-path EQ\n");
5001 goto out_free_fp_eq;
5002 }
5003 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5004 }
5005
5006 /*
5007 * Create Complete Queues (CQs)
5008 */
5009
5010 /* Get CQ depth from module parameter, fake the default for now */
5011 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5012 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5013
5014 /* Create slow-path Mailbox Command Complete Queue */
5015 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5016 phba->sli4_hba.cq_ecount);
5017 if (!qdesc) {
5018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5019 "0500 Failed allocate slow-path mailbox CQ\n");
5020 goto out_free_fp_eq;
5021 }
5022 phba->sli4_hba.mbx_cq = qdesc;
5023
5024 /* Create slow-path ELS Complete Queue */
5025 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5026 phba->sli4_hba.cq_ecount);
5027 if (!qdesc) {
5028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5029 "0501 Failed allocate slow-path ELS CQ\n");
5030 goto out_free_mbx_cq;
5031 }
5032 phba->sli4_hba.els_cq = qdesc;
5033
5034 /* Create slow-path Unsolicited Receive Complete Queue */
5035 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5036 phba->sli4_hba.cq_ecount);
5037 if (!qdesc) {
5038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5039 "0502 Failed allocate slow-path USOL RX CQ\n");
5040 goto out_free_els_cq;
5041 }
5042 phba->sli4_hba.rxq_cq = qdesc;
5043
5044 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5045 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5046 phba->cfg_fcp_eq_count), GFP_KERNEL);
5047 if (!phba->sli4_hba.fcp_cq) {
5048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5049 "2577 Failed allocate memory for fast-path "
5050 "CQ record array\n");
5051 goto out_free_rxq_cq;
5052 }
5053 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5054 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5055 phba->sli4_hba.cq_ecount);
5056 if (!qdesc) {
5057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5058 "0499 Failed allocate fast-path FCP "
5059 "CQ (%d)\n", fcp_cqidx);
5060 goto out_free_fcp_cq;
5061 }
5062 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5063 }
5064
5065 /* Create Mailbox Command Queue */
5066 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5067 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5068
5069 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5070 phba->sli4_hba.mq_ecount);
5071 if (!qdesc) {
5072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5073 "0505 Failed allocate slow-path MQ\n");
5074 goto out_free_fcp_cq;
5075 }
5076 phba->sli4_hba.mbx_wq = qdesc;
5077
5078 /*
5079 * Create all the Work Queues (WQs)
5080 */
5081 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5082 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5083
5084 /* Create slow-path ELS Work Queue */
5085 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5086 phba->sli4_hba.wq_ecount);
5087 if (!qdesc) {
5088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5089 "0504 Failed allocate slow-path ELS WQ\n");
5090 goto out_free_mbx_wq;
5091 }
5092 phba->sli4_hba.els_wq = qdesc;
5093
5094 /* Create fast-path FCP Work Queue(s) */
5095 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5096 phba->cfg_fcp_wq_count), GFP_KERNEL);
5097 if (!phba->sli4_hba.fcp_wq) {
5098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5099 "2578 Failed allocate memory for fast-path "
5100 "WQ record array\n");
5101 goto out_free_els_wq;
5102 }
5103 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5104 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5105 phba->sli4_hba.wq_ecount);
5106 if (!qdesc) {
5107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5108 "0503 Failed allocate fast-path FCP "
5109 "WQ (%d)\n", fcp_wqidx);
5110 goto out_free_fcp_wq;
5111 }
5112 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5113 }
5114
5115 /*
5116 * Create Receive Queue (RQ)
5117 */
5118 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5119 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5120
5121 /* Create Receive Queue for header */
5122 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5123 phba->sli4_hba.rq_ecount);
5124 if (!qdesc) {
5125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5126 "0506 Failed allocate receive HRQ\n");
5127 goto out_free_fcp_wq;
5128 }
5129 phba->sli4_hba.hdr_rq = qdesc;
5130
5131 /* Create Receive Queue for data */
5132 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5133 phba->sli4_hba.rq_ecount);
5134 if (!qdesc) {
5135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5136 "0507 Failed allocate receive DRQ\n");
5137 goto out_free_hdr_rq;
5138 }
5139 phba->sli4_hba.dat_rq = qdesc;
5140
5141 return 0;
5142
5143out_free_hdr_rq:
5144 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5145 phba->sli4_hba.hdr_rq = NULL;
5146out_free_fcp_wq:
5147 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5148 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5149 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5150 }
5151 kfree(phba->sli4_hba.fcp_wq);
5152out_free_els_wq:
5153 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5154 phba->sli4_hba.els_wq = NULL;
5155out_free_mbx_wq:
5156 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5157 phba->sli4_hba.mbx_wq = NULL;
5158out_free_fcp_cq:
5159 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5160 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5161 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5162 }
5163 kfree(phba->sli4_hba.fcp_cq);
5164out_free_rxq_cq:
5165 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5166 phba->sli4_hba.rxq_cq = NULL;
5167out_free_els_cq:
5168 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5169 phba->sli4_hba.els_cq = NULL;
5170out_free_mbx_cq:
5171 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5172 phba->sli4_hba.mbx_cq = NULL;
5173out_free_fp_eq:
5174 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5175 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5176 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5177 }
5178 kfree(phba->sli4_hba.fp_eq);
5179out_free_sp_eq:
5180 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5181 phba->sli4_hba.sp_eq = NULL;
5182out_error:
5183 return -ENOMEM;
5184}
5185
5186/**
5187 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5188 * @phba: pointer to lpfc hba data structure.
5189 *
5190 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5191 * operation.
5192 *
5193 * Return codes
5194 * 0 - sucessful
5195 * ENOMEM - No availble memory
5196 * EIO - The mailbox failed to complete successfully.
5197 **/
5198static void
5199lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5200{
5201 int fcp_qidx;
5202
5203 /* Release mailbox command work queue */
5204 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5205 phba->sli4_hba.mbx_wq = NULL;
5206
5207 /* Release ELS work queue */
5208 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5209 phba->sli4_hba.els_wq = NULL;
5210
5211 /* Release FCP work queue */
5212 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5213 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5214 kfree(phba->sli4_hba.fcp_wq);
5215 phba->sli4_hba.fcp_wq = NULL;
5216
5217 /* Release unsolicited receive queue */
5218 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5219 phba->sli4_hba.hdr_rq = NULL;
5220 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5221 phba->sli4_hba.dat_rq = NULL;
5222
5223 /* Release unsolicited receive complete queue */
5224 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5225 phba->sli4_hba.rxq_cq = NULL;
5226
5227 /* Release ELS complete queue */
5228 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5229 phba->sli4_hba.els_cq = NULL;
5230
5231 /* Release mailbox command complete queue */
5232 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5233 phba->sli4_hba.mbx_cq = NULL;
5234
5235 /* Release FCP response complete queue */
5236 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5237 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5238 kfree(phba->sli4_hba.fcp_cq);
5239 phba->sli4_hba.fcp_cq = NULL;
5240
5241 /* Release fast-path event queue */
5242 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5243 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5244 kfree(phba->sli4_hba.fp_eq);
5245 phba->sli4_hba.fp_eq = NULL;
5246
5247 /* Release slow-path event queue */
5248 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5249 phba->sli4_hba.sp_eq = NULL;
5250
5251 return;
5252}
5253
5254/**
5255 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5256 * @phba: pointer to lpfc hba data structure.
5257 *
5258 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5259 * operation.
5260 *
5261 * Return codes
5262 * 0 - sucessful
5263 * ENOMEM - No availble memory
5264 * EIO - The mailbox failed to complete successfully.
5265 **/
5266int
5267lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5268{
5269 int rc = -ENOMEM;
5270 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5271 int fcp_cq_index = 0;
5272
5273 /*
5274 * Set up Event Queues (EQs)
5275 */
5276
5277 /* Set up slow-path event queue */
5278 if (!phba->sli4_hba.sp_eq) {
5279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5280 "0520 Slow-path EQ not allocated\n");
5281 goto out_error;
5282 }
5283 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5284 LPFC_SP_DEF_IMAX);
5285 if (rc) {
5286 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5287 "0521 Failed setup of slow-path EQ: "
5288 "rc = 0x%x\n", rc);
5289 goto out_error;
5290 }
5291 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5292 "2583 Slow-path EQ setup: queue-id=%d\n",
5293 phba->sli4_hba.sp_eq->queue_id);
5294
5295 /* Set up fast-path event queue */
5296 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5297 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5299 "0522 Fast-path EQ (%d) not "
5300 "allocated\n", fcp_eqidx);
5301 goto out_destroy_fp_eq;
5302 }
5303 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5304 phba->cfg_fcp_imax);
5305 if (rc) {
5306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5307 "0523 Failed setup of fast-path EQ "
5308 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
5309 goto out_destroy_fp_eq;
5310 }
5311 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5312 "2584 Fast-path EQ setup: "
5313 "queue[%d]-id=%d\n", fcp_eqidx,
5314 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5315 }
5316
5317 /*
5318 * Set up Complete Queues (CQs)
5319 */
5320
5321 /* Set up slow-path MBOX Complete Queue as the first CQ */
5322 if (!phba->sli4_hba.mbx_cq) {
5323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5324 "0528 Mailbox CQ not allocated\n");
5325 goto out_destroy_fp_eq;
5326 }
5327 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5328 LPFC_MCQ, LPFC_MBOX);
5329 if (rc) {
5330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5331 "0529 Failed setup of slow-path mailbox CQ: "
5332 "rc = 0x%x\n", rc);
5333 goto out_destroy_fp_eq;
5334 }
5335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5336 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5337 phba->sli4_hba.mbx_cq->queue_id,
5338 phba->sli4_hba.sp_eq->queue_id);
5339
5340 /* Set up slow-path ELS Complete Queue */
5341 if (!phba->sli4_hba.els_cq) {
5342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5343 "0530 ELS CQ not allocated\n");
5344 goto out_destroy_mbx_cq;
5345 }
5346 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5347 LPFC_WCQ, LPFC_ELS);
5348 if (rc) {
5349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5350 "0531 Failed setup of slow-path ELS CQ: "
5351 "rc = 0x%x\n", rc);
5352 goto out_destroy_mbx_cq;
5353 }
5354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5355 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5356 phba->sli4_hba.els_cq->queue_id,
5357 phba->sli4_hba.sp_eq->queue_id);
5358
5359 /* Set up slow-path Unsolicited Receive Complete Queue */
5360 if (!phba->sli4_hba.rxq_cq) {
5361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5362 "0532 USOL RX CQ not allocated\n");
5363 goto out_destroy_els_cq;
5364 }
5365 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5366 LPFC_RCQ, LPFC_USOL);
5367 if (rc) {
5368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5369 "0533 Failed setup of slow-path USOL RX CQ: "
5370 "rc = 0x%x\n", rc);
5371 goto out_destroy_els_cq;
5372 }
5373 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5374 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5375 phba->sli4_hba.rxq_cq->queue_id,
5376 phba->sli4_hba.sp_eq->queue_id);
5377
5378 /* Set up fast-path FCP Response Complete Queue */
5379 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5380 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5382 "0526 Fast-path FCP CQ (%d) not "
5383 "allocated\n", fcp_cqidx);
5384 goto out_destroy_fcp_cq;
5385 }
5386 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5387 phba->sli4_hba.fp_eq[fcp_cqidx],
5388 LPFC_WCQ, LPFC_FCP);
5389 if (rc) {
5390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5391 "0527 Failed setup of fast-path FCP "
5392 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5393 goto out_destroy_fcp_cq;
5394 }
5395 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5396 "2588 FCP CQ setup: cq[%d]-id=%d, "
5397 "parent eq[%d]-id=%d\n",
5398 fcp_cqidx,
5399 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5400 fcp_cqidx,
5401 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5402 }
5403
5404 /*
5405 * Set up all the Work Queues (WQs)
5406 */
5407
5408 /* Set up Mailbox Command Queue */
5409 if (!phba->sli4_hba.mbx_wq) {
5410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5411 "0538 Slow-path MQ not allocated\n");
5412 goto out_destroy_fcp_cq;
5413 }
5414 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5415 phba->sli4_hba.mbx_cq, LPFC_MBOX);
5416 if (rc) {
5417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5418 "0539 Failed setup of slow-path MQ: "
5419 "rc = 0x%x\n", rc);
5420 goto out_destroy_fcp_cq;
5421 }
5422 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5423 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5424 phba->sli4_hba.mbx_wq->queue_id,
5425 phba->sli4_hba.mbx_cq->queue_id);
5426
5427 /* Set up slow-path ELS Work Queue */
5428 if (!phba->sli4_hba.els_wq) {
5429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5430 "0536 Slow-path ELS WQ not allocated\n");
5431 goto out_destroy_mbx_wq;
5432 }
5433 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5434 phba->sli4_hba.els_cq, LPFC_ELS);
5435 if (rc) {
5436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5437 "0537 Failed setup of slow-path ELS WQ: "
5438 "rc = 0x%x\n", rc);
5439 goto out_destroy_mbx_wq;
5440 }
5441 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5442 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5443 phba->sli4_hba.els_wq->queue_id,
5444 phba->sli4_hba.els_cq->queue_id);
5445
5446 /* Set up fast-path FCP Work Queue */
5447 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5448 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5450 "0534 Fast-path FCP WQ (%d) not "
5451 "allocated\n", fcp_wqidx);
5452 goto out_destroy_fcp_wq;
5453 }
5454 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5455 phba->sli4_hba.fcp_cq[fcp_cq_index],
5456 LPFC_FCP);
5457 if (rc) {
5458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5459 "0535 Failed setup of fast-path FCP "
5460 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5461 goto out_destroy_fcp_wq;
5462 }
5463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5464 "2591 FCP WQ setup: wq[%d]-id=%d, "
5465 "parent cq[%d]-id=%d\n",
5466 fcp_wqidx,
5467 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5468 fcp_cq_index,
5469 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5470 /* Round robin FCP Work Queue's Completion Queue assignment */
5471 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5472 }
5473
5474 /*
5475 * Create Receive Queue (RQ)
5476 */
5477 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5479 "0540 Receive Queue not allocated\n");
5480 goto out_destroy_fcp_wq;
5481 }
5482 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5483 phba->sli4_hba.rxq_cq, LPFC_USOL);
5484 if (rc) {
5485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5486 "0541 Failed setup of Receive Queue: "
5487 "rc = 0x%x\n", rc);
5488 goto out_destroy_fcp_wq;
5489 }
5490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5491 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5492 "parent cq-id=%d\n",
5493 phba->sli4_hba.hdr_rq->queue_id,
5494 phba->sli4_hba.dat_rq->queue_id,
5495 phba->sli4_hba.rxq_cq->queue_id);
5496 return 0;
5497
5498out_destroy_fcp_wq:
5499 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5500 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5501 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5502out_destroy_mbx_wq:
5503 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5504out_destroy_fcp_cq:
5505 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5506 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5507 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5508out_destroy_els_cq:
5509 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5510out_destroy_mbx_cq:
5511 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5512out_destroy_fp_eq:
5513 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5514 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5515 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5516out_error:
5517 return rc;
5518}
5519
5520/**
5521 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5522 * @phba: pointer to lpfc hba data structure.
5523 *
5524 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
5525 * operation.
5526 *
5527 * Return codes
5528 * 0 - sucessful
5529 * ENOMEM - No availble memory
5530 * EIO - The mailbox failed to complete successfully.
5531 **/
5532void
5533lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5534{
5535 int fcp_qidx;
5536
5537 /* Unset mailbox command work queue */
5538 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5539 /* Unset ELS work queue */
5540 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5541 /* Unset unsolicited receive queue */
5542 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5543 /* Unset FCP work queue */
5544 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5545 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5546 /* Unset mailbox command complete queue */
5547 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5548 /* Unset ELS complete queue */
5549 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5550 /* Unset unsolicited receive complete queue */
5551 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5552 /* Unset FCP response complete queue */
5553 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5554 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5555 /* Unset fast-path event queue */
5556 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5557 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5558 /* Unset slow-path event queue */
5559 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5560}
5561
5562/**
5563 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5564 * @phba: pointer to lpfc hba data structure.
5565 *
5566 * This routine is invoked to allocate and set up a pool of completion queue
5567 * events. The body of the completion queue event is a completion queue entry
5568 * CQE. For now, this pool is used for the interrupt service routine to queue
5569 * the following HBA completion queue events for the worker thread to process:
5570 * - Mailbox asynchronous events
5571 * - Receive queue completion unsolicited events
5572 * Later, this can be used for all the slow-path events.
5573 *
5574 * Return codes
5575 * 0 - sucessful
5576 * -ENOMEM - No availble memory
5577 **/
5578static int
5579lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5580{
5581 struct lpfc_cq_event *cq_event;
5582 int i;
5583
5584 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5585 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5586 if (!cq_event)
5587 goto out_pool_create_fail;
5588 list_add_tail(&cq_event->list,
5589 &phba->sli4_hba.sp_cqe_event_pool);
5590 }
5591 return 0;
5592
5593out_pool_create_fail:
5594 lpfc_sli4_cq_event_pool_destroy(phba);
5595 return -ENOMEM;
5596}
5597
5598/**
5599 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5600 * @phba: pointer to lpfc hba data structure.
5601 *
5602 * This routine is invoked to free the pool of completion queue events at
5603 * driver unload time. Note that, it is the responsibility of the driver
5604 * cleanup routine to free all the outstanding completion-queue events
5605 * allocated from this pool back into the pool before invoking this routine
5606 * to destroy the pool.
5607 **/
5608static void
5609lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5610{
5611 struct lpfc_cq_event *cq_event, *next_cq_event;
5612
5613 list_for_each_entry_safe(cq_event, next_cq_event,
5614 &phba->sli4_hba.sp_cqe_event_pool, list) {
5615 list_del(&cq_event->list);
5616 kfree(cq_event);
5617 }
5618}
5619
5620/**
5621 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5622 * @phba: pointer to lpfc hba data structure.
5623 *
5624 * This routine is the lock free version of the API invoked to allocate a
5625 * completion-queue event from the free pool.
5626 *
5627 * Return: Pointer to the newly allocated completion-queue event if successful
5628 * NULL otherwise.
5629 **/
5630struct lpfc_cq_event *
5631__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5632{
5633 struct lpfc_cq_event *cq_event = NULL;
5634
5635 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5636 struct lpfc_cq_event, list);
5637 return cq_event;
5638}
5639
5640/**
5641 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5642 * @phba: pointer to lpfc hba data structure.
5643 *
5644 * This routine is the lock version of the API invoked to allocate a
5645 * completion-queue event from the free pool.
5646 *
5647 * Return: Pointer to the newly allocated completion-queue event if successful
5648 * NULL otherwise.
5649 **/
5650struct lpfc_cq_event *
5651lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5652{
5653 struct lpfc_cq_event *cq_event;
5654 unsigned long iflags;
5655
5656 spin_lock_irqsave(&phba->hbalock, iflags);
5657 cq_event = __lpfc_sli4_cq_event_alloc(phba);
5658 spin_unlock_irqrestore(&phba->hbalock, iflags);
5659 return cq_event;
5660}
5661
5662/**
5663 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5664 * @phba: pointer to lpfc hba data structure.
5665 * @cq_event: pointer to the completion queue event to be freed.
5666 *
5667 * This routine is the lock free version of the API invoked to release a
5668 * completion-queue event back into the free pool.
5669 **/
5670void
5671__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5672 struct lpfc_cq_event *cq_event)
5673{
5674 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5675}
5676
5677/**
5678 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5679 * @phba: pointer to lpfc hba data structure.
5680 * @cq_event: pointer to the completion queue event to be freed.
5681 *
5682 * This routine is the lock version of the API invoked to release a
5683 * completion-queue event back into the free pool.
5684 **/
5685void
5686lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5687 struct lpfc_cq_event *cq_event)
5688{
5689 unsigned long iflags;
5690 spin_lock_irqsave(&phba->hbalock, iflags);
5691 __lpfc_sli4_cq_event_release(phba, cq_event);
5692 spin_unlock_irqrestore(&phba->hbalock, iflags);
5693}
5694
5695/**
5696 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5697 * @phba: pointer to lpfc hba data structure.
5698 *
5699 * This routine is to free all the pending completion-queue events to the
5700 * back into the free pool for device reset.
5701 **/
5702static void
5703lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5704{
5705 LIST_HEAD(cqelist);
5706 struct lpfc_cq_event *cqe;
5707 unsigned long iflags;
5708
5709 /* Retrieve all the pending WCQEs from pending WCQE lists */
5710 spin_lock_irqsave(&phba->hbalock, iflags);
5711 /* Pending FCP XRI abort events */
5712 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5713 &cqelist);
5714 /* Pending ELS XRI abort events */
5715 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5716 &cqelist);
5717 /* Pending asynnc events */
5718 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5719 &cqelist);
5720 spin_unlock_irqrestore(&phba->hbalock, iflags);
5721
5722 while (!list_empty(&cqelist)) {
5723 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5724 lpfc_sli4_cq_event_release(phba, cqe);
5725 }
5726}
5727
5728/**
5729 * lpfc_pci_function_reset - Reset pci function.
5730 * @phba: pointer to lpfc hba data structure.
5731 *
5732 * This routine is invoked to request a PCI function reset. It will destroys
5733 * all resources assigned to the PCI function which originates this request.
5734 *
5735 * Return codes
5736 * 0 - sucessful
5737 * ENOMEM - No availble memory
5738 * EIO - The mailbox failed to complete successfully.
5739 **/
5740int
5741lpfc_pci_function_reset(struct lpfc_hba *phba)
5742{
5743 LPFC_MBOXQ_t *mboxq;
5744 uint32_t rc = 0;
5745 uint32_t shdr_status, shdr_add_status;
5746 union lpfc_sli4_cfg_shdr *shdr;
5747
5748 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5749 if (!mboxq) {
5750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5751 "0494 Unable to allocate memory for issuing "
5752 "SLI_FUNCTION_RESET mailbox command\n");
5753 return -ENOMEM;
5754 }
5755
5756 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5757 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5758 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5759 LPFC_SLI4_MBX_EMBED);
5760 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5761 shdr = (union lpfc_sli4_cfg_shdr *)
5762 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5763 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5764 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5765 if (rc != MBX_TIMEOUT)
5766 mempool_free(mboxq, phba->mbox_mem_pool);
5767 if (shdr_status || shdr_add_status || rc) {
5768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5769 "0495 SLI_FUNCTION_RESET mailbox failed with "
5770 "status x%x add_status x%x, mbx status x%x\n",
5771 shdr_status, shdr_add_status, rc);
5772 rc = -ENXIO;
5773 }
5774 return rc;
5775}
5776
5777/**
5778 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5779 * @phba: pointer to lpfc hba data structure.
5780 * @cnt: number of nop mailbox commands to send.
5781 *
5782 * This routine is invoked to send a number @cnt of NOP mailbox command and
5783 * wait for each command to complete.
5784 *
5785 * Return: the number of NOP mailbox command completed.
5786 **/
5787static int
5788lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5789{
5790 LPFC_MBOXQ_t *mboxq;
5791 int length, cmdsent;
5792 uint32_t mbox_tmo;
5793 uint32_t rc = 0;
5794 uint32_t shdr_status, shdr_add_status;
5795 union lpfc_sli4_cfg_shdr *shdr;
5796
5797 if (cnt == 0) {
5798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5799 "2518 Requested to send 0 NOP mailbox cmd\n");
5800 return cnt;
5801 }
5802
5803 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5804 if (!mboxq) {
5805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5806 "2519 Unable to allocate memory for issuing "
5807 "NOP mailbox command\n");
5808 return 0;
5809 }
5810
5811 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5812 length = (sizeof(struct lpfc_mbx_nop) -
5813 sizeof(struct lpfc_sli4_cfg_mhdr));
5814 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5815 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5816
5817 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5818 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5819 if (!phba->sli4_hba.intr_enable)
5820 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5821 else
5822 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5823 if (rc == MBX_TIMEOUT)
5824 break;
5825 /* Check return status */
5826 shdr = (union lpfc_sli4_cfg_shdr *)
5827 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5828 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5829 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5830 &shdr->response);
5831 if (shdr_status || shdr_add_status || rc) {
5832 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5833 "2520 NOP mailbox command failed "
5834 "status x%x add_status x%x mbx "
5835 "status x%x\n", shdr_status,
5836 shdr_add_status, rc);
5837 break;
5838 }
5839 }
5840
5841 if (rc != MBX_TIMEOUT)
5842 mempool_free(mboxq, phba->mbox_mem_pool);
5843
5844 return cmdsent;
5845}
5846
5847/**
5848 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device
5849 * @phba: pointer to lpfc hba data structure.
5850 * @fcfi: fcf index.
5851 *
5852 * This routine is invoked to unregister a FCFI from device.
5853 **/
5854void
5855lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5856{
5857 LPFC_MBOXQ_t *mbox;
5858 uint32_t mbox_tmo;
5859 int rc;
5860 unsigned long flags;
5861
5862 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5863
5864 if (!mbox)
5865 return;
5866
5867 lpfc_unreg_fcfi(mbox, fcfi);
5868
5869 if (!phba->sli4_hba.intr_enable)
5870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5871 else {
5872 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5873 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5874 }
5875 if (rc != MBX_TIMEOUT)
5876 mempool_free(mbox, phba->mbox_mem_pool);
5877 if (rc != MBX_SUCCESS)
5878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5879 "2517 Unregister FCFI command failed "
5880 "status %d, mbxStatus x%x\n", rc,
5881 bf_get(lpfc_mqe_status, &mbox->u.mqe));
5882 else {
5883 spin_lock_irqsave(&phba->hbalock, flags);
5884 /* Mark the FCFI is no longer registered */
5885 phba->fcf.fcf_flag &=
5886 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5887 spin_unlock_irqrestore(&phba->hbalock, flags);
5888 }
5889}
5890
5891/**
5892 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5893 * @phba: pointer to lpfc hba data structure.
5894 *
5895 * This routine is invoked to set up the PCI device memory space for device
5896 * with SLI-4 interface spec.
5897 *
5898 * Return codes
5899 * 0 - sucessful
5900 * other values - error
5901 **/
5902static int
5903lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5904{
5905 struct pci_dev *pdev;
5906 unsigned long bar0map_len, bar1map_len, bar2map_len;
5907 int error = -ENODEV;
5908
5909 /* Obtain PCI device reference */
5910 if (!phba->pcidev)
5911 return error;
5912 else
5913 pdev = phba->pcidev;
5914
5915 /* Set the device DMA mask size */
5916 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5917 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5918 return error;
5919
5920 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5921 * number of bytes required by each mapping. They are actually
5922 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device.
5923 */
5924 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5925 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5926
5927 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5928 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5929
5930 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5931 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5932
5933 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5934 phba->sli4_hba.conf_regs_memmap_p =
5935 ioremap(phba->pci_bar0_map, bar0map_len);
5936 if (!phba->sli4_hba.conf_regs_memmap_p) {
5937 dev_printk(KERN_ERR, &pdev->dev,
5938 "ioremap failed for SLI4 PCI config registers.\n");
5939 goto out;
5940 }
5941
5942 /* Map SLI4 HBA Control Register base to a kernel virtual address. */
5943 phba->sli4_hba.ctrl_regs_memmap_p =
5944 ioremap(phba->pci_bar1_map, bar1map_len);
5945 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5946 dev_printk(KERN_ERR, &pdev->dev,
5947 "ioremap failed for SLI4 HBA control registers.\n");
5948 goto out_iounmap_conf;
5949 }
5950
5951 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5952 phba->sli4_hba.drbl_regs_memmap_p =
5953 ioremap(phba->pci_bar2_map, bar2map_len);
5954 if (!phba->sli4_hba.drbl_regs_memmap_p) {
5955 dev_printk(KERN_ERR, &pdev->dev,
5956 "ioremap failed for SLI4 HBA doorbell registers.\n");
5957 goto out_iounmap_ctrl;
5958 }
5959
5960 /* Set up BAR0 PCI config space register memory map */
5961 lpfc_sli4_bar0_register_memmap(phba);
5962
5963 /* Set up BAR1 register memory map */
5964 lpfc_sli4_bar1_register_memmap(phba);
5965
5966 /* Set up BAR2 register memory map */
5967 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5968 if (error)
5969 goto out_iounmap_all;
5970
5971 return 0;
5972
5973out_iounmap_all:
5974 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5975out_iounmap_ctrl:
5976 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5977out_iounmap_conf:
5978 iounmap(phba->sli4_hba.conf_regs_memmap_p);
5979out:
5980 return error;
5981}
5982
5983/**
5984 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
5985 * @phba: pointer to lpfc hba data structure.
5986 *
5987 * This routine is invoked to unset the PCI device memory space for device
5988 * with SLI-4 interface spec.
5989 **/
5990static void
5991lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
5992{
5993 struct pci_dev *pdev;
5994
5995 /* Obtain PCI device reference */
5996 if (!phba->pcidev)
5997 return;
5998 else
5999 pdev = phba->pcidev;
6000
6001 /* Free coherent DMA memory allocated */
6002
6003 /* Unmap I/O memory space */
6004 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6005 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6006 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6007
6008 return;
6009}
6010
6011/**
6012 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6013 * @phba: pointer to lpfc hba data structure.
6014 *
6015 * This routine is invoked to enable the MSI-X interrupt vectors to device
6016 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6017 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6018 * invoked, enables either all or nothing, depending on the current
6019 * availability of PCI vector resources. The device driver is responsible
6020 * for calling the individual request_irq() to register each MSI-X vector
6021 * with a interrupt handler, which is done in this function. Note that
6022 * later when device is unloading, the driver should always call free_irq()
6023 * on all MSI-X vectors it has done request_irq() on before calling
6024 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
6025 * will be left with MSI-X enabled and leaks its vectors.
6026 *
6027 * Return codes
6028 * 0 - sucessful
6029 * other values - error
6030 **/
6031static int
6032lpfc_sli_enable_msix(struct lpfc_hba *phba)
6033{
6034 int rc, i;
6035 LPFC_MBOXQ_t *pmb;
6036
6037 /* Set up MSI-X multi-message vectors */
6038 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6039 phba->msix_entries[i].entry = i;
6040
6041 /* Configure MSI-X capability structure */
6042 rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6043 ARRAY_SIZE(phba->msix_entries));
6044 if (rc) {
6045 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6046 "0420 PCI enable MSI-X failed (%d)\n", rc);
6047 goto msi_fail_out;
6048 }
6049 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6050 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6051 "0477 MSI-X entry[%d]: vector=x%x "
6052 "message=%d\n", i,
6053 phba->msix_entries[i].vector,
6054 phba->msix_entries[i].entry);
6055 /*
6056 * Assign MSI-X vectors to interrupt handlers
6057 */
6058
6059 /* vector-0 is associated to slow-path handler */
6060 rc = request_irq(phba->msix_entries[0].vector,
6061 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6062 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6063 if (rc) {
6064 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6065 "0421 MSI-X slow-path request_irq failed "
6066 "(%d)\n", rc);
6067 goto msi_fail_out;
6068 }
6069
6070 /* vector-1 is associated to fast-path handler */
6071 rc = request_irq(phba->msix_entries[1].vector,
6072 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6073 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6074
6075 if (rc) {
6076 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6077 "0429 MSI-X fast-path request_irq failed "
6078 "(%d)\n", rc);
6079 goto irq_fail_out;
6080 }
6081
6082 /*
6083 * Configure HBA MSI-X attention conditions to messages
6084 */
6085 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6086
6087 if (!pmb) {
6088 rc = -ENOMEM;
6089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6090 "0474 Unable to allocate memory for issuing "
6091 "MBOX_CONFIG_MSI command\n");
6092 goto mem_fail_out;
6093 }
6094 rc = lpfc_config_msi(phba, pmb);
6095 if (rc)
6096 goto mbx_fail_out;
6097 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6098 if (rc != MBX_SUCCESS) {
6099 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6100 "0351 Config MSI mailbox command failed, "
6101 "mbxCmd x%x, mbxStatus x%x\n",
6102 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6103 goto mbx_fail_out;
6104 }
6105
6106 /* Free memory allocated for mailbox command */
6107 mempool_free(pmb, phba->mbox_mem_pool);
6108 return rc;
6109
6110mbx_fail_out:
6111 /* Free memory allocated for mailbox command */
6112 mempool_free(pmb, phba->mbox_mem_pool);
6113
6114mem_fail_out:
6115 /* free the irq already requested */
6116 free_irq(phba->msix_entries[1].vector, phba);
6117
6118irq_fail_out:
6119 /* free the irq already requested */
6120 free_irq(phba->msix_entries[0].vector, phba);
6121
6122msi_fail_out:
6123 /* Unconfigure MSI-X capability structure */
6124 pci_disable_msix(phba->pcidev);
6125 return rc;
6126}
6127
6128/**
6129 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6130 * @phba: pointer to lpfc hba data structure.
6131 *
6132 * This routine is invoked to release the MSI-X vectors and then disable the
6133 * MSI-X interrupt mode to device with SLI-3 interface spec.
6134 **/
6135static void
6136lpfc_sli_disable_msix(struct lpfc_hba *phba)
6137{
6138 int i;
6139
6140 /* Free up MSI-X multi-message vectors */
6141 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6142 free_irq(phba->msix_entries[i].vector, phba);
6143 /* Disable MSI-X */
6144 pci_disable_msix(phba->pcidev);
6145
6146 return;
6147}
6148
6149/**
6150 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6151 * @phba: pointer to lpfc hba data structure.
6152 *
6153 * This routine is invoked to enable the MSI interrupt mode to device with
6154 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6155 * enable the MSI vector. The device driver is responsible for calling the
6156 * request_irq() to register MSI vector with a interrupt the handler, which
6157 * is done in this function.
6158 *
6159 * Return codes
6160 * 0 - sucessful
6161 * other values - error
6162 */
6163static int
6164lpfc_sli_enable_msi(struct lpfc_hba *phba)
6165{
6166 int rc;
6167
6168 rc = pci_enable_msi(phba->pcidev);
6169 if (!rc)
6170 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6171 "0462 PCI enable MSI mode success.\n");
6172 else {
6173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6174 "0471 PCI enable MSI mode failed (%d)\n", rc);
6175 return rc;
6176 }
6177
6178 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6179 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6180 if (rc) {
6181 pci_disable_msi(phba->pcidev);
6182 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6183 "0478 MSI request_irq failed (%d)\n", rc);
6184 }
6185 return rc;
6186}
6187
6188/**
6189 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6190 * @phba: pointer to lpfc hba data structure.
6191 *
6192 * This routine is invoked to disable the MSI interrupt mode to device with
6193 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
6194 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6195 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6196 * its vector.
6197 */
6198static void
6199lpfc_sli_disable_msi(struct lpfc_hba *phba)
6200{
6201 free_irq(phba->pcidev->irq, phba);
6202 pci_disable_msi(phba->pcidev);
6203 return;
6204}
6205
6206/**
6207 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6208 * @phba: pointer to lpfc hba data structure.
6209 *
6210 * This routine is invoked to enable device interrupt and associate driver's
6211 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6212 * spec. Depends on the interrupt mode configured to the driver, the driver
6213 * will try to fallback from the configured interrupt mode to an interrupt
6214 * mode which is supported by the platform, kernel, and device in the order
6215 * of:
6216 * MSI-X -> MSI -> IRQ.
6217 *
6218 * Return codes
6219 * 0 - sucessful
6220 * other values - error
6221 **/
6222static uint32_t
6223lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6224{
6225 uint32_t intr_mode = LPFC_INTR_ERROR;
6226 int retval;
6227
6228 if (cfg_mode == 2) {
6229 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6230 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6231 if (!retval) {
6232 /* Now, try to enable MSI-X interrupt mode */
6233 retval = lpfc_sli_enable_msix(phba);
6234 if (!retval) {
6235 /* Indicate initialization to MSI-X mode */
6236 phba->intr_type = MSIX;
6237 intr_mode = 2;
6238 }
6239 }
6240 }
6241
6242 /* Fallback to MSI if MSI-X initialization failed */
6243 if (cfg_mode >= 1 && phba->intr_type == NONE) {
6244 retval = lpfc_sli_enable_msi(phba);
6245 if (!retval) {
6246 /* Indicate initialization to MSI mode */
6247 phba->intr_type = MSI;
6248 intr_mode = 1;
6249 }
6250 }
6251
6252 /* Fallback to INTx if both MSI-X/MSI initalization failed */
6253 if (phba->intr_type == NONE) {
6254 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6255 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6256 if (!retval) {
6257 /* Indicate initialization to INTx mode */
6258 phba->intr_type = INTx;
6259 intr_mode = 0;
6260 }
6261 }
6262 return intr_mode;
6263}
6264
6265/**
6266 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6267 * @phba: pointer to lpfc hba data structure.
6268 *
6269 * This routine is invoked to disable device interrupt and disassociate the
6270 * driver's interrupt handler(s) from interrupt vector(s) to device with
6271 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6272 * release the interrupt vector(s) for the message signaled interrupt.
6273 **/
6274static void
6275lpfc_sli_disable_intr(struct lpfc_hba *phba)
6276{
6277 /* Disable the currently initialized interrupt mode */
6278 if (phba->intr_type == MSIX)
6279 lpfc_sli_disable_msix(phba);
6280 else if (phba->intr_type == MSI)
6281 lpfc_sli_disable_msi(phba);
6282 else if (phba->intr_type == INTx)
6283 free_irq(phba->pcidev->irq, phba);
6284
6285 /* Reset interrupt management states */
6286 phba->intr_type = NONE;
6287 phba->sli.slistat.sli_intr = 0;
6288
6289 return;
6290}
6291
6292/**
6293 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6294 * @phba: pointer to lpfc hba data structure.
6295 *
6296 * This routine is invoked to enable the MSI-X interrupt vectors to device
6297 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6298 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6299 * enables either all or nothing, depending on the current availability of
6300 * PCI vector resources. The device driver is responsible for calling the
6301 * individual request_irq() to register each MSI-X vector with a interrupt
6302 * handler, which is done in this function. Note that later when device is
6303 * unloading, the driver should always call free_irq() on all MSI-X vectors
6304 * it has done request_irq() on before calling pci_disable_msix(). Failure
6305 * to do so results in a BUG_ON() and a device will be left with MSI-X
6306 * enabled and leaks its vectors.
6307 *
6308 * Return codes
6309 * 0 - sucessful
6310 * other values - error
6311 **/
6312static int
6313lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6314{
6315 int rc, index;
6316
6317 /* Set up MSI-X multi-message vectors */
6318 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6319 phba->sli4_hba.msix_entries[index].entry = index;
6320
6321 /* Configure MSI-X capability structure */
6322 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6323 phba->sli4_hba.cfg_eqn);
6324 if (rc) {
6325 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6326 "0484 PCI enable MSI-X failed (%d)\n", rc);
6327 goto msi_fail_out;
6328 }
6329 /* Log MSI-X vector assignment */
6330 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6331 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6332 "0489 MSI-X entry[%d]: vector=x%x "
6333 "message=%d\n", index,
6334 phba->sli4_hba.msix_entries[index].vector,
6335 phba->sli4_hba.msix_entries[index].entry);
6336 /*
6337 * Assign MSI-X vectors to interrupt handlers
6338 */
6339
6340 /* The first vector must associated to slow-path handler for MQ */
6341 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6342 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6343 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6344 if (rc) {
6345 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6346 "0485 MSI-X slow-path request_irq failed "
6347 "(%d)\n", rc);
6348 goto msi_fail_out;
6349 }
6350
6351 /* The rest of the vector(s) are associated to fast-path handler(s) */
6352 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6353 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6354 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6355 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6356 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6357 LPFC_FP_DRIVER_HANDLER_NAME,
6358 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6359 if (rc) {
6360 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6361 "0486 MSI-X fast-path (%d) "
6362 "request_irq failed (%d)\n", index, rc);
6363 goto cfg_fail_out;
6364 }
6365 }
6366
6367 return rc;
6368
6369cfg_fail_out:
6370 /* free the irq already requested */
6371 for (--index; index >= 1; index--)
6372 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6373 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6374
6375 /* free the irq already requested */
6376 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6377
6378msi_fail_out:
6379 /* Unconfigure MSI-X capability structure */
6380 pci_disable_msix(phba->pcidev);
6381 return rc;
6382}
6383
6384/**
6385 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6386 * @phba: pointer to lpfc hba data structure.
6387 *
6388 * This routine is invoked to release the MSI-X vectors and then disable the
6389 * MSI-X interrupt mode to device with SLI-4 interface spec.
6390 **/
6391static void
6392lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6393{
6394 int index;
6395
6396 /* Free up MSI-X multi-message vectors */
6397 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6398
6399 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6400 free_irq(phba->sli4_hba.msix_entries[index].vector,
6401 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6402 /* Disable MSI-X */
6403 pci_disable_msix(phba->pcidev);
6404
6405 return;
6406}
6407
6408/**
6409 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6410 * @phba: pointer to lpfc hba data structure.
6411 *
6412 * This routine is invoked to enable the MSI interrupt mode to device with
6413 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6414 * to enable the MSI vector. The device driver is responsible for calling
6415 * the request_irq() to register MSI vector with a interrupt the handler,
6416 * which is done in this function.
6417 *
6418 * Return codes
6419 * 0 - sucessful
6420 * other values - error
6421 **/
6422static int
6423lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6424{
6425 int rc, index;
6426
6427 rc = pci_enable_msi(phba->pcidev);
6428 if (!rc)
6429 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6430 "0487 PCI enable MSI mode success.\n");
6431 else {
6432 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6433 "0488 PCI enable MSI mode failed (%d)\n", rc);
6434 return rc;
6435 }
6436
6437 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6438 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6439 if (rc) {
6440 pci_disable_msi(phba->pcidev);
6441 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6442 "0490 MSI request_irq failed (%d)\n", rc);
6443 }
6444
6445 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6446 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6447 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6448 }
6449
6450 return rc;
6451}
6452
6453/**
6454 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6455 * @phba: pointer to lpfc hba data structure.
6456 *
6457 * This routine is invoked to disable the MSI interrupt mode to device with
6458 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
6459 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6460 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6461 * its vector.
6462 **/
6463static void
6464lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6465{
6466 free_irq(phba->pcidev->irq, phba);
6467 pci_disable_msi(phba->pcidev);
6468 return;
6469}
6470
6471/**
6472 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
6473 * @phba: pointer to lpfc hba data structure.
6474 *
6475 * This routine is invoked to enable device interrupt and associate driver's
6476 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
6477 * interface spec. Depends on the interrupt mode configured to the driver,
6478 * the driver will try to fallback from the configured interrupt mode to an
6479 * interrupt mode which is supported by the platform, kernel, and device in
6480 * the order of:
6481 * MSI-X -> MSI -> IRQ.
6482 *
6483 * Return codes
6484 * 0 - sucessful
6485 * other values - error
6486 **/
6487static uint32_t
6488lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6489{
6490 uint32_t intr_mode = LPFC_INTR_ERROR;
6491 int retval, index;
6492
6493 if (cfg_mode == 2) {
6494 /* Preparation before conf_msi mbox cmd */
6495 retval = 0;
6496 if (!retval) {
6497 /* Now, try to enable MSI-X interrupt mode */
6498 retval = lpfc_sli4_enable_msix(phba);
6499 if (!retval) {
6500 /* Indicate initialization to MSI-X mode */
6501 phba->intr_type = MSIX;
6502 intr_mode = 2;
6503 }
6504 }
6505 }
6506
6507 /* Fallback to MSI if MSI-X initialization failed */
6508 if (cfg_mode >= 1 && phba->intr_type == NONE) {
6509 retval = lpfc_sli4_enable_msi(phba);
6510 if (!retval) {
6511 /* Indicate initialization to MSI mode */
6512 phba->intr_type = MSI;
6513 intr_mode = 1;
6514 }
6515 }
6516
6517 /* Fallback to INTx if both MSI-X/MSI initalization failed */
6518 if (phba->intr_type == NONE) {
6519 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6520 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6521 if (!retval) {
6522 /* Indicate initialization to INTx mode */
6523 phba->intr_type = INTx;
6524 intr_mode = 0;
6525 for (index = 0; index < phba->cfg_fcp_eq_count;
6526 index++) {
6527 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6528 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6529 }
6530 }
6531 }
6532 return intr_mode;
6533}
6534
6535/**
6536 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
6537 * @phba: pointer to lpfc hba data structure.
6538 *
6539 * This routine is invoked to disable device interrupt and disassociate
6540 * the driver's interrupt handler(s) from interrupt vector(s) to device
6541 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
6542 * will release the interrupt vector(s) for the message signaled interrupt.
6543 **/
6544static void
6545lpfc_sli4_disable_intr(struct lpfc_hba *phba)
6546{
6547 /* Disable the currently initialized interrupt mode */
6548 if (phba->intr_type == MSIX)
6549 lpfc_sli4_disable_msix(phba);
6550 else if (phba->intr_type == MSI)
6551 lpfc_sli4_disable_msi(phba);
6552 else if (phba->intr_type == INTx)
6553 free_irq(phba->pcidev->irq, phba);
6554
6555 /* Reset interrupt management states */
6556 phba->intr_type = NONE;
6557 phba->sli.slistat.sli_intr = 0;
6558
6559 return;
6560}
6561
6562/**
6563 * lpfc_unset_hba - Unset SLI3 hba device initialization
6564 * @phba: pointer to lpfc hba data structure.
6565 *
6566 * This routine is invoked to unset the HBA device initialization steps to
6567 * a device with SLI-3 interface spec.
6568 **/
6569static void
6570lpfc_unset_hba(struct lpfc_hba *phba)
6571{
6572 struct lpfc_vport *vport = phba->pport;
6573 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6574
6575 spin_lock_irq(shost->host_lock);
6576 vport->load_flag |= FC_UNLOADING;
6577 spin_unlock_irq(shost->host_lock);
6578
6579 lpfc_stop_hba_timers(phba);
6580
6581 phba->pport->work_port_events = 0;
6582
6583 lpfc_sli_hba_down(phba);
6584
6585 lpfc_sli_brdrestart(phba);
6586
6587 lpfc_sli_disable_intr(phba);
6588
6589 return;
6590}
6591
6592/**
6593 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
6594 * @phba: pointer to lpfc hba data structure.
6595 *
6596 * This routine is invoked to unset the HBA device initialization steps to
6597 * a device with SLI-4 interface spec.
6598 **/
6599static void
6600lpfc_sli4_unset_hba(struct lpfc_hba *phba)
6601{
6602 struct lpfc_vport *vport = phba->pport;
6603 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6604
6605 spin_lock_irq(shost->host_lock);
6606 vport->load_flag |= FC_UNLOADING;
6607 spin_unlock_irq(shost->host_lock);
6608
6609 phba->pport->work_port_events = 0;
6610
6611 lpfc_sli4_hba_down(phba);
6612
6613 lpfc_sli4_disable_intr(phba);
6614
6615 return;
6616}
6617
6618/**
6619 * lpfc_sli4_hba_unset - Unset the fcoe hba
6620 * @phba: Pointer to HBA context object.
6621 *
6622 * This function is called in the SLI4 code path to reset the HBA's FCoE
6623 * function. The caller is not required to hold any lock. This routine
6624 * issues PCI function reset mailbox command to reset the FCoE function.
6625 * At the end of the function, it calls lpfc_hba_down_post function to
6626 * free any pending commands.
6627 **/
6628static void
6629lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6630{
6631 int wait_cnt = 0;
6632 LPFC_MBOXQ_t *mboxq;
6633
6634 lpfc_stop_hba_timers(phba);
6635 phba->sli4_hba.intr_enable = 0;
6636
6637 /*
6638 * Gracefully wait out the potential current outstanding asynchronous
6639 * mailbox command.
6640 */
6641
6642 /* First, block any pending async mailbox command from posted */
6643 spin_lock_irq(&phba->hbalock);
6644 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
6645 spin_unlock_irq(&phba->hbalock);
6646 /* Now, trying to wait it out if we can */
6647 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6648 msleep(10);
6649 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6650 break;
6651 }
6652 /* Forcefully release the outstanding mailbox command if timed out */
6653 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6654 spin_lock_irq(&phba->hbalock);
6655 mboxq = phba->sli.mbox_active;
6656 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
6657 __lpfc_mbox_cmpl_put(phba, mboxq);
6658 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6659 phba->sli.mbox_active = NULL;
6660 spin_unlock_irq(&phba->hbalock);
6661 }
6662
6663 /* Tear down the queues in the HBA */
6664 lpfc_sli4_queue_unset(phba);
6665
6666 /* Disable PCI subsystem interrupt */
6667 lpfc_sli4_disable_intr(phba);
6668
6669 /* Stop kthread signal shall trigger work_done one more time */
6670 kthread_stop(phba->worker_thread);
6671
6672 /* Stop the SLI4 device port */
6673 phba->pport->work_port_events = 0;
6674}
6675
6676/**
6677 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6678 * @pdev: pointer to PCI device
6679 * @pid: pointer to PCI device identifier
6680 *
6681 * This routine is to be called to attach a device with SLI-3 interface spec
6682 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6683 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
6684 * information of the device and driver to see if the driver state that it can
6685 * support this kind of device. If the match is successful, the driver core
6686 * invokes this routine. If this routine determines it can claim the HBA, it
6687 * does all the initialization that it needs to do to handle the HBA properly.
6688 *
6689 * Return code
6690 * 0 - driver can claim the device
6691 * negative value - driver can not claim the device
6692 **/
6693static int __devinit
6694lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6695{
6696 struct lpfc_hba *phba;
6697 struct lpfc_vport *vport = NULL;
6698 int error;
6699 uint32_t cfg_mode, intr_mode;
6700
6701 /* Allocate memory for HBA structure */
6702 phba = lpfc_hba_alloc(pdev);
6703 if (!phba)
6704 return -ENOMEM;
6705
6706 /* Perform generic PCI device enabling operation */
6707 error = lpfc_enable_pci_dev(phba);
6708 if (error) {
6709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6710 "1401 Failed to enable pci device.\n");
6711 goto out_free_phba;
6712 }
6713
6714 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
6715 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
6716 if (error)
6717 goto out_disable_pci_dev;
6718
6719 /* Set up SLI-3 specific device PCI memory space */
6720 error = lpfc_sli_pci_mem_setup(phba);
6721 if (error) {
6722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6723 "1402 Failed to set up pci memory space.\n");
6724 goto out_disable_pci_dev;
6725 }
6726
6727 /* Set up phase-1 common device driver resources */
6728 error = lpfc_setup_driver_resource_phase1(phba);
6729 if (error) {
6730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6731 "1403 Failed to set up driver resource.\n");
6732 goto out_unset_pci_mem_s3;
6733 }
6734
6735 /* Set up SLI-3 specific device driver resources */
6736 error = lpfc_sli_driver_resource_setup(phba);
6737 if (error) {
6738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6739 "1404 Failed to set up driver resource.\n");
6740 goto out_unset_pci_mem_s3;
6741 }
6742
6743 /* Initialize and populate the iocb list per host */
6744 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
6745 if (error) {
6746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6747 "1405 Failed to initialize iocb list.\n");
6748 goto out_unset_driver_resource_s3;
6749 }
6750
6751 /* Set up common device driver resources */
6752 error = lpfc_setup_driver_resource_phase2(phba);
6753 if (error) {
6754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6755 "1406 Failed to set up driver resource.\n");
6756 goto out_free_iocb_list;
6757 }
6758
6759 /* Create SCSI host to the physical port */
6760 error = lpfc_create_shost(phba);
6761 if (error) {
6762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6763 "1407 Failed to create scsi host.\n");
6764 goto out_unset_driver_resource;
6765 }
6766
6767 /* Configure sysfs attributes */
6768 vport = phba->pport;
6769 error = lpfc_alloc_sysfs_attr(vport);
6770 if (error) {
6771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6772 "1476 Failed to allocate sysfs attr\n");
6773 goto out_destroy_shost;
6774 }
6775
6776 /* Now, trying to enable interrupt and bring up the device */
6777 cfg_mode = phba->cfg_use_msi;
6778 while (true) {
6779 /* Put device to a known state before enabling interrupt */
6780 lpfc_stop_port(phba);
6781 /* Configure and enable interrupt */
6782 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
6783 if (intr_mode == LPFC_INTR_ERROR) {
6784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6785 "0431 Failed to enable interrupt.\n");
6786 error = -ENODEV;
6787 goto out_free_sysfs_attr;
6788 }
6789 /* SLI-3 HBA setup */
6790 if (lpfc_sli_hba_setup(phba)) {
6791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6792 "1477 Failed to set up hba\n");
6793 error = -ENODEV;
6794 goto out_remove_device;
6795 }
6796
6797 /* Wait 50ms for the interrupts of previous mailbox commands */
6798 msleep(50);
6799 /* Check active interrupts on message signaled interrupts */
6800 if (intr_mode == 0 ||
6801 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
6802 /* Log the current active interrupt mode */
6803 phba->intr_mode = intr_mode;
6804 lpfc_log_intr_mode(phba, intr_mode);
6805 break;
6806 } else {
6807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6808 "0447 Configure interrupt mode (%d) "
6809 "failed active interrupt test.\n",
6810 intr_mode);
6811 /* Disable the current interrupt mode */
6812 lpfc_sli_disable_intr(phba);
6813 /* Try next level of interrupt mode */
6814 cfg_mode = --intr_mode;
6815 }
6816 }
6817
6818 /* Perform post initialization setup */
6819 lpfc_post_init_setup(phba);
6820
6821 /* Check if there are static vports to be created. */
6822 lpfc_create_static_vport(phba);
6823
6824 return 0;
6825
6826out_remove_device:
6827 lpfc_unset_hba(phba);
6828out_free_sysfs_attr:
6829 lpfc_free_sysfs_attr(vport);
6830out_destroy_shost:
6831 lpfc_destroy_shost(phba);
6832out_unset_driver_resource:
6833 lpfc_unset_driver_resource_phase2(phba);
6834out_free_iocb_list:
6835 lpfc_free_iocb_list(phba);
6836out_unset_driver_resource_s3:
6837 lpfc_sli_driver_resource_unset(phba);
6838out_unset_pci_mem_s3:
6839 lpfc_sli_pci_mem_unset(phba);
6840out_disable_pci_dev:
6841 lpfc_disable_pci_dev(phba);
6842out_free_phba:
6843 lpfc_hba_free(phba);
6844 return error;
6845}
6846
6847/**
6848 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
6849 * @pdev: pointer to PCI device
6850 *
6851 * This routine is to be called to disattach a device with SLI-3 interface
6852 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6853 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6854 * device to be removed from the PCI subsystem properly.
6855 **/
6856static void __devexit
6857lpfc_pci_remove_one_s3(struct pci_dev *pdev)
6858{
6859 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6860 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6861 struct lpfc_vport **vports;
6862 struct lpfc_hba *phba = vport->phba;
6863 int i;
6864 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
6865
6866 spin_lock_irq(&phba->hbalock);
6867 vport->load_flag |= FC_UNLOADING;
6868 spin_unlock_irq(&phba->hbalock);
6869
6870 lpfc_free_sysfs_attr(vport);
6871
6872 /* Release all the vports against this physical port */
6873 vports = lpfc_create_vport_work_array(phba);
6874 if (vports != NULL)
6875 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
6876 fc_vport_terminate(vports[i]->fc_vport);
6877 lpfc_destroy_vport_work_array(phba, vports);
6878
6879 /* Remove FC host and then SCSI host with the physical port */
6880 fc_remove_host(shost);
6881 scsi_remove_host(shost);
6882 lpfc_cleanup(vport);
6883
6884 /*
6885 * Bring down the SLI Layer. This step disable all interrupts,
6886 * clears the rings, discards all mailbox commands, and resets
6887 * the HBA.
6888 */
6889
6890 /* HBA interrupt will be diabled after this call */
6891 lpfc_sli_hba_down(phba);
6892 /* Stop kthread signal shall trigger work_done one more time */
6893 kthread_stop(phba->worker_thread);
6894 /* Final cleanup of txcmplq and reset the HBA */
6895 lpfc_sli_brdrestart(phba);
6896
6897 lpfc_stop_hba_timers(phba);
6898 spin_lock_irq(&phba->hbalock);
6899 list_del_init(&vport->listentry);
6900 spin_unlock_irq(&phba->hbalock);
6901
6902 lpfc_debugfs_terminate(vport);
6903
6904 /* Disable interrupt */
6905 lpfc_sli_disable_intr(phba);
6906
6907 pci_set_drvdata(pdev, NULL);
6908 scsi_host_put(shost);
6909
6910 /*
6911 * Call scsi_free before mem_free since scsi bufs are released to their
6912 * corresponding pools here.
6913 */
6914 lpfc_scsi_free(phba);
6915 lpfc_mem_free_all(phba);
6916
6917 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6918 phba->hbqslimp.virt, phba->hbqslimp.phys);
6919
6920 /* Free resources associated with SLI2 interface */
6921 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6922 phba->slim2p.virt, phba->slim2p.phys);
6923
6924 /* unmap adapter SLIM and Control Registers */
6925 iounmap(phba->ctrl_regs_memmap_p);
6926 iounmap(phba->slim_memmap_p);
6927
6928 lpfc_hba_free(phba);
6929
6930 pci_release_selected_regions(pdev, bars);
6931 pci_disable_device(pdev);
6932}
6933
6934/**
6935 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
6936 * @pdev: pointer to PCI device
6937 * @msg: power management message
6938 *
6939 * This routine is to be called from the kernel's PCI subsystem to support
6940 * system Power Management (PM) to device with SLI-3 interface spec. When
6941 * PM invokes this method, it quiesces the device by stopping the driver's
6942 * worker thread for the device, turning off device's interrupt and DMA,
6943 * and bring the device offline. Note that as the driver implements the
6944 * minimum PM requirements to a power-aware driver's PM support for the
6945 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
6946 * to the suspend() method call will be treated as SUSPEND and the driver will
6947 * fully reinitialize its device during resume() method call, the driver will
6948 * set device to PCI_D3hot state in PCI config space instead of setting it
6949 * according to the @msg provided by the PM.
6950 *
6951 * Return code
6952 * 0 - driver suspended the device
6953 * Error otherwise
6954 **/
6955static int
6956lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
6957{
6958 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6959 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
6960
6961 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6962 "0473 PCI device Power Management suspend.\n");
6963
6964 /* Bring down the device */
6965 lpfc_offline_prep(phba);
6966 lpfc_offline(phba);
6967 kthread_stop(phba->worker_thread);
6968
6969 /* Disable interrupt from device */
6970 lpfc_sli_disable_intr(phba);
6971
6972 /* Save device state to PCI config space */
6973 pci_save_state(pdev);
6974 pci_set_power_state(pdev, PCI_D3hot);
6975
6976 return 0;
6977}
6978
6979/**
6980 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
6981 * @pdev: pointer to PCI device
6982 *
6983 * This routine is to be called from the kernel's PCI subsystem to support
6984 * system Power Management (PM) to device with SLI-3 interface spec. When PM
6985 * invokes this method, it restores the device's PCI config space state and
6986 * fully reinitializes the device and brings it online. Note that as the
6987 * driver implements the minimum PM requirements to a power-aware driver's
6988 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
6989 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
6990 * driver will fully reinitialize its device during resume() method call,
6991 * the device will be set to PCI_D0 directly in PCI config space before
6992 * restoring the state.
6993 *
6994 * Return code
6995 * 0 - driver suspended the device
6996 * Error otherwise
6997 **/
6998static int
6999lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7000{
7001 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7002 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7003 uint32_t intr_mode;
7004 int error;
7005
7006 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7007 "0452 PCI device Power Management resume.\n");
7008
7009 /* Restore device state from PCI config space */
7010 pci_set_power_state(pdev, PCI_D0);
7011 pci_restore_state(pdev);
7012 if (pdev->is_busmaster)
7013 pci_set_master(pdev);
7014
7015 /* Startup the kernel thread for this host adapter. */
7016 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7017 "lpfc_worker_%d", phba->brd_no);
7018 if (IS_ERR(phba->worker_thread)) {
7019 error = PTR_ERR(phba->worker_thread);
7020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7021 "0434 PM resume failed to start worker "
7022 "thread: error=x%x.\n", error);
7023 return error;
7024 }
7025
7026 /* Configure and enable interrupt */
7027 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7028 if (intr_mode == LPFC_INTR_ERROR) {
7029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7030 "0430 PM resume Failed to enable interrupt\n");
7031 return -EIO;
7032 } else
7033 phba->intr_mode = intr_mode;
7034
7035 /* Restart HBA and bring it online */
7036 lpfc_sli_brdrestart(phba);
7037 lpfc_online(phba);
7038
7039 /* Log the current active interrupt mode */
7040 lpfc_log_intr_mode(phba, phba->intr_mode);
7041
7042 return 0;
7043}
7044
7045/**
7046 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7047 * @pdev: pointer to PCI device.
7048 * @state: the current PCI connection state.
7049 *
7050 * This routine is called from the PCI subsystem for I/O error handling to
7051 * device with SLI-3 interface spec. This function is called by the PCI
7052 * subsystem after a PCI bus error affecting this device has been detected.
7053 * When this function is invoked, it will need to stop all the I/Os and
7054 * interrupt(s) to the device. Once that is done, it will return
7055 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7056 * as desired.
7057 *
7058 * Return codes
7059 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7060 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7061 **/
7062static pci_ers_result_t
7063lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7064{
7065 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7066 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7067 struct lpfc_sli *psli = &phba->sli;
7068 struct lpfc_sli_ring *pring;
7069
7070 if (state == pci_channel_io_perm_failure) {
7071 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7072 "0472 PCI channel I/O permanent failure\n");
7073 /* Block all SCSI devices' I/Os on the host */
7074 lpfc_scsi_dev_block(phba);
7075 /* Clean up all driver's outstanding SCSI I/Os */
7076 lpfc_sli_flush_fcp_rings(phba);
7077 return PCI_ERS_RESULT_DISCONNECT;
7078 }
7079
7080 pci_disable_device(pdev);
7081 /*
7082 * There may be I/Os dropped by the firmware.
7083 * Error iocb (I/O) on txcmplq and let the SCSI layer
7084 * retry it after re-establishing link.
7085 */
7086 pring = &psli->ring[psli->fcp_ring];
7087 lpfc_sli_abort_iocb_ring(phba, pring);
7088
7089 /* Disable interrupt */
7090 lpfc_sli_disable_intr(phba);
7091
7092 /* Request a slot reset. */
7093 return PCI_ERS_RESULT_NEED_RESET;
7094}
7095
7096/**
7097 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7098 * @pdev: pointer to PCI device.
7099 *
7100 * This routine is called from the PCI subsystem for error handling to
7101 * device with SLI-3 interface spec. This is called after PCI bus has been
7102 * reset to restart the PCI card from scratch, as if from a cold-boot.
7103 * During the PCI subsystem error recovery, after driver returns
7104 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7105 * recovery and then call this routine before calling the .resume method
7106 * to recover the device. This function will initialize the HBA device,
7107 * enable the interrupt, but it will just put the HBA to offline state
7108 * without passing any I/O traffic.
7109 *
7110 * Return codes
7111 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7112 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7113 */
7114static pci_ers_result_t
7115lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7116{
7117 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7118 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7119 struct lpfc_sli *psli = &phba->sli;
7120 uint32_t intr_mode;
7121
7122 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7123 if (pci_enable_device_mem(pdev)) {
7124 printk(KERN_ERR "lpfc: Cannot re-enable "
7125 "PCI device after reset.\n");
7126 return PCI_ERS_RESULT_DISCONNECT;
7127 }
7128
7129 pci_restore_state(pdev);
7130 if (pdev->is_busmaster)
7131 pci_set_master(pdev);
7132
7133 spin_lock_irq(&phba->hbalock);
7134 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7135 spin_unlock_irq(&phba->hbalock);
7136
7137 /* Configure and enable interrupt */
7138 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7139 if (intr_mode == LPFC_INTR_ERROR) {
7140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7141 "0427 Cannot re-enable interrupt after "
7142 "slot reset.\n");
7143 return PCI_ERS_RESULT_DISCONNECT;
7144 } else
7145 phba->intr_mode = intr_mode;
7146
7147 /* Take device offline; this will perform cleanup */
7148 lpfc_offline(phba);
7149 lpfc_sli_brdrestart(phba);
7150
7151 /* Log the current active interrupt mode */
7152 lpfc_log_intr_mode(phba, phba->intr_mode);
7153
7154 return PCI_ERS_RESULT_RECOVERED;
7155}
7156
7157/**
7158 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
7159 * @pdev: pointer to PCI device
7160 *
7161 * This routine is called from the PCI subsystem for error handling to device
7162 * with SLI-3 interface spec. It is called when kernel error recovery tells
7163 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7164 * error recovery. After this call, traffic can start to flow from this device
7165 * again.
7166 */
7167static void
7168lpfc_io_resume_s3(struct pci_dev *pdev)
7169{
7170 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7171 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3772a991 7172
da0436e9
JS
7173 lpfc_online(phba);
7174}
3772a991 7175
da0436e9
JS
7176/**
7177 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7178 * @phba: pointer to lpfc hba data structure.
7179 *
7180 * returns the number of ELS/CT IOCBs to reserve
7181 **/
7182int
7183lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7184{
7185 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7186
7187 if (max_xri <= 100)
7188 return 4;
7189 else if (max_xri <= 256)
7190 return 8;
7191 else if (max_xri <= 512)
7192 return 16;
7193 else if (max_xri <= 1024)
7194 return 32;
7195 else
7196 return 48;
3772a991
JS
7197}
7198
7199/**
da0436e9 7200 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
3772a991
JS
7201 * @pdev: pointer to PCI device
7202 * @pid: pointer to PCI device identifier
7203 *
da0436e9
JS
7204 * This routine is called from the kernel's PCI subsystem to device with
7205 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
3772a991 7206 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
da0436e9
JS
7207 * information of the device and driver to see if the driver state that it
7208 * can support this kind of device. If the match is successful, the driver
7209 * core invokes this routine. If this routine determines it can claim the HBA,
7210 * it does all the initialization that it needs to do to handle the HBA
7211 * properly.
3772a991
JS
7212 *
7213 * Return code
7214 * 0 - driver can claim the device
7215 * negative value - driver can not claim the device
7216 **/
7217static int __devinit
da0436e9 7218lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
3772a991
JS
7219{
7220 struct lpfc_hba *phba;
7221 struct lpfc_vport *vport = NULL;
7222 int error;
7223 uint32_t cfg_mode, intr_mode;
da0436e9 7224 int mcnt;
3772a991
JS
7225
7226 /* Allocate memory for HBA structure */
7227 phba = lpfc_hba_alloc(pdev);
7228 if (!phba)
7229 return -ENOMEM;
7230
7231 /* Perform generic PCI device enabling operation */
7232 error = lpfc_enable_pci_dev(phba);
7233 if (error) {
7234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 7235 "1409 Failed to enable pci device.\n");
3772a991
JS
7236 goto out_free_phba;
7237 }
7238
da0436e9
JS
7239 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
7240 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
3772a991
JS
7241 if (error)
7242 goto out_disable_pci_dev;
7243
da0436e9
JS
7244 /* Set up SLI-4 specific device PCI memory space */
7245 error = lpfc_sli4_pci_mem_setup(phba);
3772a991
JS
7246 if (error) {
7247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 7248 "1410 Failed to set up pci memory space.\n");
3772a991
JS
7249 goto out_disable_pci_dev;
7250 }
7251
7252 /* Set up phase-1 common device driver resources */
7253 error = lpfc_setup_driver_resource_phase1(phba);
7254 if (error) {
7255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9
JS
7256 "1411 Failed to set up driver resource.\n");
7257 goto out_unset_pci_mem_s4;
3772a991
JS
7258 }
7259
da0436e9
JS
7260 /* Set up SLI-4 Specific device driver resources */
7261 error = lpfc_sli4_driver_resource_setup(phba);
3772a991
JS
7262 if (error) {
7263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9
JS
7264 "1412 Failed to set up driver resource.\n");
7265 goto out_unset_pci_mem_s4;
3772a991
JS
7266 }
7267
7268 /* Initialize and populate the iocb list per host */
da0436e9
JS
7269 error = lpfc_init_iocb_list(phba,
7270 phba->sli4_hba.max_cfg_param.max_xri);
3772a991
JS
7271 if (error) {
7272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9
JS
7273 "1413 Failed to initialize iocb list.\n");
7274 goto out_unset_driver_resource_s4;
3772a991
JS
7275 }
7276
7277 /* Set up common device driver resources */
7278 error = lpfc_setup_driver_resource_phase2(phba);
7279 if (error) {
7280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 7281 "1414 Failed to set up driver resource.\n");
3772a991
JS
7282 goto out_free_iocb_list;
7283 }
7284
7285 /* Create SCSI host to the physical port */
7286 error = lpfc_create_shost(phba);
7287 if (error) {
7288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 7289 "1415 Failed to create scsi host.\n");
3772a991
JS
7290 goto out_unset_driver_resource;
7291 }
9399627f 7292
5b75da2f 7293 /* Configure sysfs attributes */
3772a991
JS
7294 vport = phba->pport;
7295 error = lpfc_alloc_sysfs_attr(vport);
7296 if (error) {
9399627f 7297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 7298 "1416 Failed to allocate sysfs attr\n");
3772a991 7299 goto out_destroy_shost;
98c9ea5c 7300 }
875fbdfe 7301
3772a991 7302 /* Now, trying to enable interrupt and bring up the device */
5b75da2f
JS
7303 cfg_mode = phba->cfg_use_msi;
7304 while (true) {
3772a991
JS
7305 /* Put device to a known state before enabling interrupt */
7306 lpfc_stop_port(phba);
5b75da2f 7307 /* Configure and enable interrupt */
da0436e9 7308 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
5b75da2f
JS
7309 if (intr_mode == LPFC_INTR_ERROR) {
7310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 7311 "0426 Failed to enable interrupt.\n");
3772a991 7312 error = -ENODEV;
5b75da2f
JS
7313 goto out_free_sysfs_attr;
7314 }
da0436e9
JS
7315 /* Set up SLI-4 HBA */
7316 if (lpfc_sli4_hba_setup(phba)) {
5b75da2f 7317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 7318 "1421 Failed to set up hba\n");
5b75da2f 7319 error = -ENODEV;
da0436e9 7320 goto out_disable_intr;
5b75da2f
JS
7321 }
7322
da0436e9
JS
7323 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
7324 if (intr_mode != 0)
7325 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7326 LPFC_ACT_INTR_CNT);
7327
7328 /* Check active interrupts received only for MSI/MSI-X */
3772a991 7329 if (intr_mode == 0 ||
da0436e9 7330 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
5b75da2f
JS
7331 /* Log the current active interrupt mode */
7332 phba->intr_mode = intr_mode;
7333 lpfc_log_intr_mode(phba, intr_mode);
7334 break;
5b75da2f 7335 }
da0436e9
JS
7336 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7337 "0451 Configure interrupt mode (%d) "
7338 "failed active interrupt test.\n",
7339 intr_mode);
7340 /* Unset the preivous SLI-4 HBA setup */
7341 lpfc_sli4_unset_hba(phba);
7342 /* Try next level of interrupt mode */
7343 cfg_mode = --intr_mode;
98c9ea5c 7344 }
858c9f6c 7345
3772a991
JS
7346 /* Perform post initialization setup */
7347 lpfc_post_init_setup(phba);
dea3101e 7348
dea3101e 7349 return 0;
7350
da0436e9
JS
7351out_disable_intr:
7352 lpfc_sli4_disable_intr(phba);
5b75da2f
JS
7353out_free_sysfs_attr:
7354 lpfc_free_sysfs_attr(vport);
3772a991
JS
7355out_destroy_shost:
7356 lpfc_destroy_shost(phba);
7357out_unset_driver_resource:
7358 lpfc_unset_driver_resource_phase2(phba);
7359out_free_iocb_list:
7360 lpfc_free_iocb_list(phba);
da0436e9
JS
7361out_unset_driver_resource_s4:
7362 lpfc_sli4_driver_resource_unset(phba);
7363out_unset_pci_mem_s4:
7364 lpfc_sli4_pci_mem_unset(phba);
3772a991
JS
7365out_disable_pci_dev:
7366 lpfc_disable_pci_dev(phba);
2e0fef85 7367out_free_phba:
3772a991 7368 lpfc_hba_free(phba);
dea3101e 7369 return error;
7370}
7371
e59058c4 7372/**
da0436e9 7373 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
e59058c4
JS
7374 * @pdev: pointer to PCI device
7375 *
da0436e9
JS
7376 * This routine is called from the kernel's PCI subsystem to device with
7377 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
3772a991
JS
7378 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7379 * device to be removed from the PCI subsystem properly.
e59058c4 7380 **/
dea3101e 7381static void __devexit
da0436e9 7382lpfc_pci_remove_one_s4(struct pci_dev *pdev)
dea3101e 7383{
da0436e9 7384 struct Scsi_Host *shost = pci_get_drvdata(pdev);
2e0fef85 7385 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
eada272d 7386 struct lpfc_vport **vports;
da0436e9 7387 struct lpfc_hba *phba = vport->phba;
eada272d 7388 int i;
8a4df120 7389
da0436e9 7390 /* Mark the device unloading flag */
549e55cd 7391 spin_lock_irq(&phba->hbalock);
51ef4c26 7392 vport->load_flag |= FC_UNLOADING;
549e55cd 7393 spin_unlock_irq(&phba->hbalock);
2e0fef85 7394
da0436e9 7395 /* Free the HBA sysfs attributes */
858c9f6c
JS
7396 lpfc_free_sysfs_attr(vport);
7397
eada272d
JS
7398 /* Release all the vports against this physical port */
7399 vports = lpfc_create_vport_work_array(phba);
7400 if (vports != NULL)
3772a991 7401 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
eada272d
JS
7402 fc_vport_terminate(vports[i]->fc_vport);
7403 lpfc_destroy_vport_work_array(phba, vports);
7404
7405 /* Remove FC host and then SCSI host with the physical port */
858c9f6c
JS
7406 fc_remove_host(shost);
7407 scsi_remove_host(shost);
da0436e9
JS
7408
7409 /* Perform cleanup on the physical port */
87af33fe
JS
7410 lpfc_cleanup(vport);
7411
2e0fef85 7412 /*
da0436e9 7413 * Bring down the SLI Layer. This step disables all interrupts,
2e0fef85 7414 * clears the rings, discards all mailbox commands, and resets
da0436e9 7415 * the HBA FCoE function.
2e0fef85 7416 */
da0436e9
JS
7417 lpfc_debugfs_terminate(vport);
7418 lpfc_sli4_hba_unset(phba);
a257bf90 7419
858c9f6c
JS
7420 spin_lock_irq(&phba->hbalock);
7421 list_del_init(&vport->listentry);
7422 spin_unlock_irq(&phba->hbalock);
7423
da0436e9
JS
7424 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7425 * buffers are released to their corresponding pools here.
2e0fef85
JS
7426 */
7427 lpfc_scsi_free(phba);
da0436e9 7428 lpfc_sli4_driver_resource_unset(phba);
ed957684 7429
da0436e9
JS
7430 /* Unmap adapter Control and Doorbell registers */
7431 lpfc_sli4_pci_mem_unset(phba);
2e0fef85 7432
da0436e9
JS
7433 /* Release PCI resources and disable device's PCI function */
7434 scsi_host_put(shost);
7435 lpfc_disable_pci_dev(phba);
2e0fef85 7436
da0436e9 7437 /* Finally, free the driver's device data structure */
3772a991 7438 lpfc_hba_free(phba);
2e0fef85 7439
da0436e9 7440 return;
dea3101e 7441}
7442
3a55b532 7443/**
da0436e9 7444 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
3a55b532
JS
7445 * @pdev: pointer to PCI device
7446 * @msg: power management message
7447 *
da0436e9
JS
7448 * This routine is called from the kernel's PCI subsystem to support system
7449 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7450 * this method, it quiesces the device by stopping the driver's worker
7451 * thread for the device, turning off device's interrupt and DMA, and bring
7452 * the device offline. Note that as the driver implements the minimum PM
7453 * requirements to a power-aware driver's PM support for suspend/resume -- all
7454 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7455 * method call will be treated as SUSPEND and the driver will fully
7456 * reinitialize its device during resume() method call, the driver will set
7457 * device to PCI_D3hot state in PCI config space instead of setting it
3772a991 7458 * according to the @msg provided by the PM.
3a55b532
JS
7459 *
7460 * Return code
3772a991
JS
7461 * 0 - driver suspended the device
7462 * Error otherwise
3a55b532
JS
7463 **/
7464static int
da0436e9 7465lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
3a55b532
JS
7466{
7467 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7468 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7469
7470 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
da0436e9 7471 "0298 PCI device Power Management suspend.\n");
3a55b532
JS
7472
7473 /* Bring down the device */
7474 lpfc_offline_prep(phba);
7475 lpfc_offline(phba);
7476 kthread_stop(phba->worker_thread);
7477
7478 /* Disable interrupt from device */
da0436e9 7479 lpfc_sli4_disable_intr(phba);
3a55b532
JS
7480
7481 /* Save device state to PCI config space */
7482 pci_save_state(pdev);
7483 pci_set_power_state(pdev, PCI_D3hot);
7484
7485 return 0;
7486}
7487
7488/**
da0436e9 7489 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
3a55b532
JS
7490 * @pdev: pointer to PCI device
7491 *
da0436e9
JS
7492 * This routine is called from the kernel's PCI subsystem to support system
7493 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes
7494 * this method, it restores the device's PCI config space state and fully
7495 * reinitializes the device and brings it online. Note that as the driver
7496 * implements the minimum PM requirements to a power-aware driver's PM for
7497 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7498 * to the suspend() method call will be treated as SUSPEND and the driver
7499 * will fully reinitialize its device during resume() method call, the device
7500 * will be set to PCI_D0 directly in PCI config space before restoring the
7501 * state.
3a55b532
JS
7502 *
7503 * Return code
3772a991
JS
7504 * 0 - driver suspended the device
7505 * Error otherwise
3a55b532
JS
7506 **/
7507static int
da0436e9 7508lpfc_pci_resume_one_s4(struct pci_dev *pdev)
3a55b532
JS
7509{
7510 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7511 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
5b75da2f 7512 uint32_t intr_mode;
3a55b532
JS
7513 int error;
7514
7515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
da0436e9 7516 "0292 PCI device Power Management resume.\n");
3a55b532
JS
7517
7518 /* Restore device state from PCI config space */
7519 pci_set_power_state(pdev, PCI_D0);
7520 pci_restore_state(pdev);
7521 if (pdev->is_busmaster)
7522 pci_set_master(pdev);
7523
da0436e9 7524 /* Startup the kernel thread for this host adapter. */
3a55b532
JS
7525 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7526 "lpfc_worker_%d", phba->brd_no);
7527 if (IS_ERR(phba->worker_thread)) {
7528 error = PTR_ERR(phba->worker_thread);
7529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 7530 "0293 PM resume failed to start worker "
3a55b532
JS
7531 "thread: error=x%x.\n", error);
7532 return error;
7533 }
7534
5b75da2f 7535 /* Configure and enable interrupt */
da0436e9 7536 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
5b75da2f 7537 if (intr_mode == LPFC_INTR_ERROR) {
3a55b532 7538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 7539 "0294 PM resume Failed to enable interrupt\n");
5b75da2f
JS
7540 return -EIO;
7541 } else
7542 phba->intr_mode = intr_mode;
3a55b532
JS
7543
7544 /* Restart HBA and bring it online */
7545 lpfc_sli_brdrestart(phba);
7546 lpfc_online(phba);
7547
5b75da2f
JS
7548 /* Log the current active interrupt mode */
7549 lpfc_log_intr_mode(phba, phba->intr_mode);
7550
3a55b532
JS
7551 return 0;
7552}
7553
8d63f375 7554/**
da0436e9 7555 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
e59058c4
JS
7556 * @pdev: pointer to PCI device.
7557 * @state: the current PCI connection state.
8d63f375 7558 *
da0436e9
JS
7559 * This routine is called from the PCI subsystem for error handling to device
7560 * with SLI-4 interface spec. This function is called by the PCI subsystem
7561 * after a PCI bus error affecting this device has been detected. When this
7562 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7563 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7564 * for the PCI subsystem to perform proper recovery as desired.
e59058c4
JS
7565 *
7566 * Return codes
3772a991
JS
7567 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7568 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
e59058c4 7569 **/
3772a991 7570static pci_ers_result_t
da0436e9 7571lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8d63f375 7572{
8d63f375
LV
7573 return PCI_ERS_RESULT_NEED_RESET;
7574}
7575
7576/**
da0436e9 7577 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
e59058c4
JS
7578 * @pdev: pointer to PCI device.
7579 *
da0436e9
JS
7580 * This routine is called from the PCI subsystem for error handling to device
7581 * with SLI-4 interface spec. It is called after PCI bus has been reset to
7582 * restart the PCI card from scratch, as if from a cold-boot. During the
7583 * PCI subsystem error recovery, after the driver returns
3772a991 7584 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
da0436e9
JS
7585 * recovery and then call this routine before calling the .resume method to
7586 * recover the device. This function will initialize the HBA device, enable
7587 * the interrupt, but it will just put the HBA to offline state without
7588 * passing any I/O traffic.
8d63f375 7589 *
e59058c4 7590 * Return codes
3772a991
JS
7591 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7592 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8d63f375 7593 */
3772a991 7594static pci_ers_result_t
da0436e9 7595lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8d63f375 7596{
8d63f375
LV
7597 return PCI_ERS_RESULT_RECOVERED;
7598}
7599
7600/**
da0436e9 7601 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
e59058c4 7602 * @pdev: pointer to PCI device
8d63f375 7603 *
3772a991 7604 * This routine is called from the PCI subsystem for error handling to device
da0436e9 7605 * with SLI-4 interface spec. It is called when kernel error recovery tells
3772a991
JS
7606 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7607 * error recovery. After this call, traffic can start to flow from this device
7608 * again.
da0436e9 7609 **/
3772a991 7610static void
da0436e9 7611lpfc_io_resume_s4(struct pci_dev *pdev)
8d63f375 7612{
da0436e9 7613 return;
8d63f375
LV
7614}
7615
3772a991
JS
7616/**
7617 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7618 * @pdev: pointer to PCI device
7619 * @pid: pointer to PCI device identifier
7620 *
7621 * This routine is to be registered to the kernel's PCI subsystem. When an
7622 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
7623 * at PCI device-specific information of the device and driver to see if the
7624 * driver state that it can support this kind of device. If the match is
7625 * successful, the driver core invokes this routine. This routine dispatches
7626 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7627 * do all the initialization that it needs to do to handle the HBA device
7628 * properly.
7629 *
7630 * Return code
7631 * 0 - driver can claim the device
7632 * negative value - driver can not claim the device
7633 **/
7634static int __devinit
7635lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7636{
7637 int rc;
7638 uint16_t dev_id;
7639
7640 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
7641 return -ENODEV;
7642
7643 switch (dev_id) {
da0436e9
JS
7644 case PCI_DEVICE_ID_TIGERSHARK:
7645 case PCI_DEVICE_ID_TIGERSHARK_S:
7646 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break;
3772a991
JS
7648 default:
7649 rc = lpfc_pci_probe_one_s3(pdev, pid);
7650 break;
7651 }
7652 return rc;
7653}
7654
7655/**
7656 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7657 * @pdev: pointer to PCI device
7658 *
7659 * This routine is to be registered to the kernel's PCI subsystem. When an
7660 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7661 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7662 * remove routine, which will perform all the necessary cleanup for the
7663 * device to be removed from the PCI subsystem properly.
7664 **/
7665static void __devexit
7666lpfc_pci_remove_one(struct pci_dev *pdev)
7667{
7668 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7669 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7670
7671 switch (phba->pci_dev_grp) {
7672 case LPFC_PCI_DEV_LP:
7673 lpfc_pci_remove_one_s3(pdev);
7674 break;
da0436e9
JS
7675 case LPFC_PCI_DEV_OC:
7676 lpfc_pci_remove_one_s4(pdev);
7677 break;
3772a991
JS
7678 default:
7679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7680 "1424 Invalid PCI device group: 0x%x\n",
7681 phba->pci_dev_grp);
7682 break;
7683 }
7684 return;
7685}
7686
7687/**
7688 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7689 * @pdev: pointer to PCI device
7690 * @msg: power management message
7691 *
7692 * This routine is to be registered to the kernel's PCI subsystem to support
7693 * system Power Management (PM). When PM invokes this method, it dispatches
7694 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7695 * suspend the device.
7696 *
7697 * Return code
7698 * 0 - driver suspended the device
7699 * Error otherwise
7700 **/
7701static int
7702lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7703{
7704 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7705 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7706 int rc = -ENODEV;
7707
7708 switch (phba->pci_dev_grp) {
7709 case LPFC_PCI_DEV_LP:
7710 rc = lpfc_pci_suspend_one_s3(pdev, msg);
7711 break;
da0436e9
JS
7712 case LPFC_PCI_DEV_OC:
7713 rc = lpfc_pci_suspend_one_s4(pdev, msg);
7714 break;
3772a991
JS
7715 default:
7716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7717 "1425 Invalid PCI device group: 0x%x\n",
7718 phba->pci_dev_grp);
7719 break;
7720 }
7721 return rc;
7722}
7723
7724/**
7725 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7726 * @pdev: pointer to PCI device
7727 *
7728 * This routine is to be registered to the kernel's PCI subsystem to support
7729 * system Power Management (PM). When PM invokes this method, it dispatches
7730 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7731 * resume the device.
7732 *
7733 * Return code
7734 * 0 - driver suspended the device
7735 * Error otherwise
7736 **/
7737static int
7738lpfc_pci_resume_one(struct pci_dev *pdev)
7739{
7740 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7741 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7742 int rc = -ENODEV;
7743
7744 switch (phba->pci_dev_grp) {
7745 case LPFC_PCI_DEV_LP:
7746 rc = lpfc_pci_resume_one_s3(pdev);
7747 break;
da0436e9
JS
7748 case LPFC_PCI_DEV_OC:
7749 rc = lpfc_pci_resume_one_s4(pdev);
7750 break;
3772a991
JS
7751 default:
7752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7753 "1426 Invalid PCI device group: 0x%x\n",
7754 phba->pci_dev_grp);
7755 break;
7756 }
7757 return rc;
7758}
7759
7760/**
7761 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7762 * @pdev: pointer to PCI device.
7763 * @state: the current PCI connection state.
7764 *
7765 * This routine is registered to the PCI subsystem for error handling. This
7766 * function is called by the PCI subsystem after a PCI bus error affecting
7767 * this device has been detected. When this routine is invoked, it dispatches
7768 * the action to the proper SLI-3 or SLI-4 device error detected handling
7769 * routine, which will perform the proper error detected operation.
7770 *
7771 * Return codes
7772 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7773 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7774 **/
7775static pci_ers_result_t
7776lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7777{
7778 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7779 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7780 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7781
7782 switch (phba->pci_dev_grp) {
7783 case LPFC_PCI_DEV_LP:
7784 rc = lpfc_io_error_detected_s3(pdev, state);
7785 break;
da0436e9
JS
7786 case LPFC_PCI_DEV_OC:
7787 rc = lpfc_io_error_detected_s4(pdev, state);
7788 break;
3772a991
JS
7789 default:
7790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7791 "1427 Invalid PCI device group: 0x%x\n",
7792 phba->pci_dev_grp);
7793 break;
7794 }
7795 return rc;
7796}
7797
7798/**
7799 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
7800 * @pdev: pointer to PCI device.
7801 *
7802 * This routine is registered to the PCI subsystem for error handling. This
7803 * function is called after PCI bus has been reset to restart the PCI card
7804 * from scratch, as if from a cold-boot. When this routine is invoked, it
7805 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7806 * routine, which will perform the proper device reset.
7807 *
7808 * Return codes
7809 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7810 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7811 **/
7812static pci_ers_result_t
7813lpfc_io_slot_reset(struct pci_dev *pdev)
7814{
7815 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7816 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7817 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7818
7819 switch (phba->pci_dev_grp) {
7820 case LPFC_PCI_DEV_LP:
7821 rc = lpfc_io_slot_reset_s3(pdev);
7822 break;
da0436e9
JS
7823 case LPFC_PCI_DEV_OC:
7824 rc = lpfc_io_slot_reset_s4(pdev);
7825 break;
3772a991
JS
7826 default:
7827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7828 "1428 Invalid PCI device group: 0x%x\n",
7829 phba->pci_dev_grp);
7830 break;
7831 }
7832 return rc;
7833}
7834
7835/**
7836 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7837 * @pdev: pointer to PCI device
7838 *
7839 * This routine is registered to the PCI subsystem for error handling. It
7840 * is called when kernel error recovery tells the lpfc driver that it is
7841 * OK to resume normal PCI operation after PCI bus error recovery. When
7842 * this routine is invoked, it dispatches the action to the proper SLI-3
7843 * or SLI-4 device io_resume routine, which will resume the device operation.
7844 **/
7845static void
7846lpfc_io_resume(struct pci_dev *pdev)
7847{
7848 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7849 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7850
7851 switch (phba->pci_dev_grp) {
7852 case LPFC_PCI_DEV_LP:
7853 lpfc_io_resume_s3(pdev);
7854 break;
da0436e9
JS
7855 case LPFC_PCI_DEV_OC:
7856 lpfc_io_resume_s4(pdev);
7857 break;
3772a991
JS
7858 default:
7859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7860 "1429 Invalid PCI device group: 0x%x\n",
7861 phba->pci_dev_grp);
7862 break;
7863 }
7864 return;
7865}
7866
dea3101e 7867static struct pci_device_id lpfc_id_table[] = {
7868 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
7869 PCI_ANY_ID, PCI_ANY_ID, },
06325e74
JSEC
7870 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
7871 PCI_ANY_ID, PCI_ANY_ID, },
dea3101e 7872 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
7873 PCI_ANY_ID, PCI_ANY_ID, },
7874 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
7875 PCI_ANY_ID, PCI_ANY_ID, },
7876 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
7877 PCI_ANY_ID, PCI_ANY_ID, },
7878 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
7879 PCI_ANY_ID, PCI_ANY_ID, },
7880 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
7881 PCI_ANY_ID, PCI_ANY_ID, },
7882 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
7883 PCI_ANY_ID, PCI_ANY_ID, },
7884 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
7885 PCI_ANY_ID, PCI_ANY_ID, },
e4adb204
JSEC
7886 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
7887 PCI_ANY_ID, PCI_ANY_ID, },
7888 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
7889 PCI_ANY_ID, PCI_ANY_ID, },
7890 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
7891 PCI_ANY_ID, PCI_ANY_ID, },
dea3101e 7892 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
7893 PCI_ANY_ID, PCI_ANY_ID, },
e4adb204
JSEC
7894 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
7895 PCI_ANY_ID, PCI_ANY_ID, },
7896 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
7897 PCI_ANY_ID, PCI_ANY_ID, },
dea3101e 7898 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
7899 PCI_ANY_ID, PCI_ANY_ID, },
7900 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
7901 PCI_ANY_ID, PCI_ANY_ID, },
7902 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
7903 PCI_ANY_ID, PCI_ANY_ID, },
84774a4d
JS
7904 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
7905 PCI_ANY_ID, PCI_ANY_ID, },
e4adb204
JSEC
7906 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
7907 PCI_ANY_ID, PCI_ANY_ID, },
7908 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
7909 PCI_ANY_ID, PCI_ANY_ID, },
dea3101e 7910 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
7911 PCI_ANY_ID, PCI_ANY_ID, },
7912 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
7913 PCI_ANY_ID, PCI_ANY_ID, },
7914 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
7915 PCI_ANY_ID, PCI_ANY_ID, },
7916 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
7917 PCI_ANY_ID, PCI_ANY_ID, },
7918 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
7919 PCI_ANY_ID, PCI_ANY_ID, },
e4adb204
JSEC
7920 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
7921 PCI_ANY_ID, PCI_ANY_ID, },
7922 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
7923 PCI_ANY_ID, PCI_ANY_ID, },
b87eab38
JS
7924 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
7925 PCI_ANY_ID, PCI_ANY_ID, },
7926 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
7927 PCI_ANY_ID, PCI_ANY_ID, },
7928 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
7929 PCI_ANY_ID, PCI_ANY_ID, },
7930 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
7931 PCI_ANY_ID, PCI_ANY_ID, },
7932 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
7933 PCI_ANY_ID, PCI_ANY_ID, },
7934 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
7935 PCI_ANY_ID, PCI_ANY_ID, },
84774a4d
JS
7936 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
7937 PCI_ANY_ID, PCI_ANY_ID, },
7938 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
7939 PCI_ANY_ID, PCI_ANY_ID, },
7940 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
7941 PCI_ANY_ID, PCI_ANY_ID, },
3772a991
JS
7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7943 PCI_ANY_ID, PCI_ANY_ID, },
7944 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
7945 PCI_ANY_ID, PCI_ANY_ID, },
dea3101e 7946 { 0 }
7947};
7948
7949MODULE_DEVICE_TABLE(pci, lpfc_id_table);
7950
8d63f375
LV
7951static struct pci_error_handlers lpfc_err_handler = {
7952 .error_detected = lpfc_io_error_detected,
7953 .slot_reset = lpfc_io_slot_reset,
7954 .resume = lpfc_io_resume,
7955};
7956
dea3101e 7957static struct pci_driver lpfc_driver = {
7958 .name = LPFC_DRIVER_NAME,
7959 .id_table = lpfc_id_table,
7960 .probe = lpfc_pci_probe_one,
7961 .remove = __devexit_p(lpfc_pci_remove_one),
3a55b532 7962 .suspend = lpfc_pci_suspend_one,
3772a991 7963 .resume = lpfc_pci_resume_one,
2e0fef85 7964 .err_handler = &lpfc_err_handler,
dea3101e 7965};
7966
e59058c4 7967/**
3621a710 7968 * lpfc_init - lpfc module initialization routine
e59058c4
JS
7969 *
7970 * This routine is to be invoked when the lpfc module is loaded into the
7971 * kernel. The special kernel macro module_init() is used to indicate the
7972 * role of this routine to the kernel as lpfc module entry point.
7973 *
7974 * Return codes
7975 * 0 - successful
7976 * -ENOMEM - FC attach transport failed
7977 * all others - failed
7978 */
dea3101e 7979static int __init
7980lpfc_init(void)
7981{
7982 int error = 0;
7983
7984 printk(LPFC_MODULE_DESC "\n");
c44ce173 7985 printk(LPFC_COPYRIGHT "\n");
dea3101e 7986
7ee5d43e
JS
7987 if (lpfc_enable_npiv) {
7988 lpfc_transport_functions.vport_create = lpfc_vport_create;
7989 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
7990 }
dea3101e 7991 lpfc_transport_template =
7992 fc_attach_transport(&lpfc_transport_functions);
7ee5d43e 7993 if (lpfc_transport_template == NULL)
dea3101e 7994 return -ENOMEM;
7ee5d43e 7995 if (lpfc_enable_npiv) {
7ee5d43e 7996 lpfc_vport_transport_template =
98c9ea5c
JS
7997 fc_attach_transport(&lpfc_vport_transport_functions);
7998 if (lpfc_vport_transport_template == NULL) {
7999 fc_release_transport(lpfc_transport_template);
7ee5d43e 8000 return -ENOMEM;
98c9ea5c 8001 }
7ee5d43e 8002 }
dea3101e 8003 error = pci_register_driver(&lpfc_driver);
92d7f7b0 8004 if (error) {
dea3101e 8005 fc_release_transport(lpfc_transport_template);
d7c255b2
JS
8006 if (lpfc_enable_npiv)
8007 fc_release_transport(lpfc_vport_transport_template);
92d7f7b0 8008 }
dea3101e 8009
8010 return error;
8011}
8012
e59058c4 8013/**
3621a710 8014 * lpfc_exit - lpfc module removal routine
e59058c4
JS
8015 *
8016 * This routine is invoked when the lpfc module is removed from the kernel.
8017 * The special kernel macro module_exit() is used to indicate the role of
8018 * this routine to the kernel as lpfc module exit point.
8019 */
dea3101e 8020static void __exit
8021lpfc_exit(void)
8022{
8023 pci_unregister_driver(&lpfc_driver);
8024 fc_release_transport(lpfc_transport_template);
7ee5d43e
JS
8025 if (lpfc_enable_npiv)
8026 fc_release_transport(lpfc_vport_transport_template);
81301a9b
JS
8027 if (_dump_buf_data) {
8028 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
8029 "at 0x%p\n",
8030 (1L << _dump_buf_data_order), _dump_buf_data);
8031 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8032 }
8033
8034 if (_dump_buf_dif) {
8035 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
8036 "at 0x%p\n",
8037 (1L << _dump_buf_dif_order), _dump_buf_dif);
8038 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8039 }
dea3101e 8040}
8041
8042module_init(lpfc_init);
8043module_exit(lpfc_exit);
8044MODULE_LICENSE("GPL");
8045MODULE_DESCRIPTION(LPFC_MODULE_DESC);
8046MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
8047MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
This page took 0.890767 seconds and 5 git commands to generate.