[SCSI] lpfc driver 8.0.30 : dev_loss and nodev timeouts
[deliverable/linux.git] drivers/scsi/lpfc/lpfc_init.c
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2005 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kthread.h>
28#include <linux/pci.h>
29#include <linux/spinlock.h>
30
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h>
34
35#include "lpfc_hw.h"
36#include "lpfc_sli.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_version.h"
43
44static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *);
45static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
46static int lpfc_post_rcv_buf(struct lpfc_hba *);
47
48static struct scsi_transport_template *lpfc_transport_template = NULL;
49static DEFINE_IDR(lpfc_hba_index);
50
51/************************************************************************/
52/* */
53/* lpfc_config_port_prep */
54/* This routine will do LPFC initialization prior to the */
55/* CONFIG_PORT mailbox command. This will be initialized */
56/* as a SLI layer callback routine. */
57/* This routine returns 0 on success or -ERESTART if it wants */
58/* the SLI layer to reset the HBA and try again. Any */
59/* other return value indicates an error. */
60/* */
61/************************************************************************/
62int
63lpfc_config_port_prep(struct lpfc_hba * phba)
64{
65 lpfc_vpd_t *vp = &phba->vpd;
66 int i = 0, rc;
67 LPFC_MBOXQ_t *pmb;
68 MAILBOX_t *mb;
69 char *lpfc_vpd_data = NULL;
70 uint16_t offset = 0;
71 static char licensed[56] =
72 "key unlock for use with gnu public licensed code only\0";
73
74 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
75 if (!pmb) {
76 phba->hba_state = LPFC_HBA_ERROR;
77 return -ENOMEM;
78 }
79
80 mb = &pmb->mb;
81 phba->hba_state = LPFC_INIT_MBX_CMDS;
82
83 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
84 uint32_t *ptext = (uint32_t *) licensed;
85
86 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
87 *ptext = cpu_to_be32(*ptext);
88
89 lpfc_read_nv(phba, pmb);
90 memset((char*)mb->un.varRDnvp.rsvd3, 0,
91 sizeof (mb->un.varRDnvp.rsvd3));
92 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
93 sizeof (licensed));
94
95 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
96
97 if (rc != MBX_SUCCESS) {
98 lpfc_printf_log(phba,
99 KERN_ERR,
100 LOG_MBOX,
101 "%d:0324 Config Port initialization "
102 "error, mbxCmd x%x READ_NVPARM, "
103 "mbxStatus x%x\n",
104 phba->brd_no,
105 mb->mbxCommand, mb->mbxStatus);
106 mempool_free(pmb, phba->mbox_mem_pool);
107 return -ERESTART;
108 }
109 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
110 sizeof (mb->un.varRDnvp.nodename));
111 }
112
113 /* Setup and issue mailbox READ REV command */
114 lpfc_read_rev(phba, pmb);
115 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
116 if (rc != MBX_SUCCESS) {
117 lpfc_printf_log(phba,
118 KERN_ERR,
119 LOG_INIT,
120 "%d:0439 Adapter failed to init, mbxCmd x%x "
121 "READ_REV, mbxStatus x%x\n",
122 phba->brd_no,
123 mb->mbxCommand, mb->mbxStatus);
124 mempool_free( pmb, phba->mbox_mem_pool);
125 return -ERESTART;
126 }
127
128 /* The HBA's current state is provided by the ProgType and rr fields.
129 * Read and check the value of these fields before continuing to config
130 * this port.
131 */
132 if (mb->un.varRdRev.rr == 0 || mb->un.varRdRev.un.b.ProgType != 2) {
133 /* Old firmware */
134 vp->rev.rBit = 0;
135 lpfc_printf_log(phba,
136 KERN_ERR,
137 LOG_INIT,
138 "%d:0440 Adapter failed to init, mbxCmd x%x "
 139 "READ_REV detected outdated firmware. "
140 "Data: x%x\n",
141 phba->brd_no,
142 mb->mbxCommand, 0);
143 mempool_free(pmb, phba->mbox_mem_pool);
144 return -ERESTART;
145 } else {
146 vp->rev.rBit = 1;
147 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
148 memcpy(vp->rev.sli1FwName,
149 (char*)mb->un.varRdRev.sli1FwName, 16);
150 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
151 memcpy(vp->rev.sli2FwName,
152 (char *)mb->un.varRdRev.sli2FwName, 16);
153 }
154
155 /* Save information as VPD data */
156 vp->rev.biuRev = mb->un.varRdRev.biuRev;
157 vp->rev.smRev = mb->un.varRdRev.smRev;
158 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
159 vp->rev.endecRev = mb->un.varRdRev.endecRev;
160 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
161 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
162 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
163 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
164 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
165 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
166
167 if (lpfc_is_LC_HBA(phba->pcidev->device))
168 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
169 sizeof (phba->RandomData));
170
171 /* Get the default values for Model Name and Description */
172 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
173
174 /* Get adapter VPD information */
175 pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
176 if (!pmb->context2)
177 goto out_free_mbox;
178 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
179 if (!lpfc_vpd_data)
180 goto out_free_context2;
181
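 	/* Pull the VPD image from the adapter in chunks: each DUMP_MEM pass
 	 * returns word_cnt words at the current offset, and the loop stops
 	 * once the adapter reports zero words.
 	 */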
182 do {
183 lpfc_dump_mem(phba, pmb, offset);
184 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
185
186 if (rc != MBX_SUCCESS) {
187 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
188 "%d:0441 VPD not present on adapter, "
189 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
190 phba->brd_no,
191 mb->mbxCommand, mb->mbxStatus);
192 kfree(lpfc_vpd_data);
193 lpfc_vpd_data = NULL;
194 break;
195 }
196
197 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
198 mb->un.varDmp.word_cnt);
199 offset += mb->un.varDmp.word_cnt;
200 } while (mb->un.varDmp.word_cnt);
201 lpfc_parse_vpd(phba, lpfc_vpd_data);
202
203 kfree(lpfc_vpd_data);
204out_free_context2:
205 kfree(pmb->context2);
206out_free_mbox:
207 mempool_free(pmb, phba->mbox_mem_pool);
208 return 0;
209}
210
211/************************************************************************/
212/* */
213/* lpfc_config_port_post */
214/* This routine will do LPFC initialization after the */
215/* CONFIG_PORT mailbox command. This will be initialized */
216/* as a SLI layer callback routine. */
217/* This routine returns 0 on success. Any other return value */
218/* indicates an error. */
219/* */
220/************************************************************************/
221int
222lpfc_config_port_post(struct lpfc_hba * phba)
223{
224 LPFC_MBOXQ_t *pmb;
225 MAILBOX_t *mb;
226 struct lpfc_dmabuf *mp;
227 struct lpfc_sli *psli = &phba->sli;
228 uint32_t status, timeout;
229 int i, j, rc;
230
231 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
232 if (!pmb) {
233 phba->hba_state = LPFC_HBA_ERROR;
234 return -ENOMEM;
235 }
236 mb = &pmb->mb;
237
238 lpfc_config_link(phba, pmb);
239 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
240 if (rc != MBX_SUCCESS) {
241 lpfc_printf_log(phba,
242 KERN_ERR,
243 LOG_INIT,
244 "%d:0447 Adapter failed init, mbxCmd x%x "
245 "CONFIG_LINK mbxStatus x%x\n",
246 phba->brd_no,
247 mb->mbxCommand, mb->mbxStatus);
248 phba->hba_state = LPFC_HBA_ERROR;
249 mempool_free( pmb, phba->mbox_mem_pool);
250 return -EIO;
251 }
252
253 /* Get login parameters for NID. */
254 lpfc_read_sparam(phba, pmb);
255 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
256 lpfc_printf_log(phba,
257 KERN_ERR,
258 LOG_INIT,
259 "%d:0448 Adapter failed init, mbxCmd x%x "
260 "READ_SPARM mbxStatus x%x\n",
261 phba->brd_no,
262 mb->mbxCommand, mb->mbxStatus);
263 phba->hba_state = LPFC_HBA_ERROR;
264 mp = (struct lpfc_dmabuf *) pmb->context1;
265 mempool_free( pmb, phba->mbox_mem_pool);
266 lpfc_mbuf_free(phba, mp->virt, mp->phys);
267 kfree(mp);
268 return -EIO;
269 }
270
271 mp = (struct lpfc_dmabuf *) pmb->context1;
272
273 memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
274 lpfc_mbuf_free(phba, mp->virt, mp->phys);
275 kfree(mp);
276 pmb->context1 = NULL;
277
278 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
279 sizeof (struct lpfc_name));
280 memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
281 sizeof (struct lpfc_name));
282 /* If no serial number in VPD data, use low 6 bytes of WWNN */
283 /* This should be consolidated into parse_vpd ? - mr */
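 	/* Each of the six IEEE bytes of the WWNN is expanded into two
 	 * characters, '0'-'9' for nibble values 0-9 and 'a'-'f' for 10-15,
 	 * giving a 12-character fallback serial number.
 	 */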
284 if (phba->SerialNumber[0] == 0) {
285 uint8_t *outptr;
286
287 outptr = (uint8_t *) & phba->fc_nodename.IEEE[0];
288 for (i = 0; i < 12; i++) {
289 status = *outptr++;
290 j = ((status & 0xf0) >> 4);
291 if (j <= 9)
292 phba->SerialNumber[i] =
293 (char)((uint8_t) 0x30 + (uint8_t) j);
294 else
295 phba->SerialNumber[i] =
296 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
297 i++;
298 j = (status & 0xf);
299 if (j <= 9)
300 phba->SerialNumber[i] =
301 (char)((uint8_t) 0x30 + (uint8_t) j);
302 else
303 phba->SerialNumber[i] =
304 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
305 }
306 }
307
308 /* This should turn on DELAYED ABTS for ELS timeouts */
309 lpfc_set_slim(phba, pmb, 0x052198, 0x1);
310 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
311 phba->hba_state = LPFC_HBA_ERROR;
312 mempool_free( pmb, phba->mbox_mem_pool);
313 return -EIO;
314 }
315
316
317 lpfc_read_config(phba, pmb);
318 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
319 lpfc_printf_log(phba,
320 KERN_ERR,
321 LOG_INIT,
322 "%d:0453 Adapter failed to init, mbxCmd x%x "
323 "READ_CONFIG, mbxStatus x%x\n",
324 phba->brd_no,
325 mb->mbxCommand, mb->mbxStatus);
326 phba->hba_state = LPFC_HBA_ERROR;
327 mempool_free( pmb, phba->mbox_mem_pool);
328 return -EIO;
329 }
330
331 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
332 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
333 phba->cfg_hba_queue_depth =
334 mb->un.varRdConfig.max_xri + 1;
335
336 phba->lmt = mb->un.varRdConfig.lmt;
 337 /* If the HBA is not 4Gb capable, or not 2Gb capable, don't let
 338 the configured link speed ask for a rate the adapter can't support */
339 if ((((phba->lmt & LMT_4250_10bit) != LMT_4250_10bit) &&
340 (phba->cfg_link_speed > LINK_SPEED_2G)) ||
341 (((phba->lmt & LMT_2125_10bit) != LMT_2125_10bit) &&
342 (phba->cfg_link_speed > LINK_SPEED_1G))) {
343 /* Reset link speed to auto. 1G/2GB HBA cfg'd for 4G */
344 lpfc_printf_log(phba,
345 KERN_WARNING,
346 LOG_LINK_EVENT,
347 "%d:1302 Invalid speed for this board: "
348 "Reset link speed to auto: x%x\n",
349 phba->brd_no,
350 phba->cfg_link_speed);
351 phba->cfg_link_speed = LINK_SPEED_AUTO;
352 }
353
354 phba->hba_state = LPFC_LINK_DOWN;
355
356 /* Only process IOCBs on ring 0 till hba_state is READY */
357 if (psli->ring[psli->ip_ring].cmdringaddr)
358 psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT;
359 if (psli->ring[psli->fcp_ring].cmdringaddr)
360 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
361 if (psli->ring[psli->next_ring].cmdringaddr)
362 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
363
364 /* Post receive buffers for desired rings */
365 lpfc_post_rcv_buf(phba);
366
367 /* Enable appropriate host interrupts */
368 spin_lock_irq(phba->host->host_lock);
369 status = readl(phba->HCregaddr);
370 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
371 if (psli->num_rings > 0)
372 status |= HC_R0INT_ENA;
373 if (psli->num_rings > 1)
374 status |= HC_R1INT_ENA;
375 if (psli->num_rings > 2)
376 status |= HC_R2INT_ENA;
377 if (psli->num_rings > 3)
378 status |= HC_R3INT_ENA;
379
380 writel(status, phba->HCregaddr);
381 readl(phba->HCregaddr); /* flush */
382 spin_unlock_irq(phba->host->host_lock);
383
384 /*
385 * Setup the ring 0 (els) timeout handler
386 */
387 timeout = phba->fc_ratov << 1;
388 phba->els_tmofunc.expires = jiffies + HZ * timeout;
389 add_timer(&phba->els_tmofunc);
390
391 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
392 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
393 if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_SUCCESS) {
394 lpfc_printf_log(phba,
395 KERN_ERR,
396 LOG_INIT,
397 "%d:0454 Adapter failed to init, mbxCmd x%x "
398 "INIT_LINK, mbxStatus x%x\n",
399 phba->brd_no,
400 mb->mbxCommand, mb->mbxStatus);
401
402 /* Clear all interrupt enable conditions */
403 writel(0, phba->HCregaddr);
404 readl(phba->HCregaddr); /* flush */
405 /* Clear all pending interrupts */
406 writel(0xffffffff, phba->HAregaddr);
407 readl(phba->HAregaddr); /* flush */
408
409 phba->hba_state = LPFC_HBA_ERROR;
410 mempool_free(pmb, phba->mbox_mem_pool);
411 return -EIO;
412 }
413 /* MBOX buffer will be freed in mbox compl */
414
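 	/* Poll once a second, for up to 30 seconds, while discovery runs:
 	 * wait until the HBA is READY, no discovery nodes or PRLIs are
 	 * outstanding, and no mailbox command is active.  Give up after
 	 * 15 seconds if the link is still down.
 	 */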
415 i = 0;
416 while ((phba->hba_state != LPFC_HBA_READY) ||
417 (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
418 ((phba->fc_map_cnt == 0) && (i<2)) ||
419 (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
420 /* Check every second for 30 retries. */
421 i++;
422 if (i > 30) {
423 break;
424 }
425 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
426 /* The link is down. Set linkdown timeout */
427 break;
428 }
429
430 /* Delay for 1 second to give discovery time to complete. */
431 msleep(1000);
432
433 }
434
435 /* Since num_disc_nodes keys off of PLOGI, delay a bit to let
 436 * any potential PRLIs flush through the SLI sub-system.
437 */
438 msleep(50);
439
440 return (0);
441}
442
443/************************************************************************/
444/* */
445/* lpfc_hba_down_prep */
446/* This routine will do LPFC uninitialization before the */
447/* HBA is reset when bringing down the SLI Layer. This will be */
448/* initialized as a SLI layer callback routine. */
449/* This routine returns 0 on success. Any other return value */
450/* indicates an error. */
451/* */
452/************************************************************************/
453int
454lpfc_hba_down_prep(struct lpfc_hba * phba)
455{
456 /* Disable interrupts */
457 writel(0, phba->HCregaddr);
458 readl(phba->HCregaddr); /* flush */
459
460 /* Cleanup potential discovery resources */
461 lpfc_els_flush_rscn(phba);
462 lpfc_els_flush_cmd(phba);
463 lpfc_disc_flush_list(phba);
464
465 return (0);
466}
467
468/************************************************************************/
469/* */
470/* lpfc_handle_eratt */
471/* This routine will handle processing a Host Attention */
472/* Error Status event. This will be initialized */
473/* as a SLI layer callback routine. */
474/* */
475/************************************************************************/
476void
477lpfc_handle_eratt(struct lpfc_hba * phba)
478{
479 struct lpfc_sli *psli = &phba->sli;
480 struct lpfc_sli_ring *pring;
481
482 /*
 483 * If a reset is sent to the HBA, restore the PCI configuration registers.
484 */
485 if ( phba->hba_state == LPFC_INIT_START ) {
486 mdelay(1);
487 readl(phba->HCregaddr); /* flush */
488 writel(0, phba->HCregaddr);
489 readl(phba->HCregaddr); /* flush */
490
491 /* Restore PCI cmd register */
492 pci_write_config_word(phba->pcidev,
493 PCI_COMMAND, phba->pci_cfg_value);
494 }
495
496 if (phba->work_hs & HS_FFER6) {
497 /* Re-establishing Link */
498 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
499 "%d:1301 Re-establishing Link "
500 "Data: x%x x%x x%x\n",
501 phba->brd_no, phba->work_hs,
502 phba->work_status[0], phba->work_status[1]);
503 spin_lock_irq(phba->host->host_lock);
504 phba->fc_flag |= FC_ESTABLISH_LINK;
505 spin_unlock_irq(phba->host->host_lock);
506
507 /*
 508 * The firmware stops when it triggers an error attention with
 509 * HS_FFER6, which can cause outstanding I/Os to be dropped.
 510 * Error out the IOCBs (I/Os) on the txcmplq and let the SCSI
 511 * layer retry them after the link is re-established.
512 */
513 pring = &psli->ring[psli->fcp_ring];
514 lpfc_sli_abort_iocb_ring(phba, pring);
515
516
517 /*
518 * There was a firmware error. Take the hba offline and then
519 * attempt to restart it.
520 */
521 lpfc_offline(phba);
522 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
523 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
524 return;
525 }
526 } else {
527 /* The if clause above forces this code path when the status
528 * failure is a value other than FFER6. Do not call the offline
529 * twice. This is the adapter hardware error path.
530 */
531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
532 "%d:0457 Adapter Hardware Error "
533 "Data: x%x x%x x%x\n",
534 phba->brd_no, phba->work_hs,
535 phba->work_status[0], phba->work_status[1]);
536
537 lpfc_offline(phba);
538
539 /*
540 * Restart all traffic to this host. Since the fc_transport
541 * block functions (future) were not called in lpfc_offline,
542 * don't call them here.
543 */
544 scsi_unblock_requests(phba->host);
545 }
546}
547
548/************************************************************************/
549/* */
550/* lpfc_handle_latt */
551/* This routine will handle processing a Host Attention */
552/* Link Status event. This will be initialized */
553/* as a SLI layer callback routine. */
554/* */
555/************************************************************************/
556void
557lpfc_handle_latt(struct lpfc_hba * phba)
558{
559 struct lpfc_sli *psli = &phba->sli;
560 LPFC_MBOXQ_t *pmb;
561 volatile uint32_t control;
562 struct lpfc_dmabuf *mp;
563 int rc = -ENOMEM;
564
565 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
566 if (!pmb)
567 goto lpfc_handle_latt_err_exit;
568
569 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
570 if (!mp)
571 goto lpfc_handle_latt_free_pmb;
572
573 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
574 if (!mp->virt)
575 goto lpfc_handle_latt_free_mp;
576
577 rc = -EIO;
578
579
580 psli->slistat.link_event++;
581 lpfc_read_la(phba, pmb, mp);
582 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
583 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
584 if (rc == MBX_NOT_FINISHED)
585 goto lpfc_handle_latt_free_mp;
586
587 /* Clear Link Attention in HA REG */
588 spin_lock_irq(phba->host->host_lock);
589 writel(HA_LATT, phba->HAregaddr);
590 readl(phba->HAregaddr); /* flush */
591 spin_unlock_irq(phba->host->host_lock);
592
593 return;
594
595lpfc_handle_latt_free_mp:
596 kfree(mp);
597lpfc_handle_latt_free_pmb:
 598 mempool_free(pmb, phba->mbox_mem_pool);
599lpfc_handle_latt_err_exit:
600 /* Enable Link attention interrupts */
601 spin_lock_irq(phba->host->host_lock);
602 psli->sli_flag |= LPFC_PROCESS_LA;
603 control = readl(phba->HCregaddr);
604 control |= HC_LAINT_ENA;
605 writel(control, phba->HCregaddr);
606 readl(phba->HCregaddr); /* flush */
607
608 /* Clear Link Attention in HA REG */
609 writel(HA_LATT, phba->HAregaddr);
610 readl(phba->HAregaddr); /* flush */
611 spin_unlock_irq(phba->host->host_lock);
612 lpfc_linkdown(phba);
613 phba->hba_state = LPFC_HBA_ERROR;
614
615 /* The other case is an error from issue_mbox */
616 if (rc == -ENOMEM)
617 lpfc_printf_log(phba,
618 KERN_WARNING,
619 LOG_MBOX,
620 "%d:0300 READ_LA: no buffers\n",
621 phba->brd_no);
622
623 return;
624}
625
626/************************************************************************/
627/* */
628/* lpfc_parse_vpd */
629/* This routine will parse the VPD data */
630/* */
631/************************************************************************/
632static int
633lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd)
634{
635 uint8_t lenlo, lenhi;
636 uint32_t Length;
637 int i, j;
638 int finished = 0;
639 int index = 0;
640
641 if (!vpd)
642 return 0;
643
644 /* Vital Product */
645 lpfc_printf_log(phba,
646 KERN_INFO,
647 LOG_INIT,
648 "%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
649 phba->brd_no,
650 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
651 (uint32_t) vpd[3]);
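 	/* Walk the PCI VPD resource tags: 0x82 (identifier string) is
 	 * skipped, 0x90 (read-only data) is scanned for the SN and V1-V4
 	 * keywords, and 0x78 marks the end of the VPD image.
 	 */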
652 do {
653 switch (vpd[index]) {
654 case 0x82:
655 index += 1;
656 lenlo = vpd[index];
657 index += 1;
658 lenhi = vpd[index];
659 index += 1;
660 i = ((((unsigned short)lenhi) << 8) + lenlo);
661 index += i;
662 break;
663 case 0x90:
664 index += 1;
665 lenlo = vpd[index];
666 index += 1;
667 lenhi = vpd[index];
668 index += 1;
669 Length = ((((unsigned short)lenhi) << 8) + lenlo);
670
671 while (Length > 0) {
672 /* Look for Serial Number */
673 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
674 index += 2;
675 i = vpd[index];
676 index += 1;
677 j = 0;
678 Length -= (3+i);
679 while(i--) {
680 phba->SerialNumber[j++] = vpd[index++];
681 if (j == 31)
682 break;
683 }
684 phba->SerialNumber[j] = 0;
685 continue;
686 }
687 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
688 phba->vpd_flag |= VPD_MODEL_DESC;
689 index += 2;
690 i = vpd[index];
691 index += 1;
692 j = 0;
693 Length -= (3+i);
694 while(i--) {
695 phba->ModelDesc[j++] = vpd[index++];
696 if (j == 255)
697 break;
698 }
699 phba->ModelDesc[j] = 0;
700 continue;
701 }
702 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
703 phba->vpd_flag |= VPD_MODEL_NAME;
704 index += 2;
705 i = vpd[index];
706 index += 1;
707 j = 0;
708 Length -= (3+i);
709 while(i--) {
710 phba->ModelName[j++] = vpd[index++];
711 if (j == 79)
712 break;
713 }
714 phba->ModelName[j] = 0;
715 continue;
716 }
717 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
718 phba->vpd_flag |= VPD_PROGRAM_TYPE;
719 index += 2;
720 i = vpd[index];
721 index += 1;
722 j = 0;
723 Length -= (3+i);
724 while(i--) {
725 phba->ProgramType[j++] = vpd[index++];
726 if (j == 255)
727 break;
728 }
729 phba->ProgramType[j] = 0;
730 continue;
731 }
732 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
733 phba->vpd_flag |= VPD_PORT;
734 index += 2;
735 i = vpd[index];
736 index += 1;
737 j = 0;
738 Length -= (3+i);
739 while(i--) {
740 phba->Port[j++] = vpd[index++];
741 if (j == 19)
742 break;
743 }
744 phba->Port[j] = 0;
745 continue;
746 }
747 else {
748 index += 2;
749 i = vpd[index];
750 index += 1;
751 index += i;
752 Length -= (3 + i);
753 }
754 }
755 finished = 0;
756 break;
757 case 0x78:
758 finished = 1;
759 break;
760 default:
761 index ++;
762 break;
763 }
764 } while (!finished && (index < 108));
765
766 return(1);
767}
768
769static void
770lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
771{
772 lpfc_vpd_t *vp;
773 uint32_t id;
774 char str[16];
775
776 vp = &phba->vpd;
777 pci_read_config_dword(phba->pcidev, PCI_VENDOR_ID, &id);
778
779 switch ((id >> 16) & 0xffff) {
780 case PCI_DEVICE_ID_FIREFLY:
781 strcpy(str, "LP6000 1");
782 break;
 783 case PCI_DEVICE_ID_SUPERFLY:
784 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
785 strcpy(str, "LP7000 1");
786 else
787 strcpy(str, "LP7000E 1");
788 break;
789 case PCI_DEVICE_ID_DRAGONFLY:
790 strcpy(str, "LP8000 1");
791 break;
792 case PCI_DEVICE_ID_CENTAUR:
793 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
794 strcpy(str, "LP9002 2");
795 else
796 strcpy(str, "LP9000 1");
797 break;
798 case PCI_DEVICE_ID_RFLY:
799 strcpy(str, "LP952 2");
800 break;
801 case PCI_DEVICE_ID_PEGASUS:
802 strcpy(str, "LP9802 2");
803 break;
804 case PCI_DEVICE_ID_THOR:
805 strcpy(str, "LP10000 2");
806 break;
807 case PCI_DEVICE_ID_VIPER:
808 strcpy(str, "LPX1000 10");
809 break;
810 case PCI_DEVICE_ID_PFLY:
811 strcpy(str, "LP982 2");
812 break;
813 case PCI_DEVICE_ID_TFLY:
814 strcpy(str, "LP1050 2");
815 break;
816 case PCI_DEVICE_ID_HELIOS:
817 strcpy(str, "LP11000 4");
818 break;
819 case PCI_DEVICE_ID_BMID:
820 strcpy(str, "LP1150 4");
821 break;
822 case PCI_DEVICE_ID_BSMB:
823 strcpy(str, "LP111 4");
824 break;
825 case PCI_DEVICE_ID_ZEPHYR:
826 strcpy(str, "LP11000e 4");
827 break;
828 case PCI_DEVICE_ID_ZMID:
829 strcpy(str, "LP1150e 4");
830 break;
831 case PCI_DEVICE_ID_ZSMB:
832 strcpy(str, "LP111e 4");
833 break;
834 case PCI_DEVICE_ID_LP101:
835 strcpy(str, "LP101 2");
836 break;
837 case PCI_DEVICE_ID_LP10000S:
838 strcpy(str, "LP10000-S 2");
839 break;
840 default:
841 memset(str, 0, 16);
842 break;
 843 }
844 if (mdp)
845 sscanf(str, "%s", mdp);
846 if (descp)
847 sprintf(descp, "Emulex LightPulse %s Gigabit PCI Fibre "
848 "Channel Adapter", str);
849}
850
851/**************************************************/
852/* lpfc_post_buffer */
853/* */
854/* This routine will post count buffers to the */
855/* ring with the QUE_RING_BUF_CN command. This */
 856 /* allows 2 buffers / command to be posted. */
857/* Returns the number of buffers NOT posted. */
858/**************************************************/
859int
860lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
861 int type)
862{
863 IOCB_t *icmd;
864 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
865 struct lpfc_iocbq *iocb = NULL;
866 struct lpfc_dmabuf *mp1, *mp2;
867
868 cnt += pring->missbufcnt;
869
870 /* While there are buffers to post */
871 while (cnt > 0) {
872 /* Allocate buffer for command iocb */
873 spin_lock_irq(phba->host->host_lock);
874 list_remove_head(lpfc_iocb_list, iocb, struct lpfc_iocbq, list);
875 spin_unlock_irq(phba->host->host_lock);
876 if (iocb == NULL) {
877 pring->missbufcnt = cnt;
878 return cnt;
879 }
880 memset(iocb, 0, sizeof (struct lpfc_iocbq));
881 icmd = &iocb->iocb;
882
883 /* 2 buffers can be posted per command */
884 /* Allocate buffer to post */
885 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
886 if (mp1)
887 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
888 &mp1->phys);
 889 if (!mp1 || !mp1->virt) {
890 if (mp1)
891 kfree(mp1);
892 spin_lock_irq(phba->host->host_lock);
893 list_add_tail(&iocb->list, lpfc_iocb_list);
894 spin_unlock_irq(phba->host->host_lock);
895 pring->missbufcnt = cnt;
896 return cnt;
897 }
898
899 INIT_LIST_HEAD(&mp1->list);
900 /* Allocate buffer to post */
901 if (cnt > 1) {
902 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
903 if (mp2)
904 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
905 &mp2->phys);
 906 if (!mp2 || !mp2->virt) {
907 if (mp2)
908 kfree(mp2);
909 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
910 kfree(mp1);
911 spin_lock_irq(phba->host->host_lock);
912 list_add_tail(&iocb->list, lpfc_iocb_list);
913 spin_unlock_irq(phba->host->host_lock);
914 pring->missbufcnt = cnt;
915 return cnt;
916 }
917
918 INIT_LIST_HEAD(&mp2->list);
919 } else {
920 mp2 = NULL;
921 }
922
923 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
924 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
925 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
926 icmd->ulpBdeCount = 1;
927 cnt--;
928 if (mp2) {
929 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
930 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
931 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
932 cnt--;
933 icmd->ulpBdeCount = 2;
934 }
935
936 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
937 icmd->ulpLe = 1;
938
939 spin_lock_irq(phba->host->host_lock);
940 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
941 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
942 kfree(mp1);
943 cnt++;
944 if (mp2) {
945 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
946 kfree(mp2);
947 cnt++;
948 }
949 list_add_tail(&iocb->list, lpfc_iocb_list);
950 pring->missbufcnt = cnt;
951 spin_unlock_irq(phba->host->host_lock);
952 return cnt;
953 }
954 spin_unlock_irq(phba->host->host_lock);
955 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
956 if (mp2) {
957 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
958 }
959 }
960 pring->missbufcnt = 0;
961 return 0;
962}
963
964/************************************************************************/
965/* */
966/* lpfc_post_rcv_buf */
967/* This routine post initial rcv buffers to the configured rings */
968/* */
969/************************************************************************/
970static int
971lpfc_post_rcv_buf(struct lpfc_hba * phba)
972{
973 struct lpfc_sli *psli = &phba->sli;
974
975 /* Ring 0, ELS / CT buffers */
976 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
977 /* Ring 2 - FCP no buffers needed */
978
979 return 0;
980}
981
982#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
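/* S(N,V) rotates the 32-bit value V left by N bits; it is used by the
 * SHA-1 style hash routines below (lpfc_sha_init / lpfc_sha_iterate). */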
983
984/************************************************************************/
985/* */
986/* lpfc_sha_init */
987/* */
988/************************************************************************/
989static void
990lpfc_sha_init(uint32_t * HashResultPointer)
991{
992 HashResultPointer[0] = 0x67452301;
993 HashResultPointer[1] = 0xEFCDAB89;
994 HashResultPointer[2] = 0x98BADCFE;
995 HashResultPointer[3] = 0x10325476;
996 HashResultPointer[4] = 0xC3D2E1F0;
997}
998
999/************************************************************************/
1000/* */
1001/* lpfc_sha_iterate */
1002/* */
1003/************************************************************************/
1004static void
1005lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1006{
1007 int t;
1008 uint32_t TEMP;
1009 uint32_t A, B, C, D, E;
1010 t = 16;
1011 do {
1012 HashWorkingPointer[t] =
1013 S(1,
1014 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
1015 8] ^
1016 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1017 } while (++t <= 79);
1018 t = 0;
1019 A = HashResultPointer[0];
1020 B = HashResultPointer[1];
1021 C = HashResultPointer[2];
1022 D = HashResultPointer[3];
1023 E = HashResultPointer[4];
1024
1025 do {
1026 if (t < 20) {
1027 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1028 } else if (t < 40) {
1029 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1030 } else if (t < 60) {
1031 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1032 } else {
1033 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1034 }
1035 TEMP += S(5, A) + E + HashWorkingPointer[t];
1036 E = D;
1037 D = C;
1038 C = S(30, B);
1039 B = A;
1040 A = TEMP;
1041 } while (++t <= 79);
1042
1043 HashResultPointer[0] += A;
1044 HashResultPointer[1] += B;
1045 HashResultPointer[2] += C;
1046 HashResultPointer[3] += D;
1047 HashResultPointer[4] += E;
1048
1049}
1050
1051/************************************************************************/
1052/* */
1053/* lpfc_challenge_key */
1054/* */
1055/************************************************************************/
1056static void
1057lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1058{
1059 *HashWorking = (*RandomChallenge ^ *HashWorking);
1060}
1061
1062/************************************************************************/
1063/* */
1064/* lpfc_hba_init */
1065/* */
1066/************************************************************************/
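/* Seed an 80-word working buffer with the adapter WWNN, fold in the
 * RandomData challenge words saved at init time, and run the SHA-1
 * rounds to produce the hbainit response for the caller. */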
1067void
1068lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1069{
1070 int t;
1071 uint32_t *HashWorking;
1072 uint32_t *pwwnn = phba->wwnn;
1073
1074 HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
1075 if (!HashWorking)
1076 return;
1077
1078 memset(HashWorking, 0, (80 * sizeof(uint32_t)));
1079 HashWorking[0] = HashWorking[78] = *pwwnn++;
1080 HashWorking[1] = HashWorking[79] = *pwwnn;
1081
1082 for (t = 0; t < 7; t++)
1083 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1084
1085 lpfc_sha_init(hbainit);
1086 lpfc_sha_iterate(hbainit, HashWorking);
1087 kfree(HashWorking);
1088}
1089
1090static void
1091lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
1092{
1093 struct lpfc_nodelist *ndlp, *next_ndlp;
1094
1095 /* clean up phba - lpfc specific */
1096 lpfc_can_disctmo(phba);
1097 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
1098 nlp_listp) {
1099 lpfc_nlp_remove(phba, ndlp);
1100 }
1101
1102 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1103 nlp_listp) {
1104 lpfc_nlp_remove(phba, ndlp);
1105 }
1106
1107 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
1108 nlp_listp) {
1109 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1110 }
1111
1112 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1113 nlp_listp) {
1114 lpfc_nlp_remove(phba, ndlp);
1115 }
1116
1117 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1118 nlp_listp) {
1119 lpfc_nlp_remove(phba, ndlp);
1120 }
1121
1122 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
1123 nlp_listp) {
1124 lpfc_nlp_remove(phba, ndlp);
1125 }
1126
1127 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1128 nlp_listp) {
1129 lpfc_nlp_remove(phba, ndlp);
1130 }
1131
1132 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1133 nlp_listp) {
1134 lpfc_nlp_remove(phba, ndlp);
1135 }
1136
1137 INIT_LIST_HEAD(&phba->fc_nlpmap_list);
1138 INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
1139 INIT_LIST_HEAD(&phba->fc_unused_list);
1140 INIT_LIST_HEAD(&phba->fc_plogi_list);
1141 INIT_LIST_HEAD(&phba->fc_adisc_list);
1142 INIT_LIST_HEAD(&phba->fc_reglogin_list);
1143 INIT_LIST_HEAD(&phba->fc_prli_list);
1144 INIT_LIST_HEAD(&phba->fc_npr_list);
1145
1146 phba->fc_map_cnt = 0;
1147 phba->fc_unmap_cnt = 0;
1148 phba->fc_plogi_cnt = 0;
1149 phba->fc_adisc_cnt = 0;
1150 phba->fc_reglogin_cnt = 0;
1151 phba->fc_prli_cnt = 0;
1152 phba->fc_npr_cnt = 0;
1153 phba->fc_unused_cnt= 0;
1154 return;
1155}
1156
1157static void
1158lpfc_establish_link_tmo(unsigned long ptr)
1159{
1160 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
1161 unsigned long iflag;
1162
1163
1164 /* Re-establishing Link, timer expired */
1165 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1166 "%d:1300 Re-establishing Link, timer expired "
1167 "Data: x%x x%x\n",
1168 phba->brd_no, phba->fc_flag, phba->hba_state);
1169 spin_lock_irqsave(phba->host->host_lock, iflag);
1170 phba->fc_flag &= ~FC_ESTABLISH_LINK;
1171 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1172}
1173
1174static int
1175lpfc_stop_timer(struct lpfc_hba * phba)
1176{
1177 struct lpfc_sli *psli = &phba->sli;
1178
1179 /* Instead of a timer, this has been converted to a
 1180 * deferred processing list.
1181 */
1182 while (!list_empty(&phba->freebufList)) {
1183
1184 struct lpfc_dmabuf *mp = NULL;
1185
1186 list_remove_head((&phba->freebufList), mp,
1187 struct lpfc_dmabuf, list);
1188 if (mp) {
1189 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1190 kfree(mp);
1191 }
1192 }
1193
1194 del_timer_sync(&phba->fc_estabtmo);
1195 del_timer_sync(&phba->fc_disctmo);
1196 del_timer_sync(&phba->fc_fdmitmo);
1197 del_timer_sync(&phba->els_tmofunc);
1198 psli = &phba->sli;
1199 del_timer_sync(&psli->mbox_tmo);
1200 return(1);
1201}
1202
1203int
1204lpfc_online(struct lpfc_hba * phba)
1205{
1206 if (!phba)
1207 return 0;
1208
1209 if (!(phba->fc_flag & FC_OFFLINE_MODE))
1210 return 0;
1211
1212 lpfc_printf_log(phba,
1213 KERN_WARNING,
1214 LOG_INIT,
1215 "%d:0458 Bring Adapter online\n",
1216 phba->brd_no);
1217
1218 if (!lpfc_sli_queue_setup(phba))
1219 return 1;
1220
1221 if (lpfc_sli_hba_setup(phba)) /* Initialize the HBA */
1222 return 1;
1223
1224 spin_lock_irq(phba->host->host_lock);
1225 phba->fc_flag &= ~FC_OFFLINE_MODE;
1226 spin_unlock_irq(phba->host->host_lock);
1227
1228 /*
1229 * Restart all traffic to this host. Since the fc_transport block
1230 * functions (future) were not called in lpfc_offline, don't call them
1231 * here.
1232 */
1233 scsi_unblock_requests(phba->host);
1234 return 0;
1235}
1236
1237int
1238lpfc_offline(struct lpfc_hba * phba)
1239{
1240 struct lpfc_sli_ring *pring;
1241 struct lpfc_sli *psli;
1242 unsigned long iflag;
1243 int i = 0;
1244
1245 if (!phba)
1246 return 0;
1247
1248 if (phba->fc_flag & FC_OFFLINE_MODE)
1249 return 0;
1250
1251 /*
1252 * Don't call the fc_transport block api (future). The device is
1253 * going offline and causing a timer to fire in the midlayer is
1254 * unproductive. Just block all new requests until the driver
1255 * comes back online.
1256 */
1257 scsi_block_requests(phba->host);
1258 psli = &phba->sli;
1259 pring = &psli->ring[psli->fcp_ring];
1260
1261 lpfc_linkdown(phba);
1262
1263 /* The linkdown event takes 30 seconds to timeout. */
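 	/* Wait, 10 ms at a time for up to roughly 30 seconds, for commands
 	 * still outstanding on the FCP ring's txcmplq to complete before
 	 * the timers are stopped.
 	 */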
1264 while (pring->txcmplq_cnt) {
1265 mdelay(10);
1266 if (i++ > 3000)
1267 break;
1268 }
1269
1270 /* stop all timers associated with this hba */
1271 lpfc_stop_timer(phba);
1272 phba->work_hba_events = 0;
1273
1274 lpfc_printf_log(phba,
1275 KERN_WARNING,
1276 LOG_INIT,
1277 "%d:0460 Bring Adapter offline\n",
1278 phba->brd_no);
1279
1280 /* Bring down the SLI Layer and cleanup. The HBA is offline
1281 now. */
1282 lpfc_sli_hba_down(phba);
1283 lpfc_cleanup(phba, 1);
1284 spin_lock_irqsave(phba->host->host_lock, iflag);
1285 phba->fc_flag |= FC_OFFLINE_MODE;
1286 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1287 return 0;
1288}
1289
1290/******************************************************************************
1291* Function name: lpfc_scsi_free
1292*
1293* Description: Called from lpfc_pci_remove_one to free internal driver resources
1294*
1295******************************************************************************/
1296static int
1297lpfc_scsi_free(struct lpfc_hba * phba)
1298{
1299 struct lpfc_scsi_buf *sb, *sb_next;
1300 struct lpfc_iocbq *io, *io_next;
1301
1302 spin_lock_irq(phba->host->host_lock);
1303 /* Release all the lpfc_scsi_bufs maintained by this host. */
1304 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
1305 list_del(&sb->list);
1306 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
1307 sb->dma_handle);
1308 kfree(sb);
1309 phba->total_scsi_bufs--;
1310 }
1311
1312 /* Release all the lpfc_iocbq entries maintained by this host. */
1313 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
1314 list_del(&io->list);
1315 kfree(io);
1316 phba->total_iocbq_bufs--;
1317 }
1318
1319 spin_unlock_irq(phba->host->host_lock);
1320
1321 return 0;
1322}
1323
1324
1325static int __devinit
1326lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1327{
1328 struct Scsi_Host *host;
1329 struct lpfc_hba *phba;
1330 struct lpfc_sli *psli;
1331 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
1332 unsigned long bar0map_len, bar2map_len;
1333 int error = -ENODEV, retval;
1334 int i;
1335 u64 wwname;
1336
1337 if (pci_enable_device(pdev))
1338 goto out;
1339 if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
1340 goto out_disable_device;
1341
 1342 host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba));
 1343 if (!host)
1344 goto out_release_regions;
1345
1346 phba = (struct lpfc_hba*)host->hostdata;
1347 memset(phba, 0, sizeof (struct lpfc_hba));
 1348 phba->host = host;
1349
1350 phba->fc_flag |= FC_LOADING;
1351 phba->pcidev = pdev;
1352
1353 /* Assign an unused board number */
1354 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
1355 goto out_put_host;
1356
1357 error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
1358 if (error)
1359 goto out_put_host;
1360
1361 host->unique_id = phba->brd_no;
1362
1363 INIT_LIST_HEAD(&phba->ctrspbuflist);
1364 INIT_LIST_HEAD(&phba->rnidrspbuflist);
1365 INIT_LIST_HEAD(&phba->freebufList);
1366
1367 /* Initialize timers used by driver */
1368 init_timer(&phba->fc_estabtmo);
1369 phba->fc_estabtmo.function = lpfc_establish_link_tmo;
1370 phba->fc_estabtmo.data = (unsigned long)phba;
1371 init_timer(&phba->fc_disctmo);
1372 phba->fc_disctmo.function = lpfc_disc_timeout;
1373 phba->fc_disctmo.data = (unsigned long)phba;
1374
1375 init_timer(&phba->fc_fdmitmo);
1376 phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
1377 phba->fc_fdmitmo.data = (unsigned long)phba;
1378 init_timer(&phba->els_tmofunc);
1379 phba->els_tmofunc.function = lpfc_els_timeout;
1380 phba->els_tmofunc.data = (unsigned long)phba;
1381 psli = &phba->sli;
1382 init_timer(&psli->mbox_tmo);
1383 psli->mbox_tmo.function = lpfc_mbox_timeout;
1384 psli->mbox_tmo.data = (unsigned long)phba;
1385
1386 /*
1387 * Get all the module params for configuring this host and then
1388 * establish the host parameters.
1389 */
1390 lpfc_get_cfgparam(phba);
1391
1392 host->max_id = LPFC_MAX_TARGET;
1393 host->max_lun = phba->cfg_max_luns;
1394 host->this_id = -1;
1395
1396 /* Initialize all internally managed lists. */
1397 INIT_LIST_HEAD(&phba->fc_nlpmap_list);
1398 INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
1399 INIT_LIST_HEAD(&phba->fc_unused_list);
1400 INIT_LIST_HEAD(&phba->fc_plogi_list);
1401 INIT_LIST_HEAD(&phba->fc_adisc_list);
1402 INIT_LIST_HEAD(&phba->fc_reglogin_list);
1403 INIT_LIST_HEAD(&phba->fc_prli_list);
1404 INIT_LIST_HEAD(&phba->fc_npr_list);
1405
1406
1407 pci_set_master(pdev);
1408 retval = pci_set_mwi(pdev);
1409 if (retval)
1410 dev_printk(KERN_WARNING, &pdev->dev,
1411 "Warning: pci_set_mwi returned %d\n", retval);
1412
1413 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
1414 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
1415 goto out_idr_remove;
1416
1417 /*
1418 * Get the bus address of Bar0 and Bar2 and the number of bytes
1419 * required by each mapping.
1420 */
1421 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
1422 bar0map_len = pci_resource_len(phba->pcidev, 0);
1423
1424 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
1425 bar2map_len = pci_resource_len(phba->pcidev, 2);
1426
1427 /* Map HBA SLIM and Control Registers to a kernel virtual address. */
1428 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
1429 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
1430
1431 /* Allocate memory for SLI-2 structures */
1432 phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
1433 &phba->slim2p_mapping, GFP_KERNEL);
1434 if (!phba->slim2p)
1435 goto out_iounmap;
1436
1437
1438 /* Initialize the SLI Layer to run with lpfc HBAs. */
1439 lpfc_sli_setup(phba);
1440 lpfc_sli_queue_setup(phba);
1441
1442 error = lpfc_mem_alloc(phba);
1443 if (error)
1444 goto out_free_slim;
1445
1446 /* Initialize and populate the iocb list per host. */
1447 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
1448 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
1449 iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
1450 if (iocbq_entry == NULL) {
1451 printk(KERN_ERR "%s: only allocated %d iocbs of "
1452 "expected %d count. Unloading driver.\n",
1453 __FUNCTION__, i, LPFC_IOCB_LIST_CNT);
1454 error = -ENOMEM;
1455 goto out_free_iocbq;
1456 }
1457
1458 memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
1459 spin_lock_irq(phba->host->host_lock);
1460 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
1461 phba->total_iocbq_bufs++;
1462 spin_unlock_irq(phba->host->host_lock);
1463 }
1464
1465 /* Initialize HBA structure */
1466 phba->fc_edtov = FF_DEF_EDTOV;
1467 phba->fc_ratov = FF_DEF_RATOV;
1468 phba->fc_altov = FF_DEF_ALTOV;
1469 phba->fc_arbtov = FF_DEF_ARBTOV;
1470
1471 INIT_LIST_HEAD(&phba->work_list);
1472 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
1473 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
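 	/* Host attention events of interest to the worker thread started
 	 * below: error, mailbox and link attention, plus receive activity
 	 * on the ELS ring.
 	 */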
1474
1475 /* Startup the kernel thread for this host adapter. */
1476 phba->worker_thread = kthread_run(lpfc_do_work, phba,
1477 "lpfc_worker_%d", phba->brd_no);
1478 if (IS_ERR(phba->worker_thread)) {
1479 error = PTR_ERR(phba->worker_thread);
1480 goto out_free_iocbq;
1481 }
1482
1483 /* We can rely on a queue depth attribute only after SLI HBA setup */
1484 host->can_queue = phba->cfg_hba_queue_depth - 10;
1485
1486 /* Tell the midlayer we support 16 byte commands */
1487 host->max_cmd_len = 16;
1488
1489 /* Initialize the list of scsi buffers used by driver for scsi IO. */
1490 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
1491
1492 host->transportt = lpfc_transport_template;
1493 host->hostdata[0] = (unsigned long)phba;
1494 pci_set_drvdata(pdev, host);
1495 error = scsi_add_host(host, &pdev->dev);
1496 if (error)
1497 goto out_kthread_stop;
1498
1499 error = lpfc_alloc_sysfs_attr(phba);
1500 if (error)
1501 goto out_kthread_stop;
1502
1503 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ,
1504 LPFC_DRIVER_NAME, phba);
1505 if (error) {
1506 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1507 "%d:0451 Enable interrupt handler failed\n",
1508 phba->brd_no);
1509 goto out_free_sysfs_attr;
1510 }
1511 phba->MBslimaddr = phba->slim_memmap_p;
1512 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
1513 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
1514 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1515 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1516
1517 error = lpfc_sli_hba_setup(phba);
1518 if (error)
1519 goto out_free_irq;
1520
1521 /*
1522 * set fixed host attributes
 1523 * Must be done after lpfc_sli_hba_setup()
1524 */
1525
1526 memcpy(&wwname, &phba->fc_nodename, sizeof(u64));
1527 fc_host_node_name(host) = be64_to_cpu(wwname);
1528 memcpy(&wwname, &phba->fc_portname, sizeof(u64));
1529 fc_host_port_name(host) = be64_to_cpu(wwname);
1530 fc_host_supported_classes(host) = FC_COS_CLASS3;
1531
1532 memset(fc_host_supported_fc4s(host), 0,
1533 sizeof(fc_host_supported_fc4s(host)));
1534 fc_host_supported_fc4s(host)[2] = 1;
1535 fc_host_supported_fc4s(host)[7] = 1;
1536
1537 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host));
1538
1539 fc_host_supported_speeds(host) = 0;
1540 switch (FC_JEDEC_ID(phba->vpd.rev.biuRev)) {
1541 case VIPER_JEDEC_ID:
1542 fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT;
1543 break;
1544 case HELIOS_JEDEC_ID:
1545 fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT;
1546 /* Fall through */
1547 case CENTAUR_2G_JEDEC_ID:
1548 case PEGASUS_JEDEC_ID:
1549 case THOR_JEDEC_ID:
1550 fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT;
1551 /* Fall through */
1552 default:
 1553 fc_host_supported_speeds(host) |= FC_PORTSPEED_1GBIT;
1554 }
1555
1556 fc_host_maxframe_size(host) =
1557 ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
1558 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
1559
1560 /* This value is also unchanging */
1561 memset(fc_host_active_fc4s(host), 0,
1562 sizeof(fc_host_active_fc4s(host)));
1563 fc_host_active_fc4s(host)[2] = 1;
1564 fc_host_active_fc4s(host)[7] = 1;
1565
1566 spin_lock_irq(phba->host->host_lock);
1567 phba->fc_flag &= ~FC_LOADING;
1568 spin_unlock_irq(phba->host->host_lock);
1569 return 0;
1570
1571out_free_irq:
1572 lpfc_stop_timer(phba);
1573 phba->work_hba_events = 0;
1574 free_irq(phba->pcidev->irq, phba);
1575out_free_sysfs_attr:
1576 lpfc_free_sysfs_attr(phba);
1577out_kthread_stop:
1578 kthread_stop(phba->worker_thread);
1579out_free_iocbq:
1580 list_for_each_entry_safe(iocbq_entry, iocbq_next,
1581 &phba->lpfc_iocb_list, list) {
1582 spin_lock_irq(phba->host->host_lock);
1583 kfree(iocbq_entry);
1584 phba->total_iocbq_bufs--;
1585 spin_unlock_irq(phba->host->host_lock);
1586 }
1587 lpfc_mem_free(phba);
1588out_free_slim:
1589 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
1590 phba->slim2p_mapping);
1591out_iounmap:
1592 iounmap(phba->ctrl_regs_memmap_p);
1593 iounmap(phba->slim_memmap_p);
1594out_idr_remove:
1595 idr_remove(&lpfc_hba_index, phba->brd_no);
1596out_put_host:
1597 scsi_host_put(host);
1598out_release_regions:
1599 pci_release_regions(pdev);
1600out_disable_device:
1601 pci_disable_device(pdev);
1602out:
1603 return error;
1604}
1605
1606static void __devexit
1607lpfc_pci_remove_one(struct pci_dev *pdev)
1608{
1609 struct Scsi_Host *host = pci_get_drvdata(pdev);
1610 struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata[0];
1611 unsigned long iflag;
1612
1613 lpfc_free_sysfs_attr(phba);
1614
1615 spin_lock_irqsave(phba->host->host_lock, iflag);
1616 phba->fc_flag |= FC_UNLOADING;
1617
1618 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1619
1620 fc_remove_host(phba->host);
1621 scsi_remove_host(phba->host);
1622
1623 kthread_stop(phba->worker_thread);
1624
1625 /*
 1626 * Bring down the SLI Layer. This step disables all interrupts,
1627 * clears the rings, discards all mailbox commands, and resets
1628 * the HBA.
1629 */
1630 lpfc_sli_hba_down(phba);
1631
1632 /* Release the irq reservation */
1633 free_irq(phba->pcidev->irq, phba);
1634
1635 lpfc_cleanup(phba, 0);
1636 lpfc_stop_timer(phba);
1637 phba->work_hba_events = 0;
1638
1639 /*
1640 * Call scsi_free before mem_free since scsi bufs are released to their
1641 * corresponding pools here.
1642 */
1643 lpfc_scsi_free(phba);
1644 lpfc_mem_free(phba);
1645
1646 /* Free resources associated with SLI2 interface */
1647 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
1648 phba->slim2p, phba->slim2p_mapping);
1649
1650 /* unmap adapter SLIM and Control Registers */
1651 iounmap(phba->ctrl_regs_memmap_p);
1652 iounmap(phba->slim_memmap_p);
1653
1654 pci_release_regions(phba->pcidev);
1655 pci_disable_device(phba->pcidev);
1656
1657 idr_remove(&lpfc_hba_index, phba->brd_no);
1658 scsi_host_put(phba->host);
1659
1660 pci_set_drvdata(pdev, NULL);
1661}
1662
1663static struct pci_device_id lpfc_id_table[] = {
1664 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
1665 PCI_ANY_ID, PCI_ANY_ID, },
1666 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
1667 PCI_ANY_ID, PCI_ANY_ID, },
 1668 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
1669 PCI_ANY_ID, PCI_ANY_ID, },
1670 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
1671 PCI_ANY_ID, PCI_ANY_ID, },
1672 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
1673 PCI_ANY_ID, PCI_ANY_ID, },
1674 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
1675 PCI_ANY_ID, PCI_ANY_ID, },
1676 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
1677 PCI_ANY_ID, PCI_ANY_ID, },
1678 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
1679 PCI_ANY_ID, PCI_ANY_ID, },
1680 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
1681 PCI_ANY_ID, PCI_ANY_ID, },
1682 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
1683 PCI_ANY_ID, PCI_ANY_ID, },
1684 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
1685 PCI_ANY_ID, PCI_ANY_ID, },
1686 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
1687 PCI_ANY_ID, PCI_ANY_ID, },
1688 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
1689 PCI_ANY_ID, PCI_ANY_ID, },
1690 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
1691 PCI_ANY_ID, PCI_ANY_ID, },
1692 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
1693 PCI_ANY_ID, PCI_ANY_ID, },
1694 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
1695 PCI_ANY_ID, PCI_ANY_ID, },
1696 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
1697 PCI_ANY_ID, PCI_ANY_ID, },
1698 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
1699 PCI_ANY_ID, PCI_ANY_ID, },
1700 { 0 }
1701};
1702
1703MODULE_DEVICE_TABLE(pci, lpfc_id_table);
1704
1705static struct pci_driver lpfc_driver = {
1706 .name = LPFC_DRIVER_NAME,
1707 .id_table = lpfc_id_table,
1708 .probe = lpfc_pci_probe_one,
1709 .remove = __devexit_p(lpfc_pci_remove_one),
1710};
1711
1712static int __init
1713lpfc_init(void)
1714{
1715 int error = 0;
1716
1717 printk(LPFC_MODULE_DESC "\n");
 1718 printk(LPFC_COPYRIGHT "\n");
 1719
1720 lpfc_transport_template =
1721 fc_attach_transport(&lpfc_transport_functions);
1722 if (!lpfc_transport_template)
1723 return -ENOMEM;
1724 error = pci_register_driver(&lpfc_driver);
1725 if (error)
1726 fc_release_transport(lpfc_transport_template);
1727
1728 return error;
1729}
1730
1731static void __exit
1732lpfc_exit(void)
1733{
1734 pci_unregister_driver(&lpfc_driver);
1735 fc_release_transport(lpfc_transport_template);
1736}
1737
1738module_init(lpfc_init);
1739module_exit(lpfc_exit);
1740MODULE_LICENSE("GPL");
1741MODULE_DESCRIPTION(LPFC_MODULE_DESC);
1742MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
1743MODULE_VERSION("0:" LPFC_DRIVER_VERSION);