drivers/scsi/qla2xxx/qla_init.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_gbl.h"
9
10 #include <linux/delay.h>
11 #include <linux/vmalloc.h>
12
13 #include "qla_devtbl.h"
14
15 #ifdef CONFIG_SPARC
16 #include <asm/prom.h>
17 #endif
18
19 /*
20 * QLogic ISP2x00 Hardware Support Function Prototypes.
21 */
22 static int qla2x00_isp_firmware(scsi_qla_host_t *);
23 static int qla2x00_setup_chip(scsi_qla_host_t *);
24 static int qla2x00_init_rings(scsi_qla_host_t *);
25 static int qla2x00_fw_ready(scsi_qla_host_t *);
26 static int qla2x00_configure_hba(scsi_qla_host_t *);
27 static int qla2x00_configure_loop(scsi_qla_host_t *);
28 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
29 static int qla2x00_configure_fabric(scsi_qla_host_t *);
30 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
31 static int qla2x00_device_resync(scsi_qla_host_t *);
32 static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
33 uint16_t *);
34
35 static int qla2x00_restart_isp(scsi_qla_host_t *);
36
37 static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
38
39 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
40 static int qla84xx_init_chip(scsi_qla_host_t *);
41 static int qla25xx_init_queues(struct qla_hw_data *);
42
43 /****************************************************************************/
44 /* QLogic ISP2x00 Hardware Support Functions. */
45 /****************************************************************************/
46
47 /*
48 * qla2x00_initialize_adapter
49 * Initialize board.
50 *
51 * Input:
52 * vha = adapter block pointer.
53 *
54 * Returns:
55 * 0 = success
56 */
57 int
58 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
59 {
60 int rval;
61 struct qla_hw_data *ha = vha->hw;
62 struct req_que *req = ha->req_q_map[0];
63
64 /* Clear adapter flags. */
65 vha->flags.online = 0;
66 ha->flags.chip_reset_done = 0;
67 vha->flags.reset_active = 0;
68 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
69 atomic_set(&vha->loop_state, LOOP_DOWN);
70 vha->device_flags = DFLG_NO_CABLE;
71 vha->dpc_flags = 0;
72 vha->flags.management_server_logged_in = 0;
73 vha->marker_needed = 0;
74 ha->isp_abort_cnt = 0;
75 ha->beacon_blink_led = 0;
76 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
77
78 set_bit(0, ha->req_qid_map);
79 set_bit(0, ha->rsp_qid_map);
80
81 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
82 rval = ha->isp_ops->pci_config(vha);
83 if (rval) {
84 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
85 vha->host_no));
86 return (rval);
87 }
88
89 ha->isp_ops->reset_chip(vha);
90
91 rval = qla2xxx_get_flash_info(vha);
92 if (rval) {
93 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
94 vha->host_no));
95 return (rval);
96 }
97
98 ha->isp_ops->get_flash_version(vha, req->ring);
99
100 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
101
102 ha->isp_ops->nvram_config(vha);
103
104 if (ha->flags.disable_serdes) {
105 /* Mask HBA via NVRAM settings? */
106 qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
107 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
108 vha->port_name[0], vha->port_name[1],
109 vha->port_name[2], vha->port_name[3],
110 vha->port_name[4], vha->port_name[5],
111 vha->port_name[6], vha->port_name[7]);
112 return QLA_FUNCTION_FAILED;
113 }
114
115 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
116
117 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
118 rval = ha->isp_ops->chip_diag(vha);
119 if (rval)
120 return (rval);
121 rval = qla2x00_setup_chip(vha);
122 if (rval)
123 return (rval);
124 }
125 if (IS_QLA84XX(ha)) {
126 ha->cs84xx = qla84xx_get_chip(vha);
127 if (!ha->cs84xx) {
128 qla_printk(KERN_ERR, ha,
129 "Unable to configure ISP84XX.\n");
130 return QLA_FUNCTION_FAILED;
131 }
132 }
133 rval = qla2x00_init_rings(vha);
134 ha->flags.chip_reset_done = 1;
135
136 return (rval);
137 }
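/*
 * Call-flow sketch (an assumption about the callers, inferred from this
 * file alone): the per-host probe and reset paths are expected to invoke
 * qla2x00_initialize_adapter() once, which walks the chip-specific
 * isp_ops hooks in order: pci_config(), reset_chip(), nvram_config(),
 * then chip_diag()/setup_chip() only when no usable firmware is already
 * running, and finally qla2x00_init_rings().
 */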
138
139 /**
140 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
141 * @vha: HA context
142 *
143 * Returns 0 on success.
144 */
145 int
146 qla2100_pci_config(scsi_qla_host_t *vha)
147 {
148 uint16_t w;
149 unsigned long flags;
150 struct qla_hw_data *ha = vha->hw;
151 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
152
153 pci_set_master(ha->pdev);
154 pci_try_set_mwi(ha->pdev);
155
156 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
157 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
158 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
159
160 pci_disable_rom(ha->pdev);
161
162 /* Get PCI bus information. */
163 spin_lock_irqsave(&ha->hardware_lock, flags);
164 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
165 spin_unlock_irqrestore(&ha->hardware_lock, flags);
166
167 return QLA_SUCCESS;
168 }
169
170 /**
171 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
172 * @vha: HA context
173 *
174 * Returns 0 on success.
175 */
176 int
177 qla2300_pci_config(scsi_qla_host_t *vha)
178 {
179 uint16_t w;
180 unsigned long flags = 0;
181 uint32_t cnt;
182 struct qla_hw_data *ha = vha->hw;
183 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
184
185 pci_set_master(ha->pdev);
186 pci_try_set_mwi(ha->pdev);
187
188 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
189 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
190
191 if (IS_QLA2322(ha) || IS_QLA6322(ha))
192 w &= ~PCI_COMMAND_INTX_DISABLE;
193 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
194
195 /*
196 * If this is a 2300 card and not 2312, reset the
197 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
198 * the 2310 also reports itself as a 2300 so we need to get the
199 * fb revision level -- a 6 indicates it really is a 2300 and
200 * not a 2310.
201 */
202 if (IS_QLA2300(ha)) {
203 spin_lock_irqsave(&ha->hardware_lock, flags);
204
205 /* Pause RISC. */
206 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
207 for (cnt = 0; cnt < 30000; cnt++) {
208 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
209 break;
210
211 udelay(10);
212 }
213
214 /* Select FPM registers. */
215 WRT_REG_WORD(&reg->ctrl_status, 0x20);
216 RD_REG_WORD(&reg->ctrl_status);
217
218 /* Get the fb rev level */
219 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
220
221 if (ha->fb_rev == FPM_2300)
222 pci_clear_mwi(ha->pdev);
223
224 /* Deselect FPM registers. */
225 WRT_REG_WORD(&reg->ctrl_status, 0x0);
226 RD_REG_WORD(&reg->ctrl_status);
227
228 /* Release RISC module. */
229 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
230 for (cnt = 0; cnt < 30000; cnt++) {
231 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
232 break;
233
234 udelay(10);
235 }
236
237 spin_unlock_irqrestore(&ha->hardware_lock, flags);
238 }
239
240 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
241
242 pci_disable_rom(ha->pdev);
243
244 /* Get PCI bus information. */
245 spin_lock_irqsave(&ha->hardware_lock, flags);
246 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
247 spin_unlock_irqrestore(&ha->hardware_lock, flags);
248
249 return QLA_SUCCESS;
250 }
251
252 /**
253 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
254 * @vha: HA context
255 *
256 * Returns 0 on success.
257 */
258 int
259 qla24xx_pci_config(scsi_qla_host_t *vha)
260 {
261 uint16_t w;
262 unsigned long flags = 0;
263 struct qla_hw_data *ha = vha->hw;
264 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
265
266 pci_set_master(ha->pdev);
267 pci_try_set_mwi(ha->pdev);
268
269 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
270 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
271 w &= ~PCI_COMMAND_INTX_DISABLE;
272 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
273
274 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
275
276 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
277 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
278 pcix_set_mmrbc(ha->pdev, 2048);
279
280 /* PCIe -- adjust Maximum Read Request Size (2048). */
281 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
282 pcie_set_readrq(ha->pdev, 2048);
283
284 pci_disable_rom(ha->pdev);
285
286 ha->chip_revision = ha->pdev->revision;
287
288 /* Get PCI bus information. */
289 spin_lock_irqsave(&ha->hardware_lock, flags);
290 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
291 spin_unlock_irqrestore(&ha->hardware_lock, flags);
292
293 return QLA_SUCCESS;
294 }
295
296 /**
297 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
298 * @vha: HA context
299 *
300 * Returns 0 on success.
301 */
302 int
303 qla25xx_pci_config(scsi_qla_host_t *vha)
304 {
305 uint16_t w;
306 struct qla_hw_data *ha = vha->hw;
307
308 pci_set_master(ha->pdev);
309 pci_try_set_mwi(ha->pdev);
310
311 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
312 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
313 w &= ~PCI_COMMAND_INTX_DISABLE;
314 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
315
316 /* PCIe -- adjust Maximum Read Request Size (2048). */
317 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
318 pcie_set_readrq(ha->pdev, 2048);
319
320 pci_disable_rom(ha->pdev);
321
322 ha->chip_revision = ha->pdev->revision;
323
324 return QLA_SUCCESS;
325 }
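/*
 * Summary of the four pci_config() variants above, derived from the code
 * rather than a data sheet: all of them enable bus mastering, attempt MWI,
 * set PARITY/SERR in PCI_COMMAND and disable the expansion ROM.  The 2300
 * variant additionally drops MWI on genuine 2300 frame-buffer silicon, the
 * 2322/6322 and the FWI2 parts clear INTx-disable, and the 24xx/25xx
 * variants raise the PCI-X MMRBC / PCIe read request size to 2048 bytes.
 */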
326
327 /**
328 * qla2x00_isp_firmware() - Choose firmware image.
329 * @vha: HA context
330 *
331 * Returns 0 on success.
332 */
333 static int
334 qla2x00_isp_firmware(scsi_qla_host_t *vha)
335 {
336 int rval;
337 uint16_t loop_id, topo, sw_cap;
338 uint8_t domain, area, al_pa;
339 struct qla_hw_data *ha = vha->hw;
340
341 /* Assume loading risc code */
342 rval = QLA_FUNCTION_FAILED;
343
344 if (ha->flags.disable_risc_code_load) {
345 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
346 vha->host_no));
347 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
348
349 /* Verify checksum of loaded RISC code. */
350 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
351 if (rval == QLA_SUCCESS) {
352 /* And, verify we are not in ROM code. */
353 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
354 &area, &domain, &topo, &sw_cap);
355 }
356 }
357
358 if (rval) {
359 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
360 vha->host_no));
361 }
362
363 return (rval);
364 }
365
366 /**
367 * qla2x00_reset_chip() - Reset ISP chip.
368 * @vha: HA context
369 *
370 * No return value.
371 */
372 void
373 qla2x00_reset_chip(scsi_qla_host_t *vha)
374 {
375 unsigned long flags = 0;
376 struct qla_hw_data *ha = vha->hw;
377 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
378 uint32_t cnt;
379 uint16_t cmd;
380
381 ha->isp_ops->disable_intrs(ha);
382
383 spin_lock_irqsave(&ha->hardware_lock, flags);
384
385 /* Turn off master enable */
386 cmd = 0;
387 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
388 cmd &= ~PCI_COMMAND_MASTER;
389 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
390
391 if (!IS_QLA2100(ha)) {
392 /* Pause RISC. */
393 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
394 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
395 for (cnt = 0; cnt < 30000; cnt++) {
396 if ((RD_REG_WORD(&reg->hccr) &
397 HCCR_RISC_PAUSE) != 0)
398 break;
399 udelay(100);
400 }
401 } else {
402 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
403 udelay(10);
404 }
405
406 /* Select FPM registers. */
407 WRT_REG_WORD(&reg->ctrl_status, 0x20);
408 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
409
410 /* FPM Soft Reset. */
411 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
412 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
413
414 /* Toggle FPM reset. */
415 if (!IS_QLA2200(ha)) {
416 WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
417 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
418 }
419
420 /* Select frame buffer registers. */
421 WRT_REG_WORD(&reg->ctrl_status, 0x10);
422 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
423
424 /* Reset frame buffer FIFOs. */
425 if (IS_QLA2200(ha)) {
426 WRT_FB_CMD_REG(ha, reg, 0xa000);
427 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
428 } else {
429 WRT_FB_CMD_REG(ha, reg, 0x00fc);
430
431 /* Read back fb_cmd until zero or 3 seconds max */
432 for (cnt = 0; cnt < 3000; cnt++) {
433 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
434 break;
435 udelay(100);
436 }
437 }
438
439 /* Select RISC module registers. */
440 WRT_REG_WORD(&reg->ctrl_status, 0);
441 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
442
443 /* Reset RISC processor. */
444 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
445 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
446
447 /* Release RISC processor. */
448 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
449 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
450 }
451
452 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
453 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
454
455 /* Reset ISP chip. */
456 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
457
458 /* Wait for RISC to recover from reset. */
459 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
460 /*
461 * It is necessary to have a delay here since the card doesn't
462 * respond to PCI reads during a reset. On some architectures
463 * this will result in an MCA.
464 */
465 udelay(20);
466 for (cnt = 30000; cnt; cnt--) {
467 if ((RD_REG_WORD(&reg->ctrl_status) &
468 CSR_ISP_SOFT_RESET) == 0)
469 break;
470 udelay(100);
471 }
472 } else
473 udelay(10);
474
475 /* Reset RISC processor. */
476 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
477
478 WRT_REG_WORD(&reg->semaphore, 0);
479
480 /* Release RISC processor. */
481 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
482 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
483
484 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
485 for (cnt = 0; cnt < 30000; cnt++) {
486 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
487 break;
488
489 udelay(100);
490 }
491 } else
492 udelay(100);
493
494 /* Turn on master enable */
495 cmd |= PCI_COMMAND_MASTER;
496 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
497
498 /* Disable RISC pause on FPM parity error. */
499 if (!IS_QLA2100(ha)) {
500 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
501 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
502 }
503
504 spin_unlock_irqrestore(&ha->hardware_lock, flags);
505 }
506
507 /**
508 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
509 * @vha: HA context
510 *
511 * No return value.
512 */
513 static inline void
514 qla24xx_reset_risc(scsi_qla_host_t *vha)
515 {
516 unsigned long flags = 0;
517 struct qla_hw_data *ha = vha->hw;
518 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
519 uint32_t cnt, d2;
520 uint16_t wd;
521
522 spin_lock_irqsave(&ha->hardware_lock, flags);
523
524 /* Reset RISC. */
525 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
526 for (cnt = 0; cnt < 30000; cnt++) {
527 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
528 break;
529
530 udelay(10);
531 }
532
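/*
 * DMA engines should be quiesced at this point; assert the ISP soft
 * reset.  The PCI config read that follows is presumably only there to
 * flush the posted register write before the delay.
 */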
533 WRT_REG_DWORD(&reg->ctrl_status,
534 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
535 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
536
537 udelay(100);
538 /* Wait for firmware to complete NVRAM accesses. */
539 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
540 for (cnt = 10000 ; cnt && d2; cnt--) {
541 udelay(5);
542 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
543 barrier();
544 }
545
546 /* Wait for soft-reset to complete. */
547 d2 = RD_REG_DWORD(&reg->ctrl_status);
548 for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
549 udelay(5);
550 d2 = RD_REG_DWORD(&reg->ctrl_status);
551 barrier();
552 }
553
554 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
555 RD_REG_DWORD(&reg->hccr);
556
557 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
558 RD_REG_DWORD(&reg->hccr);
559
560 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
561 RD_REG_DWORD(&reg->hccr);
562
563 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
564 for (cnt = 6000000 ; cnt && d2; cnt--) {
565 udelay(5);
566 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
567 barrier();
568 }
569
570 spin_unlock_irqrestore(&ha->hardware_lock, flags);
571
572 if (IS_NOPOLLING_TYPE(ha))
573 ha->isp_ops->enable_intrs(ha);
574 }
575
576 /**
577 * qla24xx_reset_chip() - Reset ISP24xx chip.
578 * @vha: HA context
579 *
580 * No return value.
581 */
582 void
583 qla24xx_reset_chip(scsi_qla_host_t *vha)
584 {
585 struct qla_hw_data *ha = vha->hw;
586 ha->isp_ops->disable_intrs(ha);
587
588 /* Perform RISC reset. */
589 qla24xx_reset_risc(vha);
590 }
591
592 /**
593 * qla2x00_chip_diag() - Test chip for proper operation.
594 * @vha: HA context
595 *
596 * Returns 0 on success.
597 */
598 int
599 qla2x00_chip_diag(scsi_qla_host_t *vha)
600 {
601 int rval;
602 struct qla_hw_data *ha = vha->hw;
603 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
604 unsigned long flags = 0;
605 uint16_t data;
606 uint32_t cnt;
607 uint16_t mb[5];
608 struct req_que *req = ha->req_q_map[0];
609
610 /* Assume a failed state */
611 rval = QLA_FUNCTION_FAILED;
612
613 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
614 vha->host_no, (u_long)&reg->flash_address));
615
616 spin_lock_irqsave(&ha->hardware_lock, flags);
617
618 /* Reset ISP chip. */
619 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
620
621 /*
622 * We need a delay here since the card will not respond while in
623 * reset, which can cause an MCA on some architectures.
624 */
625 udelay(20);
626 data = qla2x00_debounce_register(&reg->ctrl_status);
627 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
628 udelay(5);
629 data = RD_REG_WORD(&reg->ctrl_status);
630 barrier();
631 }
632
633 if (!cnt)
634 goto chip_diag_failed;
635
636 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
637 vha->host_no));
638
639 /* Reset RISC processor. */
640 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
641 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
642
643 /* Workaround for QLA2312 PCI parity error */
644 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
645 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
646 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
647 udelay(5);
648 data = RD_MAILBOX_REG(ha, reg, 0);
649 barrier();
650 }
651 } else
652 udelay(10);
653
654 if (!cnt)
655 goto chip_diag_failed;
656
657 /* Check product ID of chip */
658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
659
660 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
661 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
662 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
663 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
664 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
665 mb[3] != PROD_ID_3) {
666 qla_printk(KERN_WARNING, ha,
667 "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]);
668
669 goto chip_diag_failed;
670 }
671 ha->product_id[0] = mb[1];
672 ha->product_id[1] = mb[2];
673 ha->product_id[2] = mb[3];
674 ha->product_id[3] = mb[4];
675
676 /* Adjust fw RISC transfer size */
677 if (req->length > 1024)
678 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
679 else
680 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
681 req->length;
682
683 if (IS_QLA2200(ha) &&
684 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
685 /* Limit firmware transfer size with a 2200A */
686 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
687 vha->host_no));
688
689 ha->device_type |= DT_ISP2200A;
690 ha->fw_transfer_size = 128;
691 }
692
693 /* Wrap Incoming Mailboxes Test. */
694 spin_unlock_irqrestore(&ha->hardware_lock, flags);
695
696 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
697 rval = qla2x00_mbx_reg_test(vha);
698 if (rval) {
699 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
700 vha->host_no));
701 qla_printk(KERN_WARNING, ha,
702 "Failed mailbox send register test\n");
703 }
704 else {
705 /* Flag a successful rval */
706 rval = QLA_SUCCESS;
707 }
708 spin_lock_irqsave(&ha->hardware_lock, flags);
709
710 chip_diag_failed:
711 if (rval)
712 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
713 "****\n", vha->host_no));
714
715 spin_unlock_irqrestore(&ha->hardware_lock, flags);
716
717 return (rval);
718 }
719
720 /**
721 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
722 * @vha: HA context
723 *
724 * Returns 0 on success.
725 */
726 int
727 qla24xx_chip_diag(scsi_qla_host_t *vha)
728 {
729 int rval;
730 struct qla_hw_data *ha = vha->hw;
731 struct req_que *req = ha->req_q_map[0];
732
733 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
734
735 rval = qla2x00_mbx_reg_test(vha);
736 if (rval) {
737 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
738 vha->host_no));
739 qla_printk(KERN_WARNING, ha,
740 "Failed mailbox send register test\n");
741 } else {
742 /* Flag a successful rval */
743 rval = QLA_SUCCESS;
744 }
745
746 return rval;
747 }
748
749 void
750 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
751 {
752 int rval;
753 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
754 eft_size, fce_size, mq_size;
755 dma_addr_t tc_dma;
756 void *tc;
757 struct qla_hw_data *ha = vha->hw;
758 struct req_que *req = ha->req_q_map[0];
759 struct rsp_que *rsp = ha->rsp_q_map[0];
760
761 if (ha->fw_dump) {
762 qla_printk(KERN_WARNING, ha,
763 "Firmware dump previously allocated.\n");
764 return;
765 }
766
767 ha->fw_dumped = 0;
768 fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
769 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
770 fixed_size = sizeof(struct qla2100_fw_dump);
771 } else if (IS_QLA23XX(ha)) {
772 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
773 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
774 sizeof(uint16_t);
775 } else if (IS_FWI2_CAPABLE(ha)) {
776 if (IS_QLA81XX(ha))
777 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
778 else if (IS_QLA25XX(ha))
779 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
780 else
781 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
782 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
783 sizeof(uint32_t);
784 if (ha->mqenable)
785 mq_size = sizeof(struct qla2xxx_mq_chain);
786 /* Allocate memory for Fibre Channel Event Buffer. */
787 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
788 goto try_eft;
789
790 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
791 GFP_KERNEL);
792 if (!tc) {
793 qla_printk(KERN_WARNING, ha, "Unable to allocate "
794 "(%d KB) for FCE.\n", FCE_SIZE / 1024);
795 goto try_eft;
796 }
797
798 memset(tc, 0, FCE_SIZE);
799 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
800 ha->fce_mb, &ha->fce_bufs);
801 if (rval) {
802 qla_printk(KERN_WARNING, ha, "Unable to initialize "
803 "FCE (%d).\n", rval);
804 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
805 tc_dma);
806 ha->flags.fce_enabled = 0;
807 goto try_eft;
808 }
809
810 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
811 FCE_SIZE / 1024);
812
813 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
814 ha->flags.fce_enabled = 1;
815 ha->fce_dma = tc_dma;
816 ha->fce = tc;
817 try_eft:
818 /* Allocate memory for Extended Trace Buffer. */
819 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
820 GFP_KERNEL);
821 if (!tc) {
822 qla_printk(KERN_WARNING, ha, "Unable to allocate "
823 "(%d KB) for EFT.\n", EFT_SIZE / 1024);
824 goto cont_alloc;
825 }
826
827 memset(tc, 0, EFT_SIZE);
828 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
829 if (rval) {
830 qla_printk(KERN_WARNING, ha, "Unable to initialize "
831 "EFT (%d).\n", rval);
832 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
833 tc_dma);
834 goto cont_alloc;
835 }
836
837 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
838 EFT_SIZE / 1024);
839
840 eft_size = EFT_SIZE;
841 ha->eft_dma = tc_dma;
842 ha->eft = tc;
843 }
844 cont_alloc:
845 req_q_size = req->length * sizeof(request_t);
846 rsp_q_size = rsp->length * sizeof(response_t);
847
848 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
849 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
850 ha->chain_offset = dump_size;
851 dump_size += mq_size + fce_size;
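/*
 * Layout of the vmalloc'd dump buffer implied by the sums above:
 * [qla2xxx_fw_dump header][fixed registers][fw memory][request queue]
 * [response queue][EFT], with ha->chain_offset recording where the
 * optional MQ and FCE chains are appended.
 */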
852
853 ha->fw_dump = vmalloc(dump_size);
854 if (!ha->fw_dump) {
855 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
856 "firmware dump!!!\n", dump_size / 1024);
857
858 if (ha->eft) {
859 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
860 ha->eft_dma);
861 ha->eft = NULL;
862 ha->eft_dma = 0;
863 }
864 return;
865 }
866 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
867 dump_size / 1024);
868
869 ha->fw_dump_len = dump_size;
870 ha->fw_dump->signature[0] = 'Q';
871 ha->fw_dump->signature[1] = 'L';
872 ha->fw_dump->signature[2] = 'G';
873 ha->fw_dump->signature[3] = 'C';
874 ha->fw_dump->version = __constant_htonl(1);
875
876 ha->fw_dump->fixed_size = htonl(fixed_size);
877 ha->fw_dump->mem_size = htonl(mem_size);
878 ha->fw_dump->req_q_size = htonl(req_q_size);
879 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
880
881 ha->fw_dump->eft_size = htonl(eft_size);
882 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
883 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
884
885 ha->fw_dump->header_size =
886 htonl(offsetof(struct qla2xxx_fw_dump, isp));
887 }
888
889 static int
890 qla81xx_mpi_sync(scsi_qla_host_t *vha)
891 {
892 #define MPS_MASK 0xe0
893 int rval;
894 uint16_t dc;
895 uint32_t dw;
896 struct qla_hw_data *ha = vha->hw;
897
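/*
 * Descriptive note (the expansion of "MPS" is not spelled out in this
 * file): under the 0x7c00 hardware semaphore, compare the MPS_MASK bits
 * of RISC RAM word 0x7a15 with the copy reported in PCI config space at
 * offset 0x54, and rewrite the RAM word only when the two disagree.
 */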
898 if (!IS_QLA81XX(vha->hw))
899 return QLA_SUCCESS;
900
901 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
902 if (rval != QLA_SUCCESS) {
903 DEBUG2(qla_printk(KERN_WARNING, ha,
904 "Sync-MPI: Unable to acquire semaphore.\n"));
905 goto done;
906 }
907
908 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
909 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
910 if (rval != QLA_SUCCESS) {
911 DEBUG2(qla_printk(KERN_WARNING, ha,
912 "Sync-MPI: Unable to read sync.\n"));
913 goto done_release;
914 }
915
916 dc &= MPS_MASK;
917 if (dc == (dw & MPS_MASK))
918 goto done_release;
919
920 dw &= ~MPS_MASK;
921 dw |= dc;
922 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
923 if (rval != QLA_SUCCESS) {
924 DEBUG2(qla_printk(KERN_WARNING, ha,
925 "Sync-MPI: Unable to gain sync.\n"));
926 }
927
928 done_release:
929 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
930 if (rval != QLA_SUCCESS) {
931 DEBUG2(qla_printk(KERN_WARNING, ha,
932 "Sync-MPI: Unable to release semaphore.\n"));
933 }
934
935 done:
936 return rval;
937 }
938
939 /**
940 * qla2x00_setup_chip() - Load and start RISC firmware.
941 * @vha: HA context
942 *
943 * Returns 0 on success.
944 */
945 static int
946 qla2x00_setup_chip(scsi_qla_host_t *vha)
947 {
948 int rval;
949 uint32_t srisc_address = 0;
950 struct qla_hw_data *ha = vha->hw;
951 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
952 unsigned long flags;
953 uint16_t fw_major_version;
954
955 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
956 /* Disable SRAM, Instruction RAM and GP RAM parity. */
957 spin_lock_irqsave(&ha->hardware_lock, flags);
958 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
959 RD_REG_WORD(&reg->hccr);
960 spin_unlock_irqrestore(&ha->hardware_lock, flags);
961 }
962
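/*
 * Parity checking stays off while the firmware image is loaded and
 * checksummed, presumably so a stray parity hit cannot pause the RISC
 * mid-load; it is switched back on after the load attempt, further down
 * in this function.
 */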
963 qla81xx_mpi_sync(vha);
964
965 /* Load firmware sequences */
966 rval = ha->isp_ops->load_risc(vha, &srisc_address);
967 if (rval == QLA_SUCCESS) {
968 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
969 "code.\n", vha->host_no));
970
971 rval = qla2x00_verify_checksum(vha, srisc_address);
972 if (rval == QLA_SUCCESS) {
973 /* Start firmware execution. */
974 DEBUG(printk("scsi(%ld): Checksum OK, start "
975 "firmware.\n", vha->host_no));
976
977 rval = qla2x00_execute_fw(vha, srisc_address);
978 /* Retrieve firmware information. */
979 if (rval == QLA_SUCCESS) {
980 fw_major_version = ha->fw_major_version;
981 rval = qla2x00_get_fw_version(vha,
982 &ha->fw_major_version,
983 &ha->fw_minor_version,
984 &ha->fw_subminor_version,
985 &ha->fw_attributes, &ha->fw_memory_size,
986 ha->mpi_version, &ha->mpi_capabilities,
987 ha->phy_version);
988 if (rval != QLA_SUCCESS)
989 goto failed;
990
991 ha->flags.npiv_supported = 0;
992 if (IS_QLA2XXX_MIDTYPE(ha) &&
993 (ha->fw_attributes & BIT_2)) {
994 ha->flags.npiv_supported = 1;
995 if ((!ha->max_npiv_vports) ||
996 ((ha->max_npiv_vports + 1) %
997 MIN_MULTI_ID_FABRIC))
998 ha->max_npiv_vports =
999 MIN_MULTI_ID_FABRIC - 1;
1000 }
1001 qla2x00_get_resource_cnts(vha, NULL,
1002 &ha->fw_xcb_count, NULL, NULL,
1003 &ha->max_npiv_vports);
1004
1005 if (!fw_major_version && ql2xallocfwdump)
1006 qla2x00_alloc_fw_dump(vha);
1007 }
1008 } else {
1009 DEBUG2(printk(KERN_INFO
1010 "scsi(%ld): ISP Firmware failed checksum.\n",
1011 vha->host_no));
1012 }
1013 }
1014
1015 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1016 /* Enable proper parity. */
1017 spin_lock_irqsave(&ha->hardware_lock, flags);
1018 if (IS_QLA2300(ha))
1019 /* SRAM parity */
1020 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
1021 else
1022 /* SRAM, Instruction RAM and GP RAM parity */
1023 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
1024 RD_REG_WORD(&reg->hccr);
1025 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1026 }
1027
1028 if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1029 uint32_t size;
1030
1031 rval = qla81xx_fac_get_sector_size(vha, &size);
1032 if (rval == QLA_SUCCESS) {
1033 ha->flags.fac_supported = 1;
1034 ha->fdt_block_size = size << 2;
1035 } else {
1036 qla_printk(KERN_ERR, ha,
1037 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1038 ha->fw_major_version, ha->fw_minor_version,
1039 ha->fw_subminor_version);
1040 }
1041 }
1042 failed:
1043 if (rval) {
1044 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
1045 vha->host_no));
1046 }
1047
1048 return (rval);
1049 }
1050
1051 /**
1052 * qla2x00_init_response_q_entries() - Initializes response queue entries.
1053 * @rsp: response queue to initialize
1054 *
1055 * Beginning of request ring has initialization control block already built
1056 * by nvram config routine.
1057 *
1058 * No return value.
1059 */
1060 void
1061 qla2x00_init_response_q_entries(struct rsp_que *rsp)
1062 {
1063 uint16_t cnt;
1064 response_t *pkt;
1065
1066 rsp->ring_ptr = rsp->ring;
1067 rsp->ring_index = 0;
1068 rsp->status_srb = NULL;
1069 pkt = rsp->ring_ptr;
1070 for (cnt = 0; cnt < rsp->length; cnt++) {
1071 pkt->signature = RESPONSE_PROCESSED;
1072 pkt++;
1073 }
1074 }
1075
1076 /**
1077 * qla2x00_update_fw_options() - Read and process firmware options.
1078 * @vha: HA context
1079 *
1080 * No return value.
1081 */
1082 void
1083 qla2x00_update_fw_options(scsi_qla_host_t *vha)
1084 {
1085 uint16_t swing, emphasis, tx_sens, rx_sens;
1086 struct qla_hw_data *ha = vha->hw;
1087
1088 memset(ha->fw_options, 0, sizeof(ha->fw_options));
1089 qla2x00_get_fw_options(vha, ha->fw_options);
1090
1091 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1092 return;
1093
1094 /* Serial Link options. */
1095 DEBUG3(printk("scsi(%ld): Serial link options:\n",
1096 vha->host_no));
1097 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
1098 sizeof(ha->fw_seriallink_options)));
1099
1100 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1101 if (ha->fw_seriallink_options[3] & BIT_2) {
1102 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
1103
1104 /* 1G settings */
1105 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
1106 emphasis = (ha->fw_seriallink_options[2] &
1107 (BIT_4 | BIT_3)) >> 3;
1108 tx_sens = ha->fw_seriallink_options[0] &
1109 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1110 rx_sens = (ha->fw_seriallink_options[0] &
1111 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1112 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
1113 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1114 if (rx_sens == 0x0)
1115 rx_sens = 0x3;
1116 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
1117 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1118 ha->fw_options[10] |= BIT_5 |
1119 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1120 (tx_sens & (BIT_1 | BIT_0));
1121
1122 /* 2G settings */
1123 swing = (ha->fw_seriallink_options[2] &
1124 (BIT_7 | BIT_6 | BIT_5)) >> 5;
1125 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
1126 tx_sens = ha->fw_seriallink_options[1] &
1127 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1128 rx_sens = (ha->fw_seriallink_options[1] &
1129 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1130 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
1131 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1132 if (rx_sens == 0x0)
1133 rx_sens = 0x3;
1134 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
1135 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1136 ha->fw_options[11] |= BIT_5 |
1137 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1138 (tx_sens & (BIT_1 | BIT_0));
1139 }
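/*
 * Resulting layout of the serial-link words as packed above (not taken
 * from a databook): fw_options[10] carries the 1G settings and
 * fw_options[11] the 2G settings, each as emphasis << 14 | swing << 8,
 * plus either tx_sens << 4 | rx_sens on 2300/2312/6312 or the
 * BIT_5-flagged two-bit form used on 2322/6322.
 */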
1140
1141 /* FCP2 options. */
1142 /* Return command IOCBs without waiting for an ABTS to complete. */
1143 ha->fw_options[3] |= BIT_13;
1144
1145 /* LED scheme. */
1146 if (ha->flags.enable_led_scheme)
1147 ha->fw_options[2] |= BIT_12;
1148
1149 /* Detect ISP6312. */
1150 if (IS_QLA6312(ha))
1151 ha->fw_options[2] |= BIT_13;
1152
1153 /* Update firmware options. */
1154 qla2x00_set_fw_options(vha, ha->fw_options);
1155 }
1156
1157 void
1158 qla24xx_update_fw_options(scsi_qla_host_t *vha)
1159 {
1160 int rval;
1161 struct qla_hw_data *ha = vha->hw;
1162
1163 /* Update Serial Link options. */
1164 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1165 return;
1166
1167 rval = qla2x00_set_serdes_params(vha,
1168 le16_to_cpu(ha->fw_seriallink_options24[1]),
1169 le16_to_cpu(ha->fw_seriallink_options24[2]),
1170 le16_to_cpu(ha->fw_seriallink_options24[3]));
1171 if (rval != QLA_SUCCESS) {
1172 qla_printk(KERN_WARNING, ha,
1173 "Unable to update Serial Link options (%x).\n", rval);
1174 }
1175 }
1176
1177 void
1178 qla2x00_config_rings(struct scsi_qla_host *vha)
1179 {
1180 struct qla_hw_data *ha = vha->hw;
1181 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1182 struct req_que *req = ha->req_q_map[0];
1183 struct rsp_que *rsp = ha->rsp_q_map[0];
1184
1185 /* Setup ring parameters in initialization control block. */
1186 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
1187 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
1188 ha->init_cb->request_q_length = cpu_to_le16(req->length);
1189 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
1190 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1191 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1192 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1193 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1194
1195 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
1196 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
1197 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
1198 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
1199 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
1200 }
1201
1202 void
1203 qla24xx_config_rings(struct scsi_qla_host *vha)
1204 {
1205 struct qla_hw_data *ha = vha->hw;
1206 device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
1207 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1208 struct qla_msix_entry *msix;
1209 struct init_cb_24xx *icb;
1210 uint16_t rid = 0;
1211 struct req_que *req = ha->req_q_map[0];
1212 struct rsp_que *rsp = ha->rsp_q_map[0];
1213
1214 /* Setup ring parameters in initialization control block. */
1215 icb = (struct init_cb_24xx *)ha->init_cb;
1216 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1217 icb->response_q_inpointer = __constant_cpu_to_le16(0);
1218 icb->request_q_length = cpu_to_le16(req->length);
1219 icb->response_q_length = cpu_to_le16(rsp->length);
1220 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1221 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1222 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1223 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1224
1225 if (ha->mqenable) {
1226 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1227 icb->rid = __constant_cpu_to_le16(rid);
1228 if (ha->flags.msix_enabled) {
1229 msix = &ha->msix_entries[1];
1230 DEBUG2_17(printk(KERN_INFO
1231 "Registering vector 0x%x for base que\n", msix->entry));
1232 icb->msix = cpu_to_le16(msix->entry);
1233 }
1234 /* Use alternate PCI bus number */
1235 if (MSB(rid))
1236 icb->firmware_options_2 |=
1237 __constant_cpu_to_le32(BIT_19);
1238 /* Use alternate PCI devfn */
1239 if (LSB(rid))
1240 icb->firmware_options_2 |=
1241 __constant_cpu_to_le32(BIT_18);
1242
1243 icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22);
1244 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
1245
1246 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
1247 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
1248 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
1249 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
1250 } else {
1251 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
1252 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
1253 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
1254 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1255 }
1256 /* PCI posting */
1257 RD_REG_DWORD(&ioreg->hccr);
1258 }
1259
1260 /**
1261 * qla2x00_init_rings() - Initializes firmware.
1262 * @vha: HA context
1263 *
1264 * Beginning of request ring has initialization control block already built
1265 * by nvram config routine.
1266 *
1267 * Returns 0 on success.
1268 */
1269 static int
1270 qla2x00_init_rings(scsi_qla_host_t *vha)
1271 {
1272 int rval;
1273 unsigned long flags = 0;
1274 int cnt, que;
1275 struct qla_hw_data *ha = vha->hw;
1276 struct req_que *req;
1277 struct rsp_que *rsp;
1278 struct scsi_qla_host *vp;
1279 struct mid_init_cb_24xx *mid_init_cb =
1280 (struct mid_init_cb_24xx *) ha->init_cb;
1281
1282 spin_lock_irqsave(&ha->hardware_lock, flags);
1283
1284 /* Clear outstanding commands array. */
1285 for (que = 0; que < ha->max_req_queues; que++) {
1286 req = ha->req_q_map[que];
1287 if (!req)
1288 continue;
1289 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1290 req->outstanding_cmds[cnt] = NULL;
1291
1292 req->current_outstanding_cmd = 1;
1293
1294 /* Initialize firmware. */
1295 req->ring_ptr = req->ring;
1296 req->ring_index = 0;
1297 req->cnt = req->length;
1298 }
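/*
 * Note that the loop above clears outstanding_cmds[] from index 1 and
 * starts current_outstanding_cmd at 1: handle 0 is presumably kept
 * unused so that a zero handle in a completion can never be mistaken
 * for a live command.
 */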
1299
1300 for (que = 0; que < ha->max_rsp_queues; que++) {
1301 rsp = ha->rsp_q_map[que];
1302 if (!rsp)
1303 continue;
1304 /* Initialize response queue entries */
1305 qla2x00_init_response_q_entries(rsp);
1306 }
1307
1308 /* Clear RSCN queue. */
1309 list_for_each_entry(vp, &ha->vp_list, list) {
1310 vp->rscn_in_ptr = 0;
1311 vp->rscn_out_ptr = 0;
1312 }
1313 ha->isp_ops->config_rings(vha);
1314
1315 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1316
1317 /* Update any ISP specific firmware options before initialization. */
1318 ha->isp_ops->update_fw_options(vha);
1319
1320 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
1321
1322 if (ha->flags.npiv_supported) {
1323 if (ha->operating_mode == LOOP)
1324 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
1325 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1326 }
1327
1328 if (IS_FWI2_CAPABLE(ha)) {
1329 mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
1330 mid_init_cb->init_cb.execution_throttle =
1331 cpu_to_le16(ha->fw_xcb_count);
1332 }
1333
1334 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1335 if (rval) {
1336 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
1337 vha->host_no));
1338 } else {
1339 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
1340 vha->host_no));
1341 }
1342
1343 return (rval);
1344 }
1345
1346 /**
1347 * qla2x00_fw_ready() - Waits for firmware ready.
1348 * @vha: HA context
1349 *
1350 * Returns 0 on success.
1351 */
1352 static int
1353 qla2x00_fw_ready(scsi_qla_host_t *vha)
1354 {
1355 int rval;
1356 unsigned long wtime, mtime, cs84xx_time;
1357 uint16_t min_wait; /* Minimum wait time if loop is down */
1358 uint16_t wait_time; /* Wait time if loop is coming ready */
1359 uint16_t state[5];
1360 struct qla_hw_data *ha = vha->hw;
1361
1362 rval = QLA_SUCCESS;
1363
1364 /* 20 seconds for loop down. */
1365 min_wait = 20;
1366
1367 /*
1368 * Firmware should take at most one RATOV to login, plus 5 seconds for
1369 * our own processing.
1370 */
1371 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
1372 wait_time = min_wait;
1373 }
1374
1375 /* Min wait time if loop down */
1376 mtime = jiffies + (min_wait * HZ);
1377
1378 /* wait time before firmware ready */
1379 wtime = jiffies + (wait_time * HZ);
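/*
 * Two deadlines run side by side: mtime caps how long a down link is
 * tolerated (min_wait), while wtime caps the overall wait for
 * FSTATE_READY; the ISP84xx verify path below stretches both by however
 * long the extra chip initialization took.
 */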
1380
1381 /* Wait for ISP to finish LIP */
1382 if (!vha->flags.init_done)
1383 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
1384
1385 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1386 vha->host_no));
1387
1388 do {
1389 rval = qla2x00_get_firmware_state(vha, state);
1390 if (rval == QLA_SUCCESS) {
1391 if (state[0] < FSTATE_LOSS_OF_SYNC) {
1392 vha->device_flags &= ~DFLG_NO_CABLE;
1393 }
1394 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1395 DEBUG16(printk("scsi(%ld): fw_state=%x "
1396 "84xx=%x.\n", vha->host_no, state[0],
1397 state[2]));
1398 if ((state[2] & FSTATE_LOGGED_IN) &&
1399 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1400 DEBUG16(printk("scsi(%ld): Sending "
1401 "verify iocb.\n", vha->host_no));
1402
1403 cs84xx_time = jiffies;
1404 rval = qla84xx_init_chip(vha);
1405 if (rval != QLA_SUCCESS)
1406 break;
1407
1408 /* Add time taken to initialize. */
1409 cs84xx_time = jiffies - cs84xx_time;
1410 wtime += cs84xx_time;
1411 mtime += cs84xx_time;
1412 DEBUG16(printk("scsi(%ld): Increasing "
1413 "wait time by %ld. New time %ld\n",
1414 vha->host_no, cs84xx_time, wtime));
1415 }
1416 } else if (state[0] == FSTATE_READY) {
1417 DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
1418 vha->host_no));
1419
1420 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1421 &ha->login_timeout, &ha->r_a_tov);
1422
1423 rval = QLA_SUCCESS;
1424 break;
1425 }
1426
1427 rval = QLA_FUNCTION_FAILED;
1428
1429 if (atomic_read(&vha->loop_down_timer) &&
1430 state[0] != FSTATE_READY) {
1431 /* Loop down. Timeout on min_wait for states
1432 * other than Wait for Login.
1433 */
1434 if (time_after_eq(jiffies, mtime)) {
1435 qla_printk(KERN_INFO, ha,
1436 "Cable is unplugged...\n");
1437
1438 vha->device_flags |= DFLG_NO_CABLE;
1439 break;
1440 }
1441 }
1442 } else {
1443 /* Mailbox cmd failed. Timeout on min_wait. */
1444 if (time_after_eq(jiffies, mtime))
1445 break;
1446 }
1447
1448 if (time_after_eq(jiffies, wtime))
1449 break;
1450
1451 /* Delay for a while */
1452 msleep(500);
1453
1454 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1455 vha->host_no, state[0], jiffies));
1456 } while (1);
1457
1458 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
1459 vha->host_no, state[0], state[1], state[2], state[3], state[4],
1460 jiffies));
1461
1462 if (rval) {
1463 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
1464 vha->host_no));
1465 }
1466
1467 return (rval);
1468 }
1469
1470 /*
1471 * qla2x00_configure_hba
1472 * Setup adapter context.
1473 *
1474 * Input:
1475 * vha = adapter state pointer.
1476 *
1477 * Returns:
1478 * 0 = success
1479 *
1480 * Context:
1481 * Kernel context.
1482 */
1483 static int
1484 qla2x00_configure_hba(scsi_qla_host_t *vha)
1485 {
1486 int rval;
1487 uint16_t loop_id;
1488 uint16_t topo;
1489 uint16_t sw_cap;
1490 uint8_t al_pa;
1491 uint8_t area;
1492 uint8_t domain;
1493 char connect_type[22];
1494 struct qla_hw_data *ha = vha->hw;
1495
1496 /* Get host addresses. */
1497 rval = qla2x00_get_adapter_id(vha,
1498 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1499 if (rval != QLA_SUCCESS) {
1500 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
1501 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
1502 DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
1503 __func__, vha->host_no));
1504 } else {
1505 qla_printk(KERN_WARNING, ha,
1506 "ERROR -- Unable to get host loop ID.\n");
1507 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1508 }
1509 return (rval);
1510 }
1511
1512 if (topo == 4) {
1513 qla_printk(KERN_INFO, ha,
1514 "Cannot get topology - retrying.\n");
1515 return (QLA_FUNCTION_FAILED);
1516 }
1517
1518 vha->loop_id = loop_id;
1519
1520 /* initialize */
1521 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
1522 ha->operating_mode = LOOP;
1523 ha->switch_cap = 0;
1524
1525 switch (topo) {
1526 case 0:
1527 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
1528 vha->host_no));
1529 ha->current_topology = ISP_CFG_NL;
1530 strcpy(connect_type, "(Loop)");
1531 break;
1532
1533 case 1:
1534 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
1535 vha->host_no));
1536 ha->switch_cap = sw_cap;
1537 ha->current_topology = ISP_CFG_FL;
1538 strcpy(connect_type, "(FL_Port)");
1539 break;
1540
1541 case 2:
1542 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
1543 vha->host_no));
1544 ha->operating_mode = P2P;
1545 ha->current_topology = ISP_CFG_N;
1546 strcpy(connect_type, "(N_Port-to-N_Port)");
1547 break;
1548
1549 case 3:
1550 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
1551 vha->host_no));
1552 ha->switch_cap = sw_cap;
1553 ha->operating_mode = P2P;
1554 ha->current_topology = ISP_CFG_F;
1555 strcpy(connect_type, "(F_Port)");
1556 break;
1557
1558 default:
1559 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
1560 "Using NL.\n",
1561 vha->host_no, topo));
1562 ha->current_topology = ISP_CFG_NL;
1563 strcpy(connect_type, "(Loop)");
1564 break;
1565 }
1566
1567 /* Save Host port and loop ID. */
1568 /* byte order - Big Endian */
1569 vha->d_id.b.domain = domain;
1570 vha->d_id.b.area = area;
1571 vha->d_id.b.al_pa = al_pa;
1572
1573 if (!vha->flags.init_done)
1574 qla_printk(KERN_INFO, ha,
1575 "Topology - %s, Host Loop address 0x%x\n",
1576 connect_type, vha->loop_id);
1577
1578 if (rval) {
1579 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
1580 } else {
1581 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
1582 }
1583
1584 return(rval);
1585 }
1586
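/*
 * Helper for the NVRAM config paths: prefer the model string handed in
 * (trimming trailing blanks and NULs), otherwise fall back to the
 * qla2x00_model_name[] table keyed by PCI subsystem-device ID, and
 * finally to @def; FWI2-capable parts additionally pull the description
 * from VPD tag 0x82.
 */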
1587 static inline void
1588 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1589 char *def)
1590 {
1591 char *st, *en;
1592 uint16_t index;
1593 struct qla_hw_data *ha = vha->hw;
1594 int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha);
1595
1596 if (memcmp(model, BINZERO, len) != 0) {
1597 strncpy(ha->model_number, model, len);
1598 st = en = ha->model_number;
1599 en += len - 1;
1600 while (en > st) {
1601 if (*en != 0x20 && *en != 0x00)
1602 break;
1603 *en-- = '\0';
1604 }
1605
1606 index = (ha->pdev->subsystem_device & 0xff);
1607 if (use_tbl &&
1608 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1609 index < QLA_MODEL_NAMES)
1610 strncpy(ha->model_desc,
1611 qla2x00_model_name[index * 2 + 1],
1612 sizeof(ha->model_desc) - 1);
1613 } else {
1614 index = (ha->pdev->subsystem_device & 0xff);
1615 if (use_tbl &&
1616 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1617 index < QLA_MODEL_NAMES) {
1618 strcpy(ha->model_number,
1619 qla2x00_model_name[index * 2]);
1620 strncpy(ha->model_desc,
1621 qla2x00_model_name[index * 2 + 1],
1622 sizeof(ha->model_desc) - 1);
1623 } else {
1624 strcpy(ha->model_number, def);
1625 }
1626 }
1627 if (IS_FWI2_CAPABLE(ha))
1628 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1629 sizeof(ha->model_desc));
1630 }
1631
1632 /* On sparc systems, obtain port and node WWN from firmware
1633 * properties.
1634 */
1635 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
1636 {
1637 #ifdef CONFIG_SPARC
1638 struct qla_hw_data *ha = vha->hw;
1639 struct pci_dev *pdev = ha->pdev;
1640 struct device_node *dp = pci_device_to_OF_node(pdev);
1641 const u8 *val;
1642 int len;
1643
1644 val = of_get_property(dp, "port-wwn", &len);
1645 if (val && len >= WWN_SIZE)
1646 memcpy(nv->port_name, val, WWN_SIZE);
1647
1648 val = of_get_property(dp, "node-wwn", &len);
1649 if (val && len >= WWN_SIZE)
1650 memcpy(nv->node_name, val, WWN_SIZE);
1651 #endif
1652 }
1653
1654 /*
1655 * NVRAM configuration for ISP 2xxx
1656 *
1657 * Input:
1658 * vha = adapter block pointer.
1659 *
1660 * Output:
1661 * initialization control block in response_ring
1662 * host adapters parameters in host adapter block
1663 *
1664 * Returns:
1665 * 0 = success.
1666 */
1667 int
1668 qla2x00_nvram_config(scsi_qla_host_t *vha)
1669 {
1670 int rval;
1671 uint8_t chksum = 0;
1672 uint16_t cnt;
1673 uint8_t *dptr1, *dptr2;
1674 struct qla_hw_data *ha = vha->hw;
1675 init_cb_t *icb = ha->init_cb;
1676 nvram_t *nv = ha->nvram;
1677 uint8_t *ptr = ha->nvram;
1678 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1679
1680 rval = QLA_SUCCESS;
1681
1682 /* Determine NVRAM starting address. */
1683 ha->nvram_size = sizeof(nvram_t);
1684 ha->nvram_base = 0;
1685 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
1686 if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
1687 ha->nvram_base = 0x80;
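/* A bank-select value of 1 in the top ctrl_status bits presumably
 * identifies the second PCI function, whose NVRAM copy starts at
 * offset 0x80 rather than 0. */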
1688
1689 /* Get NVRAM data and calculate checksum. */
1690 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
1691 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
1692 chksum += *ptr++;
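/* The stored checksum byte is included in the sum, so an intact image
 * adds up to zero modulo 256; the "if (chksum ...)" test below relies
 * on exactly that. */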
1693
1694 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
1695 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
1696
1697 /* Bad NVRAM data, set defaults parameters. */
1698 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
1699 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
1700 /* Reset NVRAM data. */
1701 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
1702 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
1703 nv->nvram_version);
1704 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
1705 "invalid -- WWPN) defaults.\n");
1706
1707 /*
1708 * Set default initialization control block.
1709 */
1710 memset(nv, 0, ha->nvram_size);
1711 nv->parameter_block_version = ICB_VERSION;
1712
1713 if (IS_QLA23XX(ha)) {
1714 nv->firmware_options[0] = BIT_2 | BIT_1;
1715 nv->firmware_options[1] = BIT_7 | BIT_5;
1716 nv->add_firmware_options[0] = BIT_5;
1717 nv->add_firmware_options[1] = BIT_5 | BIT_4;
1718 nv->frame_payload_size = __constant_cpu_to_le16(2048);
1719 nv->special_options[1] = BIT_7;
1720 } else if (IS_QLA2200(ha)) {
1721 nv->firmware_options[0] = BIT_2 | BIT_1;
1722 nv->firmware_options[1] = BIT_7 | BIT_5;
1723 nv->add_firmware_options[0] = BIT_5;
1724 nv->add_firmware_options[1] = BIT_5 | BIT_4;
1725 nv->frame_payload_size = __constant_cpu_to_le16(1024);
1726 } else if (IS_QLA2100(ha)) {
1727 nv->firmware_options[0] = BIT_3 | BIT_1;
1728 nv->firmware_options[1] = BIT_5;
1729 nv->frame_payload_size = __constant_cpu_to_le16(1024);
1730 }
1731
1732 nv->max_iocb_allocation = __constant_cpu_to_le16(256);
1733 nv->execution_throttle = __constant_cpu_to_le16(16);
1734 nv->retry_count = 8;
1735 nv->retry_delay = 1;
1736
1737 nv->port_name[0] = 33;
1738 nv->port_name[3] = 224;
1739 nv->port_name[4] = 139;
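/* 33/224/139 are 0x21/0xe0/0x8b: a placeholder WWPN built over what
 * appears to be QLogic's 00:e0:8b OUI, syntactically valid but
 * deliberately not unique (hence the "invalid WWPN" warning above). */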
1740
1741 qla2xxx_nvram_wwn_from_ofw(vha, nv);
1742
1743 nv->login_timeout = 4;
1744
1745 /*
1746 * Set default host adapter parameters
1747 */
1748 nv->host_p[1] = BIT_2;
1749 nv->reset_delay = 5;
1750 nv->port_down_retry_count = 8;
1751 nv->max_luns_per_target = __constant_cpu_to_le16(8);
1752 nv->link_down_timeout = 60;
1753
1754 rval = 1;
1755 }
1756
1757 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
1758 /*
1759 * The SN2 does not provide BIOS emulation which means you can't change
1760 * potentially bogus BIOS settings. Force the use of default settings
1761 * for link rate and frame size. Hope that the rest of the settings
1762 * are valid.
1763 */
1764 if (ia64_platform_is("sn2")) {
1765 nv->frame_payload_size = __constant_cpu_to_le16(2048);
1766 if (IS_QLA23XX(ha))
1767 nv->special_options[1] = BIT_7;
1768 }
1769 #endif
1770
1771 /* Reset Initialization control block */
1772 memset(icb, 0, ha->init_cb_size);
1773
1774 /*
1775 * Setup driver NVRAM options.
1776 */
1777 nv->firmware_options[0] |= (BIT_6 | BIT_1);
1778 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
1779 nv->firmware_options[1] |= (BIT_5 | BIT_0);
1780 nv->firmware_options[1] &= ~BIT_4;
1781
1782 if (IS_QLA23XX(ha)) {
1783 nv->firmware_options[0] |= BIT_2;
1784 nv->firmware_options[0] &= ~BIT_3;
1785 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
1786
1787 if (IS_QLA2300(ha)) {
1788 if (ha->fb_rev == FPM_2310) {
1789 strcpy(ha->model_number, "QLA2310");
1790 } else {
1791 strcpy(ha->model_number, "QLA2300");
1792 }
1793 } else {
1794 qla2x00_set_model_info(vha, nv->model_number,
1795 sizeof(nv->model_number), "QLA23xx");
1796 }
1797 } else if (IS_QLA2200(ha)) {
1798 nv->firmware_options[0] |= BIT_2;
1799 /*
1800 * 'Point-to-point preferred, else loop' is not a safe
1801 * connection mode setting.
1802 */
1803 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
1804 (BIT_5 | BIT_4)) {
1805 /* Force 'loop preferred, else point-to-point'. */
1806 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
1807 nv->add_firmware_options[0] |= BIT_5;
1808 }
1809 strcpy(ha->model_number, "QLA22xx");
1810 } else /*if (IS_QLA2100(ha))*/ {
1811 strcpy(ha->model_number, "QLA2100");
1812 }
1813
1814 /*
1815 * Copy over NVRAM RISC parameter block to initialization control block.
1816 */
1817 dptr1 = (uint8_t *)icb;
1818 dptr2 = (uint8_t *)&nv->parameter_block_version;
1819 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
1820 while (cnt--)
1821 *dptr1++ = *dptr2++;
1822
1823 /* Copy 2nd half. */
1824 dptr1 = (uint8_t *)icb->add_firmware_options;
1825 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
1826 while (cnt--)
1827 *dptr1++ = *dptr2++;
1828
1829 /* Use alternate WWN? */
1830 if (nv->host_p[1] & BIT_7) {
1831 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
1832 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
1833 }
1834
1835 /* Prepare nodename */
1836 if ((icb->firmware_options[1] & BIT_6) == 0) {
1837 /*
1838 * Firmware will apply the following mask if the nodename was
1839 * not provided.
1840 */
1841 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
1842 icb->node_name[0] &= 0xF0;
1843 }
1844
1845 /*
1846 * Set host adapter parameters.
1847 */
1848 if (nv->host_p[0] & BIT_7)
1849 ql2xextended_error_logging = 1;
1850 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
1851 /* Always load RISC code on non ISP2[12]00 chips. */
1852 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
1853 ha->flags.disable_risc_code_load = 0;
1854 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
1855 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
1856 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
1857 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
1858 ha->flags.disable_serdes = 0;
1859
1860 ha->operating_mode =
1861 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
1862
1863 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
1864 sizeof(ha->fw_seriallink_options));
1865
1866 /* save HBA serial number */
1867 ha->serial0 = icb->port_name[5];
1868 ha->serial1 = icb->port_name[6];
1869 ha->serial2 = icb->port_name[7];
1870 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
1871 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1872
1873 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
1874
1875 ha->retry_count = nv->retry_count;
1876
1877 /* Set minimum login_timeout to 4 seconds. */
1878 if (nv->login_timeout < ql2xlogintimeout)
1879 nv->login_timeout = ql2xlogintimeout;
1880 if (nv->login_timeout < 4)
1881 nv->login_timeout = 4;
1882 ha->login_timeout = nv->login_timeout;
1883 icb->login_timeout = nv->login_timeout;
1884
1885 /* Set minimum RATOV to 100 tenths of a second. */
1886 ha->r_a_tov = 100;
1887
1888 ha->loop_reset_delay = nv->reset_delay;
1889
1890 /* Link Down Timeout = 0:
1891 *
1892 * When Port Down timer expires we will start returning
1893 * I/O's to OS with "DID_NO_CONNECT".
1894 *
1895 * Link Down Timeout != 0:
1896 *
1897 * The driver waits for the link to come up after link down
1898 * before returning I/Os to OS with "DID_NO_CONNECT".
1899 */
1900 if (nv->link_down_timeout == 0) {
1901 ha->loop_down_abort_time =
1902 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
1903 } else {
1904 ha->link_down_timeout = nv->link_down_timeout;
1905 ha->loop_down_abort_time =
1906 (LOOP_DOWN_TIME - ha->link_down_timeout);
1907 }
1908
1909 /*
1910 * Need enough time to try and get the port back.
1911 */
1912 ha->port_down_retry_count = nv->port_down_retry_count;
1913 if (qlport_down_retry)
1914 ha->port_down_retry_count = qlport_down_retry;
1915 /* Set login_retry_count */
1916 ha->login_retry_count = nv->retry_count;
1917 if (ha->port_down_retry_count == nv->port_down_retry_count &&
1918 ha->port_down_retry_count > 3)
1919 ha->login_retry_count = ha->port_down_retry_count;
1920 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
1921 ha->login_retry_count = ha->port_down_retry_count;
1922 if (ql2xloginretrycount)
1923 ha->login_retry_count = ql2xloginretrycount;
1924
1925 icb->lun_enables = __constant_cpu_to_le16(0);
1926 icb->command_resource_count = 0;
1927 icb->immediate_notify_resource_count = 0;
1928 icb->timeout = __constant_cpu_to_le16(0);
1929
1930 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1931 /* Enable RIO */
1932 icb->firmware_options[0] &= ~BIT_3;
1933 icb->add_firmware_options[0] &=
1934 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
1935 icb->add_firmware_options[0] |= BIT_2;
1936 icb->response_accumulation_timer = 3;
1937 icb->interrupt_delay_timer = 5;
1938
1939 vha->flags.process_response_queue = 1;
1940 } else {
1941 /* Enable ZIO. */
1942 if (!vha->flags.init_done) {
1943 ha->zio_mode = icb->add_firmware_options[0] &
1944 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1945 ha->zio_timer = icb->interrupt_delay_timer ?
1946 icb->interrupt_delay_timer: 2;
1947 }
1948 icb->add_firmware_options[0] &=
1949 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
1950 vha->flags.process_response_queue = 0;
1951 if (ha->zio_mode != QLA_ZIO_DISABLED) {
1952 ha->zio_mode = QLA_ZIO_MODE_6;
1953
1954 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
1955 "delay (%d us).\n", vha->host_no, ha->zio_mode,
1956 ha->zio_timer * 100));
1957 qla_printk(KERN_INFO, ha,
1958 "ZIO mode %d enabled; timer delay (%d us).\n",
1959 ha->zio_mode, ha->zio_timer * 100);
1960
1961 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
1962 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
1963 vha->flags.process_response_queue = 1;
1964 }
1965 }
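/*
 * Summary of the block above: ZIO mode 6 lets the firmware coalesce
 * response-queue completions, delaying the interrupt by the programmed
 * interrupt_delay_timer (reported by the driver as zio_timer * 100 us);
 * process_response_queue is set so that request-path code elsewhere in
 * the driver also drains the response queue while submitting commands.
 */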
1966
1967 if (rval) {
1968 DEBUG2_3(printk(KERN_WARNING
1969 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
1970 }
1971 return (rval);
1972 }
1973
1974 static void
1975 qla2x00_rport_del(void *data)
1976 {
1977 fc_port_t *fcport = data;
1978 struct fc_rport *rport;
1979
1980 spin_lock_irq(fcport->vha->host->host_lock);
1981 rport = fcport->drport;
1982 fcport->drport = NULL;
1983 spin_unlock_irq(fcport->vha->host->host_lock);
1984 if (rport)
1985 fc_remote_port_delete(rport);
1986 }
1987
1988 /**
1989 * qla2x00_alloc_fcport() - Allocate a generic fcport.
1990 * @vha: HA context
1991 * @flags: allocation flags
1992 *
1993 * Returns a pointer to the allocated fcport, or NULL, if none available.
1994 */
1995 static fc_port_t *
1996 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1997 {
1998 fc_port_t *fcport;
1999
2000 fcport = kzalloc(sizeof(fc_port_t), flags);
2001 if (!fcport)
2002 return NULL;
2003
2004 /* Setup fcport template structure. */
2005 fcport->vha = vha;
2006 fcport->vp_idx = vha->vp_idx;
2007 fcport->port_type = FCT_UNKNOWN;
2008 fcport->loop_id = FC_NO_LOOP_ID;
2009 atomic_set(&fcport->state, FCS_UNCONFIGURED);
2010 fcport->supported_classes = FC_COS_UNSPECIFIED;
2011
2012 return fcport;
2013 }
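/*
 * Typical usage, sketched from qla2x00_configure_local_loop() below
 * (not additional driver code):
 *
 *	fc_port_t *fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 *	if (!fcport)
 *		return QLA_MEMORY_ALLOC_FAILED;
 *	fcport->d_id.b24 = d_id.b24;
 *	fcport->loop_id = loop_id;
 *	if (qla2x00_get_port_database(vha, fcport, 0) == QLA_SUCCESS)
 *		list_add_tail(&fcport->list, &vha->vp_fcports);
 *	else
 *		kfree(fcport);
 *
 * The caller owns the allocation until the port is added to a list.
 */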
2014
2015 /*
2016 * qla2x00_configure_loop
2017 * Updates Fibre Channel Device Database with what is actually on the loop.
2018 *
2019 * Input:
2020 * ha = adapter block pointer.
2021 *
2022 * Returns:
2023 * 0 = success.
2024 * 1 = error.
2025 * 2 = database was full and device was not configured.
2026 */
2027 static int
2028 qla2x00_configure_loop(scsi_qla_host_t *vha)
2029 {
2030 int rval;
2031 unsigned long flags, save_flags;
2032 struct qla_hw_data *ha = vha->hw;
2033 rval = QLA_SUCCESS;
2034
2035 /* Get Initiator ID */
2036 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2037 rval = qla2x00_configure_hba(vha);
2038 if (rval != QLA_SUCCESS) {
2039 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
2040 vha->host_no));
2041 return (rval);
2042 }
2043 }
2044
2045 save_flags = flags = vha->dpc_flags;
2046 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
2047 vha->host_no, flags));
2048
2049 /*
2050 * If we have both an RSCN and PORT UPDATE pending then handle them
2051 * both at the same time.
2052 */
2053 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2054 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
2055
2056 /* Determine what we need to do */
2057 if (ha->current_topology == ISP_CFG_FL &&
2058 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2059
2060 vha->flags.rscn_queue_overflow = 1;
2061 set_bit(RSCN_UPDATE, &flags);
2062
2063 } else if (ha->current_topology == ISP_CFG_F &&
2064 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2065
2066 vha->flags.rscn_queue_overflow = 1;
2067 set_bit(RSCN_UPDATE, &flags);
2068 clear_bit(LOCAL_LOOP_UPDATE, &flags);
2069
2070 } else if (ha->current_topology == ISP_CFG_N) {
2071 clear_bit(RSCN_UPDATE, &flags);
2072
2073 } else if (!vha->flags.online ||
2074 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
2075
2076 vha->flags.rscn_queue_overflow = 1;
2077 set_bit(RSCN_UPDATE, &flags);
2078 set_bit(LOCAL_LOOP_UPDATE, &flags);
2079 }
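/*
 * Net effect of the checks above: FL-port topology escalates a local
 * loop update into an RSCN-driven rescan as well, F-port topology
 * replaces it with an RSCN rescan only, N-port topology drops RSCN
 * handling, and an offline adapter or an active ISP abort forces both
 * a local loop update and a fabric rescan.
 */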
2080
2081 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2082 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2083 rval = QLA_FUNCTION_FAILED;
2084 else
2085 rval = qla2x00_configure_local_loop(vha);
2086 }
2087
2088 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2089 if (LOOP_TRANSITION(vha))
2090 rval = QLA_FUNCTION_FAILED;
2091 else
2092 rval = qla2x00_configure_fabric(vha);
2093 }
2094
2095 if (rval == QLA_SUCCESS) {
2096 if (atomic_read(&vha->loop_down_timer) ||
2097 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2098 rval = QLA_FUNCTION_FAILED;
2099 } else {
2100 atomic_set(&vha->loop_state, LOOP_READY);
2101
2102 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
2103 }
2104 }
2105
2106 if (rval) {
2107 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
2108 __func__, vha->host_no));
2109 } else {
2110 DEBUG3(printk("%s: exiting normally\n", __func__));
2111 }
2112
2113 /* Restore state if a resync event occurred during processing */
2114 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2115 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2116 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2117 if (test_bit(RSCN_UPDATE, &save_flags)) {
2118 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2119 vha->flags.rscn_queue_overflow = 1;
2120 }
2121 }
2122
2123 return (rval);
2124 }
2125
2126
2127
2128 /*
2129 * qla2x00_configure_local_loop
2130 * Updates Fibre Channel Device Database with local loop devices.
2131 *
2132 * Input:
2133 * ha = adapter block pointer.
2134 *
2135 * Returns:
2136 * 0 = success.
2137 */
2138 static int
2139 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2140 {
2141 int rval, rval2;
2142 int found_devs;
2143 int found;
2144 fc_port_t *fcport, *new_fcport;
2145
2146 uint16_t index;
2147 uint16_t entries;
2148 char *id_iter;
2149 uint16_t loop_id;
2150 uint8_t domain, area, al_pa;
2151 struct qla_hw_data *ha = vha->hw;
2152
2153 found_devs = 0;
2154 new_fcport = NULL;
2155 entries = MAX_FIBRE_DEVICES;
2156
2157 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
2158 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
2159
2160 /* Get list of logged in devices. */
2161 memset(ha->gid_list, 0, GID_LIST_SIZE);
2162 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2163 &entries);
2164 if (rval != QLA_SUCCESS)
2165 goto cleanup_allocation;
2166
2167 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
2168 vha->host_no, entries));
2169 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
2170 entries * sizeof(struct gid_list_info)));
2171
2172 /* Allocate temporary fcport for any new fcports discovered. */
2173 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2174 if (new_fcport == NULL) {
2175 rval = QLA_MEMORY_ALLOC_FAILED;
2176 goto cleanup_allocation;
2177 }
2178 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2179
2180 /*
2181 * Mark local devices that were present as FCS_DEVICE_LOST for now.
2182 */
2183 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2184 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2185 fcport->port_type != FCT_BROADCAST &&
2186 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2187
2188 DEBUG(printk("scsi(%ld): Marking port lost, "
2189 "loop_id=0x%04x\n",
2190 vha->host_no, fcport->loop_id));
2191
2192 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2193 }
2194 }
2195
2196 /* Add devices to port list. */
2197 id_iter = (char *)ha->gid_list;
2198 for (index = 0; index < entries; index++) {
2199 domain = ((struct gid_list_info *)id_iter)->domain;
2200 area = ((struct gid_list_info *)id_iter)->area;
2201 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
2202 if (IS_QLA2100(ha) || IS_QLA2200(ha))
2203 loop_id = (uint16_t)
2204 ((struct gid_list_info *)id_iter)->loop_id_2100;
2205 else
2206 loop_id = le16_to_cpu(
2207 ((struct gid_list_info *)id_iter)->loop_id);
2208 id_iter += ha->gid_list_info_size;
2209
2210 /* Bypass reserved domain fields. */
2211 if ((domain & 0xf0) == 0xf0)
2212 continue;
2213
2214 /* Bypass if not same domain and area of adapter. */
2215 if (area && domain &&
2216 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
2217 continue;
2218
2219 /* Bypass invalid local loop ID. */
2220 if (loop_id > LAST_LOCAL_LOOP_ID)
2221 continue;
2222
2223 /* Fill in member data. */
2224 new_fcport->d_id.b.domain = domain;
2225 new_fcport->d_id.b.area = area;
2226 new_fcport->d_id.b.al_pa = al_pa;
2227 new_fcport->loop_id = loop_id;
2228 new_fcport->vp_idx = vha->vp_idx;
2229 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2230 if (rval2 != QLA_SUCCESS) {
2231 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
2232 "information -- get_port_database=%x, "
2233 "loop_id=0x%04x\n",
2234 vha->host_no, rval2, new_fcport->loop_id));
2235 DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
2236 vha->host_no));
2237 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2238 continue;
2239 }
2240
2241 /* Check for matching device in port list. */
2242 found = 0;
2243 fcport = NULL;
2244 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2245 if (memcmp(new_fcport->port_name, fcport->port_name,
2246 WWN_SIZE))
2247 continue;
2248
2249 fcport->flags &= ~FCF_FABRIC_DEVICE;
2250 fcport->loop_id = new_fcport->loop_id;
2251 fcport->port_type = new_fcport->port_type;
2252 fcport->d_id.b24 = new_fcport->d_id.b24;
2253 memcpy(fcport->node_name, new_fcport->node_name,
2254 WWN_SIZE);
2255
2256 found++;
2257 break;
2258 }
2259
2260 if (!found) {
2261 /* New device, add to fcports list. */
2262 if (vha->vp_idx) {
2263 new_fcport->vha = vha;
2264 new_fcport->vp_idx = vha->vp_idx;
2265 }
2266 list_add_tail(&new_fcport->list, &vha->vp_fcports);
2267
2268 /* Allocate a new replacement fcport. */
2269 fcport = new_fcport;
2270 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2271 if (new_fcport == NULL) {
2272 rval = QLA_MEMORY_ALLOC_FAILED;
2273 goto cleanup_allocation;
2274 }
2275 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2276 }
2277
2278 /* Base iIDMA settings on HBA port speed. */
2279 fcport->fp_speed = ha->link_data_rate;
2280
2281 qla2x00_update_fcport(vha, fcport);
2282
2283 found_devs++;
2284 }
2285
2286 cleanup_allocation:
2287 kfree(new_fcport);
2288
2289 if (rval != QLA_SUCCESS) {
2290 DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
2291 "rval=%x\n", vha->host_no, rval));
2292 }
2293
2294 return (rval);
2295 }
2296
2297 static void
2298 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2299 {
2300 #define LS_UNKNOWN 2
2301 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2302 char *link_speed;
2303 int rval;
2304 uint16_t mb[4];
2305 struct qla_hw_data *ha = vha->hw;
2306
2307 if (!IS_IIDMA_CAPABLE(ha))
2308 return;
2309
2310 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
2311 fcport->fp_speed > ha->link_data_rate)
2312 return;
2313
2314 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2315 mb);
2316 if (rval != QLA_SUCCESS) {
2317 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2318 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2319 vha->host_no, fcport->port_name[0], fcport->port_name[1],
2320 fcport->port_name[2], fcport->port_name[3],
2321 fcport->port_name[4], fcport->port_name[5],
2322 fcport->port_name[6], fcport->port_name[7], rval,
2323 fcport->fp_speed, mb[0], mb[1]));
2324 } else {
2325 link_speed = link_speeds[LS_UNKNOWN];
2326 if (fcport->fp_speed < 5)
2327 link_speed = link_speeds[fcport->fp_speed];
2328 else if (fcport->fp_speed == 0x13)
2329 link_speed = link_speeds[5];
2330 DEBUG2(qla_printk(KERN_INFO, ha,
2331 "iIDMA adjusted to %s GB/s on "
2332 "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2333 link_speed, fcport->port_name[0],
2334 fcport->port_name[1], fcport->port_name[2],
2335 fcport->port_name[3], fcport->port_name[4],
2336 fcport->port_name[5], fcport->port_name[6],
2337 fcport->port_name[7]));
2338 }
2339 }
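/*
 * Note on link_speeds[] above: fp_speed values 0-4 index the "1", "2",
 * "?", "4" and "8" strings directly, while 0x13 (used by newer firmware
 * for 10 Gb/s ports) maps to link_speeds[5]; anything else falls back
 * to LS_UNKNOWN ("?").
 */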
2340
2341 static void
2342 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2343 {
2344 struct fc_rport_identifiers rport_ids;
2345 struct fc_rport *rport;
2346 struct qla_hw_data *ha = vha->hw;
2347
2348 if (fcport->drport)
2349 qla2x00_rport_del(fcport);
2350
2351 rport_ids.node_name = wwn_to_u64(fcport->node_name);
2352 rport_ids.port_name = wwn_to_u64(fcport->port_name);
2353 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2354 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2355 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2356 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2357 if (!rport) {
2358 qla_printk(KERN_WARNING, ha,
2359 "Unable to allocate fc remote port!\n");
2360 return;
2361 }
2362 spin_lock_irq(fcport->vha->host->host_lock);
2363 *((fc_port_t **)rport->dd_data) = fcport;
2364 spin_unlock_irq(fcport->vha->host->host_lock);
2365
2366 rport->supported_classes = fcport->supported_classes;
2367
2368 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2369 if (fcport->port_type == FCT_INITIATOR)
2370 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
2371 if (fcport->port_type == FCT_TARGET)
2372 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
2373 fc_remote_port_rolechg(rport, rport_ids.roles);
2374 }
2375
2376 /*
2377 * qla2x00_update_fcport
2378 * Updates device on list.
2379 *
2380 * Input:
2381 * ha = adapter block pointer.
2382 * fcport = port structure pointer.
2383 *
2384 * Return:
2385 * 0 - Success
2386 * BIT_0 - error
2387 *
2388 * Context:
2389 * Kernel context.
2390 */
2391 void
2392 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2393 {
2394 struct qla_hw_data *ha = vha->hw;
2395
2396 fcport->vha = vha;
2397 fcport->login_retry = 0;
2398 fcport->port_login_retry_count = ha->port_down_retry_count *
2399 PORT_RETRY_TIME;
2400 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
2401 PORT_RETRY_TIME);
2402 fcport->flags &= ~FCF_LOGIN_NEEDED;
2403
2404 qla2x00_iidma_fcport(vha, fcport);
2405
2406 atomic_set(&fcport->state, FCS_ONLINE);
2407
2408 qla2x00_reg_remote_port(vha, fcport);
2409 }
2410
2411 /*
2412 * qla2x00_configure_fabric
2413 * Set up SNS devices with loop IDs.
2414 *
2415 * Input:
2416 * ha = adapter block pointer.
2417 *
2418 * Returns:
2419 * 0 = success.
2420 * BIT_0 = error
2421 */
2422 static int
2423 qla2x00_configure_fabric(scsi_qla_host_t *vha)
2424 {
2425 int rval, rval2;
2426 fc_port_t *fcport, *fcptemp;
2427 uint16_t next_loopid;
2428 uint16_t mb[MAILBOX_REGISTER_COUNT];
2429 uint16_t loop_id;
2430 LIST_HEAD(new_fcports);
2431 struct qla_hw_data *ha = vha->hw;
2432 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2433
2434 /* If FL port exists, then SNS is present */
2435 if (IS_FWI2_CAPABLE(ha))
2436 loop_id = NPH_F_PORT;
2437 else
2438 loop_id = SNS_FL_PORT;
2439 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2440 if (rval != QLA_SUCCESS) {
2441 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
2442 "Port\n", vha->host_no));
2443
2444 vha->device_flags &= ~SWITCH_FOUND;
2445 return (QLA_SUCCESS);
2446 }
2447 vha->device_flags |= SWITCH_FOUND;
2448
2449 /* Mark devices that need re-synchronization. */
2450 rval2 = qla2x00_device_resync(vha);
2451 if (rval2 == QLA_RSCNS_HANDLED) {
2452 /* No point doing the scan, just continue. */
2453 return (QLA_SUCCESS);
2454 }
2455 do {
2456 /* FDMI support. */
2457 if (ql2xfdmienable &&
2458 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
2459 qla2x00_fdmi_register(vha);
2460
2461 /* Ensure we are logged into the SNS. */
2462 if (IS_FWI2_CAPABLE(ha))
2463 loop_id = NPH_SNS;
2464 else
2465 loop_id = SIMPLE_NAME_SERVER;
2466 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
2467 0xfc, mb, BIT_1 | BIT_0);
2468 if (mb[0] != MBS_COMMAND_COMPLETE) {
2469 DEBUG2(qla_printk(KERN_INFO, ha,
2470 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
2471 "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id,
2472 mb[0], mb[1], mb[2], mb[6], mb[7]));
2473 return (QLA_SUCCESS);
2474 }
2475
2476 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
2477 if (qla2x00_rft_id(vha)) {
2478 /* EMPTY */
2479 DEBUG2(printk("scsi(%ld): Register FC-4 "
2480 "TYPE failed.\n", vha->host_no));
2481 }
2482 if (qla2x00_rff_id(vha)) {
2483 /* EMPTY */
2484 DEBUG2(printk("scsi(%ld): Register FC-4 "
2485 "Features failed.\n", vha->host_no));
2486 }
2487 if (qla2x00_rnn_id(vha)) {
2488 /* EMPTY */
2489 DEBUG2(printk("scsi(%ld): Register Node Name "
2490 "failed.\n", vha->host_no));
2491 } else if (qla2x00_rsnn_nn(vha)) {
2492 /* EMPTY */
2493 DEBUG2(printk("scsi(%ld): Register Symbolic "
2494 "Node Name failed.\n", vha->host_no));
2495 }
2496 }
2497
2498 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
2499 if (rval != QLA_SUCCESS)
2500 break;
2501
2502 /*
2503 * Log out all previous fabric devices marked lost, except
2504 * tape devices.
2505 */
2506 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2507 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2508 break;
2509
2510 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
2511 continue;
2512
2513 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
2514 qla2x00_mark_device_lost(vha, fcport,
2515 ql2xplogiabsentdevice, 0);
2516 if (fcport->loop_id != FC_NO_LOOP_ID &&
2517 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2518 fcport->port_type != FCT_INITIATOR &&
2519 fcport->port_type != FCT_BROADCAST) {
2520 ha->isp_ops->fabric_logout(vha,
2521 fcport->loop_id,
2522 fcport->d_id.b.domain,
2523 fcport->d_id.b.area,
2524 fcport->d_id.b.al_pa);
2525 fcport->loop_id = FC_NO_LOOP_ID;
2526 }
2527 }
2528 }
2529
2530 /* Starting free loop ID. */
2531 next_loopid = ha->min_external_loopid;
2532
2533 /*
2534 * Scan through our port list and log in entries that need to be
2535 * logged in.
2536 */
2537 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2538 if (atomic_read(&vha->loop_down_timer) ||
2539 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2540 break;
2541
2542 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
2543 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
2544 continue;
2545
2546 if (fcport->loop_id == FC_NO_LOOP_ID) {
2547 fcport->loop_id = next_loopid;
2548 rval = qla2x00_find_new_loop_id(
2549 base_vha, fcport);
2550 if (rval != QLA_SUCCESS) {
2551 /* Ran out of IDs to use */
2552 break;
2553 }
2554 }
2555 /* Login and update database */
2556 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2557 }
2558
2559 /* Exit if out of loop IDs. */
2560 if (rval != QLA_SUCCESS) {
2561 break;
2562 }
2563
2564 /*
2565 * Login and add the new devices to our port list.
2566 */
2567 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
2568 if (atomic_read(&vha->loop_down_timer) ||
2569 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2570 break;
2571
2572 /* Find a new loop ID to use. */
2573 fcport->loop_id = next_loopid;
2574 rval = qla2x00_find_new_loop_id(base_vha, fcport);
2575 if (rval != QLA_SUCCESS) {
2576 /* Ran out of IDs to use */
2577 break;
2578 }
2579
2580 /* Login and update database */
2581 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2582
2583 if (vha->vp_idx) {
2584 fcport->vha = vha;
2585 fcport->vp_idx = vha->vp_idx;
2586 }
2587 list_move_tail(&fcport->list, &vha->vp_fcports);
2588 }
2589 } while (0);
2590
2591 /* Free all new device structures not processed. */
2592 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
2593 list_del(&fcport->list);
2594 kfree(fcport);
2595 }
2596
2597 if (rval) {
2598 DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
2599 "rval=%d\n", vha->host_no, rval));
2600 }
2601
2602 return (rval);
2603 }
2604
2605
2606 /*
2607 * qla2x00_find_all_fabric_devs
2608 *
2609 * Input:
2610 * ha = adapter block pointer.
2611 * new_fcports = list head to receive newly discovered fabric devices.
2612 *
2613 * Returns:
2614 * 0 = success.
2615 *
2616 * Context:
2617 * Kernel context.
2618 */
2619 static int
2620 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
2621 struct list_head *new_fcports)
2622 {
2623 int rval;
2624 uint16_t loop_id;
2625 fc_port_t *fcport, *new_fcport, *fcptemp;
2626 int found;
2627
2628 sw_info_t *swl;
2629 int swl_idx;
2630 int first_dev, last_dev;
2631 port_id_t wrap, nxt_d_id;
2632 struct qla_hw_data *ha = vha->hw;
2633 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
2634 struct scsi_qla_host *tvp;
2635
2636 rval = QLA_SUCCESS;
2637
2638 /* Try GID_PT to get device list, else GAN. */
2639 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
2640 if (!swl) {
2641 /*EMPTY*/
2642 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
2643 "on GA_NXT\n", vha->host_no));
2644 } else {
2645 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
2646 kfree(swl);
2647 swl = NULL;
2648 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
2649 kfree(swl);
2650 swl = NULL;
2651 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
2652 kfree(swl);
2653 swl = NULL;
2654 } else if (ql2xiidmaenable &&
2655 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
2656 qla2x00_gpsc(vha, swl);
2657 }
2658 }
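/*
 * At this point swl is either a populated switch device list (GID_PT,
 * GPN_ID and GNN_ID all succeeded, optionally annotated with GFPN_ID/
 * GPSC speed data when iIDMA is enabled) or NULL, in which case the
 * loop below falls back to walking the fabric one port at a time with
 * GA_NXT queries.
 */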
2659 swl_idx = 0;
2660
2661 /* Allocate temporary fcport for any new fcports discovered. */
2662 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2663 if (new_fcport == NULL) {
2664 kfree(swl);
2665 return (QLA_MEMORY_ALLOC_FAILED);
2666 }
2667 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2668 /* Set start port ID scan at adapter ID. */
2669 first_dev = 1;
2670 last_dev = 0;
2671
2672 /* Starting free loop ID. */
2673 loop_id = ha->min_external_loopid;
2674 for (; loop_id <= ha->max_loop_id; loop_id++) {
2675 if (qla2x00_is_reserved_id(vha, loop_id))
2676 continue;
2677
2678 if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
2679 break;
2680
2681 if (swl != NULL) {
2682 if (last_dev) {
2683 wrap.b24 = new_fcport->d_id.b24;
2684 } else {
2685 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
2686 memcpy(new_fcport->node_name,
2687 swl[swl_idx].node_name, WWN_SIZE);
2688 memcpy(new_fcport->port_name,
2689 swl[swl_idx].port_name, WWN_SIZE);
2690 memcpy(new_fcport->fabric_port_name,
2691 swl[swl_idx].fabric_port_name, WWN_SIZE);
2692 new_fcport->fp_speed = swl[swl_idx].fp_speed;
2693
2694 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
2695 last_dev = 1;
2696 }
2697 swl_idx++;
2698 }
2699 } else {
2700 /* Send GA_NXT to the switch */
2701 rval = qla2x00_ga_nxt(vha, new_fcport);
2702 if (rval != QLA_SUCCESS) {
2703 qla_printk(KERN_WARNING, ha,
2704 "SNS scan failed -- assuming zero-entry "
2705 "result...\n");
2706 list_for_each_entry_safe(fcport, fcptemp,
2707 new_fcports, list) {
2708 list_del(&fcport->list);
2709 kfree(fcport);
2710 }
2711 rval = QLA_SUCCESS;
2712 break;
2713 }
2714 }
2715
2716 /* If wrapped around the switch device list, exit. */
2717 if (first_dev) {
2718 wrap.b24 = new_fcport->d_id.b24;
2719 first_dev = 0;
2720 } else if (new_fcport->d_id.b24 == wrap.b24) {
2721 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
2722 vha->host_no, new_fcport->d_id.b.domain,
2723 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
2724 break;
2725 }
2726
2727 /* Bypass if same physical adapter. */
2728 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
2729 continue;
2730
2731 /* Bypass virtual ports of the same host. */
2732 found = 0;
2733 if (ha->num_vhosts) {
2734 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
2735 if (new_fcport->d_id.b24 == vp->d_id.b24) {
2736 found = 1;
2737 break;
2738 }
2739 }
2740 if (found)
2741 continue;
2742 }
2743
2744 /* Bypass if same domain and area of adapter. */
2745 if (((new_fcport->d_id.b24 & 0xffff00) ==
2746 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
2747 ISP_CFG_FL)
2748 continue;
2749
2750 /* Bypass reserved domain fields. */
2751 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
2752 continue;
2753
2754 /* Locate matching device in database. */
2755 found = 0;
2756 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2757 if (memcmp(new_fcport->port_name, fcport->port_name,
2758 WWN_SIZE))
2759 continue;
2760
2761 found++;
2762
2763 /* Update port state. */
2764 memcpy(fcport->fabric_port_name,
2765 new_fcport->fabric_port_name, WWN_SIZE);
2766 fcport->fp_speed = new_fcport->fp_speed;
2767
2768 /*
2769 * If the address is the same and the state is FCS_ONLINE,
2770 * nothing has changed.
2771 */
2772 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
2773 atomic_read(&fcport->state) == FCS_ONLINE) {
2774 break;
2775 }
2776
2777 /*
2778 * Device was not a fabric device before; adopt it and mark it for login.
2779 */
2780 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2781 fcport->d_id.b24 = new_fcport->d_id.b24;
2782 fcport->loop_id = FC_NO_LOOP_ID;
2783 fcport->flags |= (FCF_FABRIC_DEVICE |
2784 FCF_LOGIN_NEEDED);
2785 break;
2786 }
2787
2788 /*
2789 * Port ID changed or device was marked to be updated;
2790 * Log it out if still logged in and mark it for
2791 * relogin later.
2792 */
2793 fcport->d_id.b24 = new_fcport->d_id.b24;
2794 fcport->flags |= FCF_LOGIN_NEEDED;
2795 if (fcport->loop_id != FC_NO_LOOP_ID &&
2796 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2797 fcport->port_type != FCT_INITIATOR &&
2798 fcport->port_type != FCT_BROADCAST) {
2799 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
2800 fcport->d_id.b.domain, fcport->d_id.b.area,
2801 fcport->d_id.b.al_pa);
2802 fcport->loop_id = FC_NO_LOOP_ID;
2803 }
2804
2805 break;
2806 }
2807
2808 if (found)
2809 continue;
2810 /* If device was not in our fcports list, then add it. */
2811 list_add_tail(&new_fcport->list, new_fcports);
2812
2813 /* Allocate a new replacement fcport. */
2814 nxt_d_id.b24 = new_fcport->d_id.b24;
2815 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2816 if (new_fcport == NULL) {
2817 kfree(swl);
2818 return (QLA_MEMORY_ALLOC_FAILED);
2819 }
2820 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2821 new_fcport->d_id.b24 = nxt_d_id.b24;
2822 }
2823
2824 kfree(swl);
2825 kfree(new_fcport);
2826
2827 return (rval);
2828 }
2829
2830 /*
2831 * qla2x00_find_new_loop_id
2832 * Scan through our port list and find a new usable loop ID.
2833 *
2834 * Input:
2835 * ha: adapter state pointer.
2836 * dev: port structure pointer.
2837 *
2838 * Returns:
2839 * qla2x00 local function return status code.
2840 *
2841 * Context:
2842 * Kernel context.
2843 */
2844 static int
2845 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
2846 {
2847 int rval;
2848 int found;
2849 fc_port_t *fcport;
2850 uint16_t first_loop_id;
2851 struct qla_hw_data *ha = vha->hw;
2852 struct scsi_qla_host *vp;
2853 struct scsi_qla_host *tvp;
2854
2855 rval = QLA_SUCCESS;
2856
2857 /* Save starting loop ID. */
2858 first_loop_id = dev->loop_id;
2859
2860 for (;;) {
2861 /* Skip loop ID if already used by adapter. */
2862 if (dev->loop_id == vha->loop_id)
2863 dev->loop_id++;
2864
2865 /* Skip reserved loop IDs. */
2866 while (qla2x00_is_reserved_id(vha, dev->loop_id))
2867 dev->loop_id++;
2868
2869 /* Reset loop ID if passed the end. */
2870 if (dev->loop_id > ha->max_loop_id) {
2871 /* Start over at the first external loop ID. */
2872 dev->loop_id = ha->min_external_loopid;
2873 }
2874
2875 /* Check for loop ID being already in use. */
2876 found = 0;
2877 fcport = NULL;
2878 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
2879 list_for_each_entry(fcport, &vp->vp_fcports, list) {
2880 if (fcport->loop_id == dev->loop_id &&
2881 fcport != dev) {
2882 /* ID possibly in use */
2883 found++;
2884 break;
2885 }
2886 }
2887 if (found)
2888 break;
2889 }
2890
2891 /* Not in use -- this loop ID is free. */
2892 if (!found) {
2893 break;
2894 }
2895
2896 /* ID in use. Try next value. */
2897 dev->loop_id++;
2898
2899 /* Wrapped around -- no free ID to use. */
2900 if (dev->loop_id == first_loop_id) {
2901 dev->loop_id = FC_NO_LOOP_ID;
2902 rval = QLA_FUNCTION_FAILED;
2903 break;
2904 }
2905 }
2906
2907 return (rval);
2908 }
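/*
 * The search above is circular: it starts at dev->loop_id, skips the
 * adapter's own ID and any reserved IDs, wraps from ha->max_loop_id
 * back to ha->min_external_loopid, and gives up with
 * QLA_FUNCTION_FAILED once it arrives back at the starting ID.
 */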
2909
2910 /*
2911 * qla2x00_device_resync
2912 * Marks devices in the database that need resynchronization.
2913 *
2914 * Input:
2915 * ha = adapter block pointer.
2916 *
2917 * Context:
2918 * Kernel context.
2919 */
2920 static int
2921 qla2x00_device_resync(scsi_qla_host_t *vha)
2922 {
2923 int rval;
2924 uint32_t mask;
2925 fc_port_t *fcport;
2926 uint32_t rscn_entry;
2927 uint8_t rscn_out_iter;
2928 uint8_t format;
2929 port_id_t d_id;
2930
2931 rval = QLA_RSCNS_HANDLED;
2932
2933 while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
2934 vha->flags.rscn_queue_overflow) {
2935
2936 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
2937 format = MSB(MSW(rscn_entry));
2938 d_id.b.domain = LSB(MSW(rscn_entry));
2939 d_id.b.area = MSB(LSW(rscn_entry));
2940 d_id.b.al_pa = LSB(LSW(rscn_entry));
2941
2942 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
2943 "[%02x/%02x%02x%02x].\n",
2944 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
2945 d_id.b.area, d_id.b.al_pa));
2946
2947 vha->rscn_out_ptr++;
2948 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
2949 vha->rscn_out_ptr = 0;
2950
2951 /* Skip duplicate entries. */
2952 for (rscn_out_iter = vha->rscn_out_ptr;
2953 !vha->flags.rscn_queue_overflow &&
2954 rscn_out_iter != vha->rscn_in_ptr;
2955 rscn_out_iter = (rscn_out_iter ==
2956 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
2957
2958 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
2959 break;
2960
2961 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
2962 "entry found at [%d].\n", vha->host_no,
2963 rscn_out_iter));
2964
2965 vha->rscn_out_ptr = rscn_out_iter;
2966 }
2967
2968 /* Queue overflow, set switch default case. */
2969 if (vha->flags.rscn_queue_overflow) {
2970 DEBUG(printk("scsi(%ld): device_resync: rscn "
2971 "overflow.\n", vha->host_no));
2972
2973 format = 3;
2974 vha->flags.rscn_queue_overflow = 0;
2975 }
2976
2977 switch (format) {
2978 case 0:
2979 mask = 0xffffff;
2980 break;
2981 case 1:
2982 mask = 0xffff00;
2983 break;
2984 case 2:
2985 mask = 0xff0000;
2986 break;
2987 default:
2988 mask = 0x0;
2989 d_id.b24 = 0;
2990 vha->rscn_out_ptr = vha->rscn_in_ptr;
2991 break;
2992 }
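/*
 * Example: an area-format RSCN (format 1) for port ID 01:02:00 uses
 * mask 0xffff00, so every fabric fcport in domain 01 / area 02 is
 * re-evaluated below; a domain-format RSCN (format 2) masks with
 * 0xff0000 and covers the whole domain, while format 3 (or a queue
 * overflow) zeroes the mask and touches every fabric device.
 */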
2993
2994 rval = QLA_SUCCESS;
2995
2996 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2997 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
2998 (fcport->d_id.b24 & mask) != d_id.b24 ||
2999 fcport->port_type == FCT_BROADCAST)
3000 continue;
3001
3002 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3003 if (format != 3 ||
3004 fcport->port_type != FCT_INITIATOR) {
3005 qla2x00_mark_device_lost(vha, fcport,
3006 0, 0);
3007 }
3008 }
3009 }
3010 }
3011 return (rval);
3012 }
3013
3014 /*
3015 * qla2x00_fabric_dev_login
3016 * Log in fabric target device and update the FC port database.
3017 *
3018 * Input:
3019 * ha: adapter state pointer.
3020 * fcport: port structure list pointer.
3021 * next_loopid: contains value of a new loop ID that can be used
3022 * by the next login attempt.
3023 *
3024 * Returns:
3025 * qla2x00 local function return status code.
3026 *
3027 * Context:
3028 * Kernel context.
3029 */
3030 static int
3031 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3032 uint16_t *next_loopid)
3033 {
3034 int rval;
3035 int retry;
3036 uint8_t opts;
3037 struct qla_hw_data *ha = vha->hw;
3038
3039 rval = QLA_SUCCESS;
3040 retry = 0;
3041
3042 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3043 if (rval == QLA_SUCCESS) {
3044 /* Send an ADISC to tape devices. */
3045 opts = 0;
3046 if (fcport->flags & FCF_TAPE_PRESENT)
3047 opts |= BIT_1;
3048 rval = qla2x00_get_port_database(vha, fcport, opts);
3049 if (rval != QLA_SUCCESS) {
3050 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3051 fcport->d_id.b.domain, fcport->d_id.b.area,
3052 fcport->d_id.b.al_pa);
3053 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3054 } else {
3055 qla2x00_update_fcport(vha, fcport);
3056 }
3057 }
3058
3059 return (rval);
3060 }
3061
3062 /*
3063 * qla2x00_fabric_login
3064 * Issue fabric login command.
3065 *
3066 * Input:
3067 * ha = adapter block pointer.
3068 * device = pointer to FC device type structure.
3069 *
3070 * Returns:
3071 * 0 - Login successfully
3072 * 1 - Login failed
3073 * 2 - Initiator device
3074 * 3 - Fatal error
3075 */
3076 int
3077 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3078 uint16_t *next_loopid)
3079 {
3080 int rval;
3081 int retry;
3082 uint16_t tmp_loopid;
3083 uint16_t mb[MAILBOX_REGISTER_COUNT];
3084 struct qla_hw_data *ha = vha->hw;
3085
3086 retry = 0;
3087 tmp_loopid = 0;
3088
3089 for (;;) {
3090 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
3091 "for port %02x%02x%02x.\n",
3092 vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
3093 fcport->d_id.b.area, fcport->d_id.b.al_pa));
3094
3095 /* Login fcport on switch. */
3096 ha->isp_ops->fabric_login(vha, fcport->loop_id,
3097 fcport->d_id.b.domain, fcport->d_id.b.area,
3098 fcport->d_id.b.al_pa, mb, BIT_0);
3099 if (mb[0] == MBS_PORT_ID_USED) {
3100 /*
3101 * Device has another loop ID. The firmware team
3102 * recommends the driver perform an implicit login with
3103 * the specified ID again. The ID we just used is saved
3104 * here so we return with an ID that can be tried by
3105 * the next login.
3106 */
3107 retry++;
3108 tmp_loopid = fcport->loop_id;
3109 fcport->loop_id = mb[1];
3110
3111 DEBUG(printk("Fabric Login: port in use - next "
3112 "loop id=0x%04x, port Id=%02x%02x%02x.\n",
3113 fcport->loop_id, fcport->d_id.b.domain,
3114 fcport->d_id.b.area, fcport->d_id.b.al_pa));
3115
3116 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
3117 /*
3118 * Login succeeded.
3119 */
3120 if (retry) {
3121 /* A retry occurred before. */
3122 *next_loopid = tmp_loopid;
3123 } else {
3124 /*
3125 * No retry occurred before. Just increment the
3126 * ID value for next login.
3127 */
3128 *next_loopid = (fcport->loop_id + 1);
3129 }
3130
3131 if (mb[1] & BIT_0) {
3132 fcport->port_type = FCT_INITIATOR;
3133 } else {
3134 fcport->port_type = FCT_TARGET;
3135 if (mb[1] & BIT_1) {
3136 fcport->flags |= FCF_TAPE_PRESENT;
3137 }
3138 }
3139
3140 if (mb[10] & BIT_0)
3141 fcport->supported_classes |= FC_COS_CLASS2;
3142 if (mb[10] & BIT_1)
3143 fcport->supported_classes |= FC_COS_CLASS3;
3144
3145 rval = QLA_SUCCESS;
3146 break;
3147 } else if (mb[0] == MBS_LOOP_ID_USED) {
3148 /*
3149 * Loop ID already used, try next loop ID.
3150 */
3151 fcport->loop_id++;
3152 rval = qla2x00_find_new_loop_id(vha, fcport);
3153 if (rval != QLA_SUCCESS) {
3154 /* Ran out of loop IDs to use */
3155 break;
3156 }
3157 } else if (mb[0] == MBS_COMMAND_ERROR) {
3158 /*
3159 * Firmware possibly timed out during login. If no
3160 * retries are left to perform, the device is declared
3161 * dead.
3162 */
3163 *next_loopid = fcport->loop_id;
3164 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3165 fcport->d_id.b.domain, fcport->d_id.b.area,
3166 fcport->d_id.b.al_pa);
3167 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3168
3169 rval = 1;
3170 break;
3171 } else {
3172 /*
3173 * Unrecoverable / unhandled error.
3174 */
3175 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
3176 "loop_id=%x jiffies=%lx.\n",
3177 __func__, vha->host_no, mb[0],
3178 fcport->d_id.b.domain, fcport->d_id.b.area,
3179 fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
3180
3181 *next_loopid = fcport->loop_id;
3182 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3183 fcport->d_id.b.domain, fcport->d_id.b.area,
3184 fcport->d_id.b.al_pa);
3185 fcport->loop_id = FC_NO_LOOP_ID;
3186 fcport->login_retry = 0;
3187
3188 rval = 3;
3189 break;
3190 }
3191 }
3192
3193 return (rval);
3194 }
3195
3196 /*
3197 * qla2x00_local_device_login
3198 * Issue local device login command.
3199 *
3200 * Input:
3201 * ha = adapter block pointer.
3202 * loop_id = loop id of device to login to.
3203 *
3204 * Returns (note: these magic values still need #defines):
3205 * 0 - Login successfully
3206 * 1 - Login failed
3207 * 3 - Fatal error
3208 */
3209 int
3210 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3211 {
3212 int rval;
3213 uint16_t mb[MAILBOX_REGISTER_COUNT];
3214
3215 memset(mb, 0, sizeof(mb));
3216 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3217 if (rval == QLA_SUCCESS) {
3218 /* Interrogate mailbox registers for any errors */
3219 if (mb[0] == MBS_COMMAND_ERROR)
3220 rval = 1;
3221 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
3222 /* device not in PCB table */
3223 rval = 3;
3224 }
3225
3226 return (rval);
3227 }
3228
3229 /*
3230 * qla2x00_loop_resync
3231 * Resync with fibre channel devices.
3232 *
3233 * Input:
3234 * ha = adapter block pointer.
3235 *
3236 * Returns:
3237 * 0 = success
3238 */
3239 int
3240 qla2x00_loop_resync(scsi_qla_host_t *vha)
3241 {
3242 int rval = QLA_SUCCESS;
3243 uint32_t wait_time;
3244 struct req_que *req;
3245 struct rsp_que *rsp;
3246
3247 if (ql2xmultique_tag)
3248 req = vha->hw->req_q_map[0];
3249 else
3250 req = vha->req;
3251 rsp = req->rsp;
3252
3253 atomic_set(&vha->loop_state, LOOP_UPDATE);
3254 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3255 if (vha->flags.online) {
3256 if (!(rval = qla2x00_fw_ready(vha))) {
3257 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3258 wait_time = 256;
3259 do {
3260 atomic_set(&vha->loop_state, LOOP_UPDATE);
3261
3262 /* Issue a marker after FW becomes ready. */
3263 qla2x00_marker(vha, req, rsp, 0, 0,
3264 MK_SYNC_ALL);
3265 vha->marker_needed = 0;
3266
3267 /* Remap devices on Loop. */
3268 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3269
3270 qla2x00_configure_loop(vha);
3271 wait_time--;
3272 } while (!atomic_read(&vha->loop_down_timer) &&
3273 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3274 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3275 &vha->dpc_flags)));
3276 }
3277 }
3278
3279 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3280 return (QLA_FUNCTION_FAILED);
3281
3282 if (rval)
3283 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
3284
3285 return (rval);
3286 }
3287
3288 void
3289 qla2x00_update_fcports(scsi_qla_host_t *vha)
3290 {
3291 fc_port_t *fcport;
3292
3293 /* Go with deferred removal of rport references. */
3294 list_for_each_entry(fcport, &vha->vp_fcports, list)
3295 if (fcport && fcport->drport &&
3296 atomic_read(&fcport->state) != FCS_UNCONFIGURED)
3297 qla2x00_rport_del(fcport);
3298 }
3299
3300 /*
3301 * qla2x00_abort_isp
3302 * Resets ISP and aborts all outstanding commands.
3303 *
3304 * Input:
3305 * ha = adapter block pointer.
3306 *
3307 * Returns:
3308 * 0 = success
3309 */
3310 int
3311 qla2x00_abort_isp(scsi_qla_host_t *vha)
3312 {
3313 int rval;
3314 uint8_t status = 0;
3315 struct qla_hw_data *ha = vha->hw;
3316 struct scsi_qla_host *vp;
3317 struct scsi_qla_host *tvp;
3318 struct req_que *req = ha->req_q_map[0];
3319
3320 if (vha->flags.online) {
3321 vha->flags.online = 0;
3322 ha->flags.chip_reset_done = 0;
3323 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3324 ha->qla_stats.total_isp_aborts++;
3325
3326 qla_printk(KERN_INFO, ha,
3327 "Performing ISP error recovery - ha= %p.\n", ha);
3328 ha->isp_ops->reset_chip(vha);
3329
3330 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
3331 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3332 atomic_set(&vha->loop_state, LOOP_DOWN);
3333 qla2x00_mark_all_devices_lost(vha, 0);
3334 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
3335 qla2x00_mark_all_devices_lost(vp, 0);
3336 } else {
3337 if (!atomic_read(&vha->loop_down_timer))
3338 atomic_set(&vha->loop_down_timer,
3339 LOOP_DOWN_TIME);
3340 }
3341
3342 /* Requeue all commands in outstanding command list. */
3343 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3344
3345 ha->isp_ops->get_flash_version(vha, req->ring);
3346
3347 ha->isp_ops->nvram_config(vha);
3348
3349 if (!qla2x00_restart_isp(vha)) {
3350 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3351
3352 if (!atomic_read(&vha->loop_down_timer)) {
3353 /*
3354 * Issue marker command only when we are going
3355 * to start the I/O.
3356 */
3357 vha->marker_needed = 1;
3358 }
3359
3360 vha->flags.online = 1;
3361
3362 ha->isp_ops->enable_intrs(ha);
3363
3364 ha->isp_abort_cnt = 0;
3365 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3366
3367 if (ha->fce) {
3368 ha->flags.fce_enabled = 1;
3369 memset(ha->fce, 0,
3370 fce_calc_size(ha->fce_bufs));
3371 rval = qla2x00_enable_fce_trace(vha,
3372 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
3373 &ha->fce_bufs);
3374 if (rval) {
3375 qla_printk(KERN_WARNING, ha,
3376 "Unable to reinitialize FCE "
3377 "(%d).\n", rval);
3378 ha->flags.fce_enabled = 0;
3379 }
3380 }
3381
3382 if (ha->eft) {
3383 memset(ha->eft, 0, EFT_SIZE);
3384 rval = qla2x00_enable_eft_trace(vha,
3385 ha->eft_dma, EFT_NUM_BUFFERS);
3386 if (rval) {
3387 qla_printk(KERN_WARNING, ha,
3388 "Unable to reinitialize EFT "
3389 "(%d).\n", rval);
3390 }
3391 }
3392 } else { /* failed the ISP abort */
3393 vha->flags.online = 1;
3394 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3395 if (ha->isp_abort_cnt == 0) {
3396 qla_printk(KERN_WARNING, ha,
3397 "ISP error recovery failed - "
3398 "board disabled\n");
3399 /*
3400 * The next call disables the board
3401 * completely.
3402 */
3403 ha->isp_ops->reset_adapter(vha);
3404 vha->flags.online = 0;
3405 clear_bit(ISP_ABORT_RETRY,
3406 &vha->dpc_flags);
3407 status = 0;
3408 } else { /* schedule another ISP abort */
3409 ha->isp_abort_cnt--;
3410 DEBUG(printk("qla%ld: ISP abort - "
3411 "retry remaining %d\n",
3412 vha->host_no, ha->isp_abort_cnt));
3413 status = 1;
3414 }
3415 } else {
3416 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3417 DEBUG(printk("qla2x00(%ld): ISP error recovery "
3418 "- retrying (%d) more times\n",
3419 vha->host_no, ha->isp_abort_cnt));
3420 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3421 status = 1;
3422 }
3423 }
3424
3425 }
3426
3427 if (!status) {
3428 DEBUG(printk(KERN_INFO
3429 "qla2x00_abort_isp(%ld): succeeded.\n",
3430 vha->host_no));
3431 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3432 if (vp->vp_idx)
3433 qla2x00_vp_abort_isp(vp);
3434 }
3435 } else {
3436 qla_printk(KERN_INFO, ha,
3437 "qla2x00_abort_isp: **** FAILED ****\n");
3438 }
3439
3440 return(status);
3441 }
3442
3443 /*
3444 * qla2x00_restart_isp
3445 * restarts the ISP after a reset
3446 *
3447 * Input:
3448 * ha = adapter block pointer.
3449 *
3450 * Returns:
3451 * 0 = success
3452 */
3453 static int
3454 qla2x00_restart_isp(scsi_qla_host_t *vha)
3455 {
3456 int status = 0;
3457 uint32_t wait_time;
3458 struct qla_hw_data *ha = vha->hw;
3459 struct req_que *req = ha->req_q_map[0];
3460 struct rsp_que *rsp = ha->rsp_q_map[0];
3461
3462 /* If firmware needs to be loaded */
3463 if (qla2x00_isp_firmware(vha)) {
3464 vha->flags.online = 0;
3465 status = ha->isp_ops->chip_diag(vha);
3466 if (!status)
3467 status = qla2x00_setup_chip(vha);
3468 }
3469
3470 if (!status && !(status = qla2x00_init_rings(vha))) {
3471 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3472 ha->flags.chip_reset_done = 1;
3473 /* Initialize the queues in use */
3474 qla25xx_init_queues(ha);
3475
3476 status = qla2x00_fw_ready(vha);
3477 if (!status) {
3478 DEBUG(printk("%s(): Start configure loop, "
3479 "status = %d\n", __func__, status));
3480
3481 /* Issue a marker after FW becomes ready. */
3482 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
3483
3484 vha->flags.online = 1;
3485 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3486 wait_time = 256;
3487 do {
3488 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3489 qla2x00_configure_loop(vha);
3490 wait_time--;
3491 } while (!atomic_read(&vha->loop_down_timer) &&
3492 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3493 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3494 &vha->dpc_flags)));
3495 }
3496
3497 /* If no cable is present then assume success. */
3498 if ((vha->device_flags & DFLG_NO_CABLE))
3499 status = 0;
3500
3501 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
3502 __func__,
3503 status));
3504 }
3505 return (status);
3506 }
3507
3508 static int
3509 qla25xx_init_queues(struct qla_hw_data *ha)
3510 {
3511 struct rsp_que *rsp = NULL;
3512 struct req_que *req = NULL;
3513 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3514 int ret = -1;
3515 int i;
3516
3517 for (i = 1; i < ha->max_rsp_queues; i++) {
3518 rsp = ha->rsp_q_map[i];
3519 if (rsp) {
3520 rsp->options &= ~BIT_0;
3521 ret = qla25xx_init_rsp_que(base_vha, rsp);
3522 if (ret != QLA_SUCCESS)
3523 DEBUG2_17(printk(KERN_WARNING
3524 "%s Rsp que:%d init failed\n", __func__,
3525 rsp->id));
3526 else
3527 DEBUG2_17(printk(KERN_INFO
3528 "%s Rsp que:%d inited\n", __func__,
3529 rsp->id));
3530 }
3531 }
3532 for (i = 1; i < ha->max_req_queues; i++) {
3533 req = ha->req_q_map[i];
3534 if (req) {
3535 /* Clear outstanding commands array. */
3536 req->options &= ~BIT_0;
3537 ret = qla25xx_init_req_que(base_vha, req);
3538 if (ret != QLA_SUCCESS)
3539 DEBUG2_17(printk(KERN_WARNING
3540 "%s Req que:%d init failed\n", __func__,
3541 req->id));
3542 else
3543 DEBUG2_17(printk(KERN_INFO
3544 "%s Req que:%d inited\n", __func__,
3545 req->id));
3546 }
3547 }
3548 return ret;
3549 }
3550
3551 /*
3552 * qla2x00_reset_adapter
3553 * Reset adapter.
3554 *
3555 * Input:
3556 * ha = adapter block pointer.
3557 */
3558 void
3559 qla2x00_reset_adapter(scsi_qla_host_t *vha)
3560 {
3561 unsigned long flags = 0;
3562 struct qla_hw_data *ha = vha->hw;
3563 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3564
3565 vha->flags.online = 0;
3566 ha->isp_ops->disable_intrs(ha);
3567
3568 spin_lock_irqsave(&ha->hardware_lock, flags);
3569 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
3570 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
3571 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
3572 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
3573 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3574 }
3575
3576 void
3577 qla24xx_reset_adapter(scsi_qla_host_t *vha)
3578 {
3579 unsigned long flags = 0;
3580 struct qla_hw_data *ha = vha->hw;
3581 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3582
3583 vha->flags.online = 0;
3584 ha->isp_ops->disable_intrs(ha);
3585
3586 spin_lock_irqsave(&ha->hardware_lock, flags);
3587 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
3588 RD_REG_DWORD(&reg->hccr);
3589 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
3590 RD_REG_DWORD(&reg->hccr);
3591 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3592
3593 if (IS_NOPOLLING_TYPE(ha))
3594 ha->isp_ops->enable_intrs(ha);
3595 }
3596
3597 /* On SPARC systems, obtain port and node WWN from firmware
3598 * properties.
3599 */
3600 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
3601 struct nvram_24xx *nv)
3602 {
3603 #ifdef CONFIG_SPARC
3604 struct qla_hw_data *ha = vha->hw;
3605 struct pci_dev *pdev = ha->pdev;
3606 struct device_node *dp = pci_device_to_OF_node(pdev);
3607 const u8 *val;
3608 int len;
3609
3610 val = of_get_property(dp, "port-wwn", &len);
3611 if (val && len >= WWN_SIZE)
3612 memcpy(nv->port_name, val, WWN_SIZE);
3613
3614 val = of_get_property(dp, "node-wwn", &len);
3615 if (val && len >= WWN_SIZE)
3616 memcpy(nv->node_name, val, WWN_SIZE);
3617 #endif
3618 }
3619
3620 int
3621 qla24xx_nvram_config(scsi_qla_host_t *vha)
3622 {
3623 int rval;
3624 struct init_cb_24xx *icb;
3625 struct nvram_24xx *nv;
3626 uint32_t *dptr;
3627 uint8_t *dptr1, *dptr2;
3628 uint32_t chksum;
3629 uint16_t cnt;
3630 struct qla_hw_data *ha = vha->hw;
3631
3632 rval = QLA_SUCCESS;
3633 icb = (struct init_cb_24xx *)ha->init_cb;
3634 nv = ha->nvram;
3635
3636 /* Determine NVRAM starting address. */
3637 if (ha->flags.port0) {
3638 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
3639 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
3640 } else {
3641 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
3642 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
3643 }
3644 ha->nvram_size = sizeof(struct nvram_24xx);
3645 ha->vpd_size = FA_NVRAM_VPD_SIZE;
3646
3647 /* Get VPD data into cache */
3648 ha->vpd = ha->nvram + VPD_OFFSET;
3649 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
3650 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
3651
3652 /* Get NVRAM data into cache and calculate checksum. */
3653 dptr = (uint32_t *)nv;
3654 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
3655 ha->nvram_size);
3656 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
3657 chksum += le32_to_cpu(*dptr++);
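/*
 * The image is accepted only if the 32-bit sum of all of its
 * little-endian dwords wraps to zero; the checksum field stored in
 * NVRAM is part of that sum, so a correctly programmed part yields
 * chksum == 0 in the test below.
 */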
3658
3659 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
3660 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
3661
3662 /* Bad NVRAM data, set default parameters. */
3663 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
3664 || nv->id[3] != ' ' ||
3665 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
3666 /* Reset NVRAM data. */
3667 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
3668 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
3669 le16_to_cpu(nv->nvram_version));
3670 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
3671 "invalid -- WWPN) defaults.\n");
3672
3673 /*
3674 * Set default initialization control block.
3675 */
3676 memset(nv, 0, ha->nvram_size);
3677 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
3678 nv->version = __constant_cpu_to_le16(ICB_VERSION);
3679 nv->frame_payload_size = __constant_cpu_to_le16(2048);
3680 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
3681 nv->exchange_count = __constant_cpu_to_le16(0);
3682 nv->hard_address = __constant_cpu_to_le16(124);
3683 nv->port_name[0] = 0x21;
3684 nv->port_name[1] = 0x00 + ha->port_no;
3685 nv->port_name[2] = 0x00;
3686 nv->port_name[3] = 0xe0;
3687 nv->port_name[4] = 0x8b;
3688 nv->port_name[5] = 0x1c;
3689 nv->port_name[6] = 0x55;
3690 nv->port_name[7] = 0x86;
3691 nv->node_name[0] = 0x20;
3692 nv->node_name[1] = 0x00;
3693 nv->node_name[2] = 0x00;
3694 nv->node_name[3] = 0xe0;
3695 nv->node_name[4] = 0x8b;
3696 nv->node_name[5] = 0x1c;
3697 nv->node_name[6] = 0x55;
3698 nv->node_name[7] = 0x86;
3699 qla24xx_nvram_wwn_from_ofw(vha, nv);
3700 nv->login_retry_count = __constant_cpu_to_le16(8);
3701 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
3702 nv->login_timeout = __constant_cpu_to_le16(0);
3703 nv->firmware_options_1 =
3704 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
3705 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
3706 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
3707 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
3708 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
3709 nv->efi_parameters = __constant_cpu_to_le32(0);
3710 nv->reset_delay = 5;
3711 nv->max_luns_per_target = __constant_cpu_to_le16(128);
3712 nv->port_down_retry_count = __constant_cpu_to_le16(30);
3713 nv->link_down_timeout = __constant_cpu_to_le16(30);
3714
3715 rval = 1;
3716 }
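/*
 * Note: the default WWPN/WWNN programmed above (21:00:00:e0:8b:...)
 * appears to use a QLogic OUI with a fixed tail, varying only
 * port_name[1] by PCI function number -- hence the warning that these
 * defaults are functioning but not valid/unique on a real fabric.
 */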
3717
3718 /* Reset Initialization control block */
3719 memset(icb, 0, ha->init_cb_size);
3720
3721 /* Copy 1st segment. */
3722 dptr1 = (uint8_t *)icb;
3723 dptr2 = (uint8_t *)&nv->version;
3724 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
3725 while (cnt--)
3726 *dptr1++ = *dptr2++;
3727
3728 icb->login_retry_count = nv->login_retry_count;
3729 icb->link_down_on_nos = nv->link_down_on_nos;
3730
3731 /* Copy 2nd segment. */
3732 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
3733 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
3734 cnt = (uint8_t *)&icb->reserved_3 -
3735 (uint8_t *)&icb->interrupt_delay_timer;
3736 while (cnt--)
3737 *dptr1++ = *dptr2++;
3738
3739 /*
3740 * Setup driver NVRAM options.
3741 */
3742 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
3743 "QLA2462");
3744
3745 /* Use alternate WWN? */
3746 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
3747 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
3748 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
3749 }
3750
3751 /* Prepare nodename */
3752 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
3753 /*
3754 * Firmware will apply the following mask if the nodename was
3755 * not provided.
3756 */
3757 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
3758 icb->node_name[0] &= 0xF0;
3759 }
3760
3761 /* Set host adapter parameters. */
3762 ha->flags.disable_risc_code_load = 0;
3763 ha->flags.enable_lip_reset = 0;
3764 ha->flags.enable_lip_full_login =
3765 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
3766 ha->flags.enable_target_reset =
3767 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
3768 ha->flags.enable_led_scheme = 0;
3769 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
3770
3771 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
3772 (BIT_6 | BIT_5 | BIT_4)) >> 4;
3773
3774 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
3775 sizeof(ha->fw_seriallink_options24));
3776
3777 /* save HBA serial number */
3778 ha->serial0 = icb->port_name[5];
3779 ha->serial1 = icb->port_name[6];
3780 ha->serial2 = icb->port_name[7];
3781 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
3782 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
3783
3784 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
3785
3786 ha->retry_count = le16_to_cpu(nv->login_retry_count);
3787
3788 /* Set minimum login_timeout to 4 seconds. */
3789 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
3790 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
3791 if (le16_to_cpu(nv->login_timeout) < 4)
3792 nv->login_timeout = __constant_cpu_to_le16(4);
3793 ha->login_timeout = le16_to_cpu(nv->login_timeout);
3794 icb->login_timeout = nv->login_timeout;
3795
3796 /* Set minimum RATOV to 100 tenths of a second. */
3797 ha->r_a_tov = 100;
3798
3799 ha->loop_reset_delay = nv->reset_delay;
3800
3801 /* Link Down Timeout = 0:
3802 *
3803 * When Port Down timer expires we will start returning
3804 * I/O's to OS with "DID_NO_CONNECT".
3805 *
3806 * Link Down Timeout != 0:
3807 *
3808 * The driver waits for the link to come up after link down
3809 * before returning I/Os to OS with "DID_NO_CONNECT".
3810 */
3811 if (le16_to_cpu(nv->link_down_timeout) == 0) {
3812 ha->loop_down_abort_time =
3813 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
3814 } else {
3815 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
3816 ha->loop_down_abort_time =
3817 (LOOP_DOWN_TIME - ha->link_down_timeout);
3818 }
3819
3820 /* Need enough time to try and get the port back. */
3821 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
3822 if (qlport_down_retry)
3823 ha->port_down_retry_count = qlport_down_retry;
3824
3825 /* Set login_retry_count */
3826 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
3827 if (ha->port_down_retry_count ==
3828 le16_to_cpu(nv->port_down_retry_count) &&
3829 ha->port_down_retry_count > 3)
3830 ha->login_retry_count = ha->port_down_retry_count;
3831 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
3832 ha->login_retry_count = ha->port_down_retry_count;
3833 if (ql2xloginretrycount)
3834 ha->login_retry_count = ql2xloginretrycount;
3835
3836 /* Enable ZIO. */
3837 if (!vha->flags.init_done) {
3838 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
3839 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
3840 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
3841 le16_to_cpu(icb->interrupt_delay_timer): 2;
3842 }
3843 icb->firmware_options_2 &= __constant_cpu_to_le32(
3844 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
3845 vha->flags.process_response_queue = 0;
3846 if (ha->zio_mode != QLA_ZIO_DISABLED) {
3847 ha->zio_mode = QLA_ZIO_MODE_6;
3848
3849 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
3850 "(%d us).\n", vha->host_no, ha->zio_mode,
3851 ha->zio_timer * 100));
3852 qla_printk(KERN_INFO, ha,
3853 "ZIO mode %d enabled; timer delay (%d us).\n",
3854 ha->zio_mode, ha->zio_timer * 100);
3855
3856 icb->firmware_options_2 |= cpu_to_le32(
3857 (uint32_t)ha->zio_mode);
3858 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
3859 vha->flags.process_response_queue = 1;
3860 }
3861
3862 if (rval) {
3863 DEBUG2_3(printk(KERN_WARNING
3864 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
3865 }
3866 return (rval);
3867 }
3868
3869 static int
3870 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
3871 uint32_t faddr)
3872 {
3873 int rval = QLA_SUCCESS;
3874 int segments, fragment;
3875 uint32_t *dcode, dlen;
3876 uint32_t risc_addr;
3877 uint32_t risc_size;
3878 uint32_t i;
3879 struct qla_hw_data *ha = vha->hw;
3880 struct req_que *req = ha->req_q_map[0];
3881
3882 qla_printk(KERN_INFO, ha,
3883 "FW: Loading from flash (%x)...\n", faddr);
3884
3885 rval = QLA_SUCCESS;
3886
3887 segments = FA_RISC_CODE_SEGMENTS;
3888 dcode = (uint32_t *)req->ring;
3889 *srisc_addr = 0;
3890
3891 /* Validate firmware image by checking version. */
3892 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
3893 for (i = 0; i < 4; i++)
3894 dcode[i] = be32_to_cpu(dcode[i]);
3895 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
3896 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
3897 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
3898 dcode[3] == 0)) {
3899 qla_printk(KERN_WARNING, ha,
3900 "Unable to verify integrity of flash firmware image!\n");
3901 qla_printk(KERN_WARNING, ha,
3902 "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
3903 dcode[1], dcode[2], dcode[3]);
3904
3905 return QLA_FUNCTION_FAILED;
3906 }
3907
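/* Each firmware segment begins with a small header; as read below,
 * dword 2 carries the RISC load address and dword 3 the segment
 * length in dwords.
 */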
3908 while (segments && rval == QLA_SUCCESS) {
3909 /* Read segment's load information. */
3910 qla24xx_read_flash_data(vha, dcode, faddr, 4);
3911
3912 risc_addr = be32_to_cpu(dcode[2]);
3913 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
3914 risc_size = be32_to_cpu(dcode[3]);
3915
3916 fragment = 0;
3917 while (risc_size > 0 && rval == QLA_SUCCESS) {
3918 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
3919 if (dlen > risc_size)
3920 dlen = risc_size;
3921
3922 DEBUG7(printk("scsi(%ld): Loading risc segment @ risc "
3923 "addr %x, number of dwords 0x%x, offset 0x%x.\n",
3924 vha->host_no, risc_addr, dlen, faddr));
3925
3926 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
3927 for (i = 0; i < dlen; i++)
3928 dcode[i] = swab32(dcode[i]);
3929
3930 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
3931 dlen);
3932 if (rval) {
3933 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
3934 "segment %d of firmware\n", vha->host_no,
3935 fragment));
3936 qla_printk(KERN_WARNING, ha,
3937 "[ERROR] Failed to load segment %d of "
3938 "firmware\n", fragment);
3939 break;
3940 }
3941
3942 faddr += dlen;
3943 risc_addr += dlen;
3944 risc_size -= dlen;
3945 fragment++;
3946 }
3947
3948 /* Next segment. */
3949 segments--;
3950 }
3951
3952 return rval;
3953 }
3954
3955 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
3956
3957 int
3958 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3959 {
3960 int rval;
3961 int i, fragment;
3962 uint16_t *wcode, *fwcode;
3963 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
3964 struct fw_blob *blob;
3965 struct qla_hw_data *ha = vha->hw;
3966 struct req_que *req = ha->req_q_map[0];
3967
3968 /* Load firmware blob. */
3969 blob = qla2x00_request_firmware(vha);
3970 if (!blob) {
3971 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
3972 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
3973 "from: " QLA_FW_URL ".\n");
3974 return QLA_FUNCTION_FAILED;
3975 }
3976
3977 rval = QLA_SUCCESS;
3978
3979 wcode = (uint16_t *)req->ring;
3980 *srisc_addr = 0;
3981 fwcode = (uint16_t *)blob->fw->data;
3982 fwclen = 0;
3983
3984 /* Validate firmware image by checking version. */
3985 if (blob->fw->size < 8 * sizeof(uint16_t)) {
3986 qla_printk(KERN_WARNING, ha,
3987 "Unable to verify integrity of firmware image (%Zd)!\n",
3988 blob->fw->size);
3989 goto fail_fw_integrity;
3990 }
3991 for (i = 0; i < 4; i++)
3992 wcode[i] = be16_to_cpu(fwcode[i + 4]);
3993 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
3994 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
3995 wcode[2] == 0 && wcode[3] == 0)) {
3996 qla_printk(KERN_WARNING, ha,
3997 "Unable to verify integrity of firmware image!\n");
3998 qla_printk(KERN_WARNING, ha,
3999 "Firmware data: %04x %04x %04x %04x!\n", wcode[0],
4000 wcode[1], wcode[2], wcode[3]);
4001 goto fail_fw_integrity;
4002 }
4003
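/* blob->segs is a zero-terminated list of RISC load addresses; each
 * segment's word count is taken from offset 3 of its image header
 * (fwcode[3] below).
 */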
4004 seg = blob->segs;
4005 while (*seg && rval == QLA_SUCCESS) {
4006 risc_addr = *seg;
4007 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
4008 risc_size = be16_to_cpu(fwcode[3]);
4009
4010 /* Validate firmware image size. */
4011 fwclen += risc_size * sizeof(uint16_t);
4012 if (blob->fw->size < fwclen) {
4013 qla_printk(KERN_WARNING, ha,
4014 "Unable to verify integrity of firmware image "
4015 "(%Zd)!\n", blob->fw->size);
4016 goto fail_fw_integrity;
4017 }
4018
4019 fragment = 0;
4020 while (risc_size > 0 && rval == QLA_SUCCESS) {
4021 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
4022 if (wlen > risc_size)
4023 wlen = risc_size;
4024
4025 DEBUG7(printk("scsi(%ld): Loading risc segment @ risc "
4026 "addr %x, number of words 0x%x.\n", vha->host_no,
4027 risc_addr, wlen));
4028
4029 for (i = 0; i < wlen; i++)
4030 wcode[i] = swab16(fwcode[i]);
4031
4032 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4033 wlen);
4034 if (rval) {
4035 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4036 "segment %d of firmware\n", vha->host_no,
4037 fragment));
4038 qla_printk(KERN_WARNING, ha,
4039 "[ERROR] Failed to load segment %d of "
4040 "firmware\n", fragment);
4041 break;
4042 }
4043
4044 fwcode += wlen;
4045 risc_addr += wlen;
4046 risc_size -= wlen;
4047 fragment++;
4048 }
4049
4050 /* Next segment. */
4051 seg++;
4052 }
4053 return rval;
4054
4055 fail_fw_integrity:
4056 return QLA_FUNCTION_FAILED;
4057 }
4058
4059 static int
4060 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4061 {
4062 int rval;
4063 int segments, fragment;
4064 uint32_t *dcode, dlen;
4065 uint32_t risc_addr;
4066 uint32_t risc_size;
4067 uint32_t i;
4068 struct fw_blob *blob;
4069 uint32_t *fwcode, fwclen;
4070 struct qla_hw_data *ha = vha->hw;
4071 struct req_que *req = ha->req_q_map[0];
4072
4073 /* Load firmware blob. */
4074 blob = qla2x00_request_firmware(vha);
4075 if (!blob) {
4076 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
4077 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
4078 "from: " QLA_FW_URL ".\n");
4079
4080 return QLA_FUNCTION_FAILED;
4081 }
4082
4083 qla_printk(KERN_INFO, ha,
4084 "FW: Loading via request-firmware...\n");
4085
4086 rval = QLA_SUCCESS;
4087
4088 segments = FA_RISC_CODE_SEGMENTS;
4089 dcode = (uint32_t *)req->ring;
4090 *srisc_addr = 0;
4091 fwcode = (uint32_t *)blob->fw->data;
4092 fwclen = 0;
4093
4094 /* Validate firmware image by checking version. */
4095 if (blob->fw->size < 8 * sizeof(uint32_t)) {
4096 qla_printk(KERN_WARNING, ha,
4097 "Unable to verify integrity of firmware image (%Zd)!\n",
4098 blob->fw->size);
4099 goto fail_fw_integrity;
4100 }
4101 for (i = 0; i < 4; i++)
4102 dcode[i] = be32_to_cpu(fwcode[i + 4]);
4103 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4104 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4105 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4106 dcode[3] == 0)) {
4107 qla_printk(KERN_WARNING, ha,
4108 "Unable to verify integrity of firmware image!\n");
4109 qla_printk(KERN_WARNING, ha,
4110 "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
4111 dcode[1], dcode[2], dcode[3]);
4112 goto fail_fw_integrity;
4113 }
4114
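/* The request-firmware image shares the flash segment layout: dword 2
 * of each header is the RISC load address, dword 3 the length in
 * dwords.
 */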
4115 while (segments && rval == QLA_SUCCESS) {
4116 risc_addr = be32_to_cpu(fwcode[2]);
4117 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
4118 risc_size = be32_to_cpu(fwcode[3]);
4119
4120 /* Validate firmware image size. */
4121 fwclen += risc_size * sizeof(uint32_t);
4122 if (blob->fw->size < fwclen) {
4123 qla_printk(KERN_WARNING, ha,
4124 "Unable to verify integrity of firmware image "
4125 "(%Zd)!\n", blob->fw->size);
4126
4127 goto fail_fw_integrity;
4128 }
4129
4130 fragment = 0;
4131 while (risc_size > 0 && rval == QLA_SUCCESS) {
4132 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4133 if (dlen > risc_size)
4134 dlen = risc_size;
4135
4136 DEBUG7(printk("scsi(%ld): Loading risc segment @ risc "
4137 "addr %x, number of dwords 0x%x.\n", vha->host_no,
4138 risc_addr, dlen));
4139
4140 for (i = 0; i < dlen; i++)
4141 dcode[i] = swab32(fwcode[i]);
4142
4143 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4144 dlen);
4145 if (rval) {
4146 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4147 "segment %d of firmware\n", vha->host_no,
4148 fragment));
4149 qla_printk(KERN_WARNING, ha,
4150 "[ERROR] Failed to load segment %d of "
4151 "firmware\n", fragment);
4152 break;
4153 }
4154
4155 fwcode += dlen;
4156 risc_addr += dlen;
4157 risc_size -= dlen;
4158 fragment++;
4159 }
4160
4161 /* Next segment. */
4162 segments--;
4163 }
4164 return rval;
4165
4166 fail_fw_integrity:
4167 return QLA_FUNCTION_FAILED;
4168 }
4169
4170 int
4171 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4172 {
4173 int rval;
4174
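/* ql2xfwloadbin == 1 defers to the ISP81xx loader, which prefers the
 * flash-resident image over the request-firmware blob.
 */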
4175 if (ql2xfwloadbin == 1)
4176 return qla81xx_load_risc(vha, srisc_addr);
4177
4178 /*
4179 * FW Load priority:
4180 * 1) Firmware via request-firmware interface (.bin file).
4181 * 2) Firmware residing in flash.
4182 */
4183 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4184 if (rval == QLA_SUCCESS)
4185 return rval;
4186
4187 return qla24xx_load_risc_flash(vha, srisc_addr,
4188 vha->hw->flt_region_fw);
4189 }
4190
4191 int
4192 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4193 {
4194 int rval;
4195 struct qla_hw_data *ha = vha->hw;
4196
4197 if (ql2xfwloadbin == 2)
4198 goto try_blob_fw;
4199
4200 /*
4201 * FW Load priority:
4202 * 1) Firmware residing in flash.
4203 * 2) Firmware via request-firmware interface (.bin file).
4204 * 3) Golden-Firmware residing in flash -- limited operation.
4205 */
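/* ql2xfwloadbin == 2 (checked above) skips the flash attempt entirely.
 * The golden image is tried only when flt_region_gold_fw is set, and
 * running it flags limited operation via ha->flags.running_gold_fw.
 */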
4206 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4207 if (rval == QLA_SUCCESS)
4208 return rval;
4209
4210 try_blob_fw:
4211 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4212 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4213 return rval;
4214
4215 qla_printk(KERN_ERR, ha,
4216 "FW: Attempting to fallback to golden firmware...\n");
4217 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4218 if (rval != QLA_SUCCESS)
4219 return rval;
4220
4221 qla_printk(KERN_ERR, ha,
4222 "FW: Please update operational firmware...\n");
4223 ha->flags.running_gold_fw = 1;
4224
4225 return rval;
4226 }
4227
4228 void
4229 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4230 {
4231 int ret, retries;
4232 struct qla_hw_data *ha = vha->hw;
4233
4234 if (!IS_FWI2_CAPABLE(ha))
4235 return;
4236 if (!ha->fw_major_version)
4237 return;
4238
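/* Retry the stop-firmware mailbox command up to five times, resetting
 * and re-initializing the chip between attempts; timeouts and
 * unsupported commands are not retried.
 */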
4239 ret = qla2x00_stop_firmware(vha);
4240 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4241 ret != QLA_INVALID_COMMAND && retries; retries--) {
4242 ha->isp_ops->reset_chip(vha);
4243 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4244 continue;
4245 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4246 continue;
4247 qla_printk(KERN_INFO, ha,
4248 "Attempting retry of stop-firmware command...\n");
4249 ret = qla2x00_stop_firmware(vha);
4250 }
4251 }
4252
4253 int
4254 qla24xx_configure_vhba(scsi_qla_host_t *vha)
4255 {
4256 int rval = QLA_SUCCESS;
4257 uint16_t mb[MAILBOX_REGISTER_COUNT];
4258 struct qla_hw_data *ha = vha->hw;
4259 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4260 struct req_que *req;
4261 struct rsp_que *rsp;
4262
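/* Only NPIV virtual ports (nonzero vp_idx) are configured here. */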
4263 if (!vha->vp_idx)
4264 return -EINVAL;
4265
4266 rval = qla2x00_fw_ready(base_vha);
4267 if (ql2xmultique_tag)
4268 req = ha->req_q_map[0];
4269 else
4270 req = vha->req;
4271 rsp = req->rsp;
4272
4273 if (rval == QLA_SUCCESS) {
4274 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4275 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4276 }
4277
4278 vha->flags.management_server_logged_in = 0;
4279
4280 /* Log in to the SNS first. */
4281 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
4282 if (mb[0] != MBS_COMMAND_COMPLETE) {
4283 DEBUG15(qla_printk(KERN_INFO, ha,
4284 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
4285 "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
4286 mb[0], mb[1], mb[2], mb[6], mb[7]));
4287 return (QLA_FUNCTION_FAILED);
4288 }
4289
4290 atomic_set(&vha->loop_down_timer, 0);
4291 atomic_set(&vha->loop_state, LOOP_UP);
4292 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4293 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4294 rval = qla2x00_loop_resync(base_vha);
4295
4296 return rval;
4297 }
4298
4299 /* 84XX Support **************************************************************/
4300
4301 static LIST_HEAD(qla_cs84xx_list);
4302 static DEFINE_MUTEX(qla_cs84xx_mutex);
4303
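/* ISP84xx chip state is shared per PCI bus: return an existing
 * kref-counted entry for this bus if one is registered, otherwise
 * allocate and register a new one.
 */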
4304 static struct qla_chip_state_84xx *
4305 qla84xx_get_chip(struct scsi_qla_host *vha)
4306 {
4307 struct qla_chip_state_84xx *cs84xx;
4308 struct qla_hw_data *ha = vha->hw;
4309
4310 mutex_lock(&qla_cs84xx_mutex);
4311
4312 /* Find any shared 84xx chip. */
4313 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
4314 if (cs84xx->bus == ha->pdev->bus) {
4315 kref_get(&cs84xx->kref);
4316 goto done;
4317 }
4318 }
4319
4320 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
4321 if (!cs84xx)
4322 goto done;
4323
4324 kref_init(&cs84xx->kref);
4325 spin_lock_init(&cs84xx->access_lock);
4326 mutex_init(&cs84xx->fw_update_mutex);
4327 cs84xx->bus = ha->pdev->bus;
4328
4329 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
4330 done:
4331 mutex_unlock(&qla_cs84xx_mutex);
4332 return cs84xx;
4333 }
4334
4335 static void
4336 __qla84xx_chip_release(struct kref *kref)
4337 {
4338 struct qla_chip_state_84xx *cs84xx =
4339 container_of(kref, struct qla_chip_state_84xx, kref);
4340
4341 mutex_lock(&qla_cs84xx_mutex);
4342 list_del(&cs84xx->list);
4343 mutex_unlock(&qla_cs84xx_mutex);
4344 kfree(cs84xx);
4345 }
4346
4347 void
4348 qla84xx_put_chip(struct scsi_qla_host *vha)
4349 {
4350 struct qla_hw_data *ha = vha->hw;
4351 if (ha->cs84xx)
4352 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
4353 }
4354
4355 static int
4356 qla84xx_init_chip(scsi_qla_host_t *vha)
4357 {
4358 int rval;
4359 uint16_t status[2];
4360 struct qla_hw_data *ha = vha->hw;
4361
4362 mutex_lock(&ha->cs84xx->fw_update_mutex);
4363
4364 rval = qla84xx_verify_chip(vha, status);
4365
4366 mutex_unlock(&ha->cs84xx->fw_update_mutex);
4367
4368 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
4369 QLA_SUCCESS;
4370 }
4371
4372 /* 81XX Support **************************************************************/
4373
4374 int
4375 qla81xx_nvram_config(scsi_qla_host_t *vha)
4376 {
4377 int rval;
4378 struct init_cb_81xx *icb;
4379 struct nvram_81xx *nv;
4380 uint32_t *dptr;
4381 uint8_t *dptr1, *dptr2;
4382 uint32_t chksum;
4383 uint16_t cnt;
4384 struct qla_hw_data *ha = vha->hw;
4385
4386 rval = QLA_SUCCESS;
4387 icb = (struct init_cb_81xx *)ha->init_cb;
4388 nv = ha->nvram;
4389
4390 /* Determine NVRAM starting address. */
4391 ha->nvram_size = sizeof(struct nvram_81xx);
4392 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4393
4394 /* Get VPD data into cache */
4395 ha->vpd = ha->nvram + VPD_OFFSET;
4396 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
4397 ha->vpd_size);
4398
4399 /* Get NVRAM data into cache and calculate checksum. */
4400 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
4401 ha->nvram_size);
4402 dptr = (uint32_t *)nv;
4403 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4404 chksum += le32_to_cpu(*dptr++);
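/* A valid image sums to zero over 32-bit words; a nonzero checksum,
 * a bad "ISP " signature or a stale nvram_version selects the
 * defaults below.
 */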
4405
4406 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4407 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4408
4409 /* Bad NVRAM data, set default parameters. */
4410 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4411 || nv->id[3] != ' ' ||
4412 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4413 /* Reset NVRAM data. */
4414 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4415 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4416 le16_to_cpu(nv->nvram_version));
4417 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4418 "invalid -- WWPN) defaults.\n");
4419
4420 /*
4421 * Set default initialization control block.
4422 */
4423 memset(nv, 0, ha->nvram_size);
4424 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4425 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4426 nv->frame_payload_size = __constant_cpu_to_le16(2048);
4427 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4428 nv->exchange_count = __constant_cpu_to_le16(0);
4429 nv->port_name[0] = 0x21;
4430 nv->port_name[1] = 0x00 + ha->port_no;
4431 nv->port_name[2] = 0x00;
4432 nv->port_name[3] = 0xe0;
4433 nv->port_name[4] = 0x8b;
4434 nv->port_name[5] = 0x1c;
4435 nv->port_name[6] = 0x55;
4436 nv->port_name[7] = 0x86;
4437 nv->node_name[0] = 0x20;
4438 nv->node_name[1] = 0x00;
4439 nv->node_name[2] = 0x00;
4440 nv->node_name[3] = 0xe0;
4441 nv->node_name[4] = 0x8b;
4442 nv->node_name[5] = 0x1c;
4443 nv->node_name[6] = 0x55;
4444 nv->node_name[7] = 0x86;
4445 nv->login_retry_count = __constant_cpu_to_le16(8);
4446 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4447 nv->login_timeout = __constant_cpu_to_le16(0);
4448 nv->firmware_options_1 =
4449 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4450 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4451 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4452 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4453 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4454 nv->efi_parameters = __constant_cpu_to_le32(0);
4455 nv->reset_delay = 5;
4456 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4457 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4458 nv->link_down_timeout = __constant_cpu_to_le16(30);
4459 nv->enode_mac[0] = 0x00;
4460 nv->enode_mac[1] = 0x02;
4461 nv->enode_mac[2] = 0x03;
4462 nv->enode_mac[3] = 0x04;
4463 nv->enode_mac[4] = 0x05;
4464 nv->enode_mac[5] = 0x06 + ha->port_no;
4465
4466 rval = 1;
4467 }
4468
4469 /* Reset Initialization control block */
4470 memset(icb, 0, sizeof(struct init_cb_81xx));
4471
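/* The NVRAM layout mirrors the ICB over two contiguous runs of fields
 * (version..response_q_inpointer and
 * interrupt_delay_timer..reserved_5), so each run is copied
 * byte-for-byte.
 */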
4472 /* Copy 1st segment. */
4473 dptr1 = (uint8_t *)icb;
4474 dptr2 = (uint8_t *)&nv->version;
4475 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
4476 while (cnt--)
4477 *dptr1++ = *dptr2++;
4478
4479 icb->login_retry_count = nv->login_retry_count;
4480
4481 /* Copy 2nd segment. */
4482 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
4483 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
4484 cnt = (uint8_t *)&icb->reserved_5 -
4485 (uint8_t *)&icb->interrupt_delay_timer;
4486 while (cnt--)
4487 *dptr1++ = *dptr2++;
4488
4489 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
4490 /* Some boards (with valid NVRAMs) still report an all-zero enode_mac! */
4491 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
4492 icb->enode_mac[0] = 0x01;
4493 icb->enode_mac[1] = 0x02;
4494 icb->enode_mac[2] = 0x03;
4495 icb->enode_mac[3] = 0x04;
4496 icb->enode_mac[4] = 0x05;
4497 icb->enode_mac[5] = 0x06 + ha->port_no;
4498 }
4499
4500 /* Use extended-initialization control block. */
4501 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
4502
4503 /*
4504 * Setup driver NVRAM options.
4505 */
4506 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4507 "QLE81XX");
4508
4509 /* Use alternate WWN? */
4510 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4511 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4512 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4513 }
4514
4515 /* Prepare nodename */
4516 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
4517 /*
4518 * Firmware will apply the following mask if the nodename was
4519 * not provided.
4520 */
4521 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4522 icb->node_name[0] &= 0xF0;
4523 }
4524
4525 /* Set host adapter parameters. */
4526 ha->flags.disable_risc_code_load = 0;
4527 ha->flags.enable_lip_reset = 0;
4528 ha->flags.enable_lip_full_login =
4529 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
4530 ha->flags.enable_target_reset =
4531 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
4532 ha->flags.enable_led_scheme = 0;
4533 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
4534
4535 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
4536 (BIT_6 | BIT_5 | BIT_4)) >> 4;
4537
4538 /* save HBA serial number */
4539 ha->serial0 = icb->port_name[5];
4540 ha->serial1 = icb->port_name[6];
4541 ha->serial2 = icb->port_name[7];
4542 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4543 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4544
4545 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4546
4547 ha->retry_count = le16_to_cpu(nv->login_retry_count);
4548
4549 /* Set minimum login_timeout to 4 seconds. */
4550 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
4551 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
4552 if (le16_to_cpu(nv->login_timeout) < 4)
4553 nv->login_timeout = __constant_cpu_to_le16(4);
4554 ha->login_timeout = le16_to_cpu(nv->login_timeout);
4555 icb->login_timeout = nv->login_timeout;
4556
4557 /* Set minimum RATOV to 100 tenths of a second. */
4558 ha->r_a_tov = 100;
4559
4560 ha->loop_reset_delay = nv->reset_delay;
4561
4562 /* Link Down Timeout = 0:
4563 *
4564 * When Port Down timer expires we will start returning
4565 * I/Os to the OS with "DID_NO_CONNECT".
4566 *
4567 * Link Down Timeout != 0:
4568 *
4569 * The driver waits for the link to come up after link down
4570 * before returning I/Os to the OS with "DID_NO_CONNECT".
4571 */
4572 if (le16_to_cpu(nv->link_down_timeout) == 0) {
4573 ha->loop_down_abort_time =
4574 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4575 } else {
4576 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
4577 ha->loop_down_abort_time =
4578 (LOOP_DOWN_TIME - ha->link_down_timeout);
4579 }
4580
4581 /* Need enough time to try to get the port back. */
4582 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
4583 if (qlport_down_retry)
4584 ha->port_down_retry_count = qlport_down_retry;
4585
4586 /* Set login_retry_count */
4587 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
4588 if (ha->port_down_retry_count ==
4589 le16_to_cpu(nv->port_down_retry_count) &&
4590 ha->port_down_retry_count > 3)
4591 ha->login_retry_count = ha->port_down_retry_count;
4592 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4593 ha->login_retry_count = ha->port_down_retry_count;
4594 if (ql2xloginretrycount)
4595 ha->login_retry_count = ql2xloginretrycount;
4596
4597 /* Enable ZIO. */
4598 if (!vha->flags.init_done) {
4599 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
4600 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4601 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
4602 le16_to_cpu(icb->interrupt_delay_timer): 2;
4603 }
4604 icb->firmware_options_2 &= __constant_cpu_to_le32(
4605 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4606 vha->flags.process_response_queue = 0;
4607 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4608 ha->zio_mode = QLA_ZIO_MODE_6;
4609
4610 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
4611 "(%d us).\n", vha->host_no, ha->zio_mode,
4612 ha->zio_timer * 100));
4613 qla_printk(KERN_INFO, ha,
4614 "ZIO mode %d enabled; timer delay (%d us).\n",
4615 ha->zio_mode, ha->zio_timer * 100);
4616
4617 icb->firmware_options_2 |= cpu_to_le32(
4618 (uint32_t)ha->zio_mode);
4619 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4620 vha->flags.process_response_queue = 1;
4621 }
4622
4623 if (rval) {
4624 DEBUG2_3(printk(KERN_WARNING
4625 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
4626 }
4627 return (rval);
4628 }
4629
4630 void
4631 qla81xx_update_fw_options(scsi_qla_host_t *ha)
4632 {
4633 }